linux/net/sched/em_meta.c


/*
* net/sched/em_meta.c Metadata ematch
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*
* ==========================================================================
*
 * The metadata ematch compares two meta objects where each object
 * represents either a meta value stored in the kernel or a static
 * value provided by userspace. The objects themselves are not
 * supplied by userspace; instead, userspace provides definitions
 * carrying the information needed to build them. Every object is of
 * a certain type which must be equal to the type of the object it
 * is being compared to.
 *
 * The definition of an object consists of a type (meta type), an
 * identifier (meta id) and additional type specific information.
 * The meta id is either TCF_META_ID_VALUE for values provided by
 * userspace or an index into the meta operations table consisting of
 * function pointers to type specific meta data collectors returning
 * the requested meta value.
*
 *                 lvalue                          rvalue
 *              +-----------+                   +-----------+
 *              | type: INT |                   | type: INT |
 *         def  | id: DEV   |                   | id: VALUE |
 *              | data:     |                   | data: 3   |
 *              +-----------+                   +-----------+
 *                    |                               |
 *                    ---> meta_ops[INT][DEV](...)    |
 *                           |                        |
 *                    --------                        |
 *                    V                               V
 *              +-----------+                   +-----------+
 *              | type: INT |                   | type: INT |
 *         obj  | id: DEV   |                   | id: VALUE |
 *              | data: 2   |<-- data got       | data: 3   |
 *              +-----------+    filled out     +-----------+
 *                    |                               |
 *                    ----------> 2 equals 3 <---------
*
 * This is a simplified schema; the complexity varies depending
 * on the meta type. Obviously, the length of the data must also
 * be provided for non-numeric types.
 *
 * Additionally, type dependent modifiers such as shift operators
 * or masks may be applied to extend the functionality. As of now,
 * the variable length type supports shifting the byte string to
 * the right, eating up any number of octets and thus supporting
 * wildcard interface name comparisons such as "ppp%" matching
 * ppp0..9.
*
* NOTE: Certain meta values depend on other subsystems and are
* only available if that subsystem is enabled in the kernel.
*/
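/*
 * Userspace usage sketch (the exact syntax depends on the iproute2
 * version installed): the meta ematch is typically invoked through
 * the "basic" classifier, e.g.
 *
 *	tc filter add dev eth0 parent 1: basic \
 *		match 'meta(priority eq 3)' classid 1:1
 *
 * which sets up an INT/INT comparison of skb->priority (collected by
 * meta_int_priority() below) against the constant 3.
 */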
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
struct meta_obj {
unsigned long value;
unsigned int len;
};
struct meta_value {
struct tcf_meta_val hdr;
unsigned long val;
unsigned int len;
};
struct meta_match {
struct meta_value lvalue;
struct meta_value rvalue;
};
static inline int meta_id(struct meta_value *v)
{
return TCF_META_ID(v->hdr.kind);
}
static inline int meta_type(struct meta_value *v)
{
return TCF_META_TYPE(v->hdr.kind);
}
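/* hdr.kind packs both parts of a definition into a single 16 bit
 * field: TCF_META_TYPE() extracts the meta type from the upper bits
 * and TCF_META_ID() the meta id from the lower bits, as defined in
 * <linux/tc_ematch/tc_em_meta.h>.
 */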
#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
struct tcf_pkt_info *info, struct meta_value *v, \
struct meta_obj *dst, int *err)
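/* For example, META_COLLECTOR(int_mark) expands to the definition
 * header of
 *
 *	static void meta_int_mark(struct sk_buff *skb,
 *				  struct tcf_pkt_info *info,
 *				  struct meta_value *v,
 *				  struct meta_obj *dst, int *err)
 */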
/**************************************************************************
* System status & misc
**************************************************************************/
META_COLLECTOR(int_random)
{
get_random_bytes(&dst->value, sizeof(dst->value));
}
static inline unsigned long fixed_loadavg(int load)
{
int rnd_load = load + (FIXED_1/200);
int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;
return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}
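/* Worked example (assuming the usual FSHIFT of 11, i.e. FIXED_1 ==
 * 2048): a load average of 1.50 arrives as 3072; rnd_load = 3072 +
 * 2048/200 = 3082, the integer part becomes (3082 >> 11) * 100 = 100
 * and the fraction ((3082 & 2047) * 100) >> 11 = 50, yielding 150,
 * i.e. the load scaled by 100 and rounded to two decimal digits.
 */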
META_COLLECTOR(int_loadavg_0)
{
dst->value = fixed_loadavg(avenrun[0]);
}
META_COLLECTOR(int_loadavg_1)
{
dst->value = fixed_loadavg(avenrun[1]);
}
META_COLLECTOR(int_loadavg_2)
{
dst->value = fixed_loadavg(avenrun[2]);
}
/**************************************************************************
* Device names & indices
**************************************************************************/
static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
if (unlikely(dev == NULL))
return -1;
dst->value = dev->ifindex;
return 0;
}
static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
if (unlikely(dev == NULL))
return -1;
dst->value = (unsigned long) dev->name;
dst->len = strlen(dev->name);
return 0;
}
META_COLLECTOR(int_dev)
{
*err = int_dev(skb->dev, dst);
}
META_COLLECTOR(var_dev)
{
*err = var_dev(skb->dev, dst);
}
/**************************************************************************
* vlan tag
**************************************************************************/
META_COLLECTOR(int_vlan_tag)
{
unsigned short tag;
tag = skb_vlan_tag_get(skb);
if (!tag && __vlan_get_tag(skb, &tag))
*err = -1;
else
dst->value = tag;
}
/**************************************************************************
* skb attributes
**************************************************************************/
META_COLLECTOR(int_priority)
{
dst->value = skb->priority;
}
META_COLLECTOR(int_protocol)
{
/* Let userspace take care of the byte ordering */
dst->value = tc_skb_protocol(skb);
}
META_COLLECTOR(int_pkttype)
{
dst->value = skb->pkt_type;
}
META_COLLECTOR(int_pktlen)
{
dst->value = skb->len;
}
META_COLLECTOR(int_datalen)
{
dst->value = skb->data_len;
}
META_COLLECTOR(int_maclen)
{
dst->value = skb->mac_len;
}
META_COLLECTOR(int_rxhash)
{
dst->value = skb_get_hash(skb);
}
/**************************************************************************
* Netfilter
**************************************************************************/
META_COLLECTOR(int_mark)
{
dst->value = skb->mark;
}
/**************************************************************************
* Traffic Control
**************************************************************************/
META_COLLECTOR(int_tcindex)
{
dst->value = skb->tc_index;
}
/**************************************************************************
* Routing
**************************************************************************/
META_COLLECTOR(int_rtclassid)
{
if (unlikely(skb_dst(skb) == NULL))
*err = -1;
else
#ifdef CONFIG_IP_ROUTE_CLASSID
dst->value = skb_dst(skb)->tclassid;
#else
dst->value = 0;
#endif
}
META_COLLECTOR(int_rtiif)
{
if (unlikely(skb_rtable(skb) == NULL))
*err = -1;
else
dst->value = inet_iif(skb);
}
/**************************************************************************
* Socket Attributes
**************************************************************************/
#define skip_nonlocal(skb) \
(unlikely(skb->sk == NULL))
META_COLLECTOR(int_sk_family)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_family;
}
META_COLLECTOR(int_sk_state)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_state;
}
META_COLLECTOR(int_sk_reuse)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_reuse;
}
META_COLLECTOR(int_sk_bound_if)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
/* No error if bound_dev_if is 0, legal userspace check */
dst->value = skb->sk->sk_bound_dev_if;
}
META_COLLECTOR(var_sk_bound_if)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
if (skb->sk->sk_bound_dev_if == 0) {
dst->value = (unsigned long) "any";
dst->len = 3;
} else {
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(skb->sk),
skb->sk->sk_bound_dev_if);
*err = var_dev(dev, dst);
rcu_read_unlock();
}
}
META_COLLECTOR(int_sk_refcnt)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = atomic_read(&skb->sk->sk_refcnt);
}
META_COLLECTOR(int_sk_rcvbuf)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_rcvbuf;
}
META_COLLECTOR(int_sk_shutdown)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_shutdown;
}
META_COLLECTOR(int_sk_proto)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_protocol;
}
META_COLLECTOR(int_sk_type)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_type;
}
META_COLLECTOR(int_sk_rmem_alloc)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = sk_rmem_alloc_get(skb->sk);
}
META_COLLECTOR(int_sk_wmem_alloc)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = sk_wmem_alloc_get(skb->sk);
}
META_COLLECTOR(int_sk_omem_alloc)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = atomic_read(&skb->sk->sk_omem_alloc);
}
META_COLLECTOR(int_sk_rcv_qlen)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_receive_queue.qlen;
}
META_COLLECTOR(int_sk_snd_qlen)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_write_queue.qlen;
}
META_COLLECTOR(int_sk_wmem_queued)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_wmem_queued;
}
META_COLLECTOR(int_sk_fwd_alloc)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_forward_alloc;
}
META_COLLECTOR(int_sk_sndbuf)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_sndbuf;
}
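/* sk_allocation is a gfp_t, which is __bitwise under sparse; the
 * __force cast below merely strips that annotation so the allocation
 * flags can be reported as a plain integer meta value.
 */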
META_COLLECTOR(int_sk_alloc)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = (__force int) skb->sk->sk_allocation;
}
META_COLLECTOR(int_sk_hash)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_hash;
}
META_COLLECTOR(int_sk_lingertime)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_lingertime / HZ;
}
META_COLLECTOR(int_sk_err_qlen)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_error_queue.qlen;
}
META_COLLECTOR(int_sk_ack_bl)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_ack_backlog;
}
META_COLLECTOR(int_sk_max_ack_bl)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_max_ack_backlog;
}
META_COLLECTOR(int_sk_prio)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_priority;
}
META_COLLECTOR(int_sk_rcvlowat)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_rcvlowat;
}
META_COLLECTOR(int_sk_rcvtimeo)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_rcvtimeo / HZ;
}
META_COLLECTOR(int_sk_sndtimeo)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_sndtimeo / HZ;
}
META_COLLECTOR(int_sk_sendmsg_off)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_frag.offset;
}
META_COLLECTOR(int_sk_write_pend)
{
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
dst->value = skb->sk->sk_write_pending;
}
/**************************************************************************
* Meta value collectors assignment table
**************************************************************************/
struct meta_ops {
void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *);
};
#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }
/* Meta value operations table listing all meta value collectors and
 * assigning each of them to a type and meta id.
 */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
},
[TCF_META_TYPE_INT] = {
[META_ID(RANDOM)] = META_FUNC(int_random),
[META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0),
[META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
[META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
[META_ID(DEV)] = META_FUNC(int_dev),
[META_ID(PRIORITY)] = META_FUNC(int_priority),
[META_ID(PROTOCOL)] = META_FUNC(int_protocol),
[META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
[META_ID(PKTLEN)] = META_FUNC(int_pktlen),
[META_ID(DATALEN)] = META_FUNC(int_datalen),
[META_ID(MACLEN)] = META_FUNC(int_maclen),
[META_ID(NFMARK)] = META_FUNC(int_mark),
[META_ID(TCINDEX)] = META_FUNC(int_tcindex),
[META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
[META_ID(RTIIF)] = META_FUNC(int_rtiif),
[META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
[META_ID(SK_STATE)] = META_FUNC(int_sk_state),
[META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse),
[META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if),
[META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt),
[META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf),
[META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf),
[META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown),
[META_ID(SK_PROTO)] = META_FUNC(int_sk_proto),
[META_ID(SK_TYPE)] = META_FUNC(int_sk_type),
[META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc),
[META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc),
[META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc),
[META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued),
[META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen),
[META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen),
[META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
[META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
[META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
[META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
[META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
[META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
[META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
[META_ID(SK_PRIO)] = META_FUNC(int_sk_prio),
[META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat),
[META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo),
[META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
[META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
[META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
[META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
[META_ID(RXHASH)] = META_FUNC(int_rxhash),
}
};
static inline struct meta_ops *meta_ops(struct meta_value *val)
{
return &__meta_ops[meta_type(val)][meta_id(val)];
}
/**************************************************************************
* Type specific operations for TCF_META_TYPE_VAR
**************************************************************************/
static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
int r = a->len - b->len;
if (r == 0)
r = memcmp((void *) a->value, (void *) b->value, a->len);
return r;
}
static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
int len = nla_len(nla);
dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
if (dst->val == 0UL)
return -ENOMEM;
dst->len = len;
return 0;
}
static void meta_var_destroy(struct meta_value *v)
{
kfree((void *) v->val);
}
static void meta_var_apply_extras(struct meta_value *v,
struct meta_obj *dst)
{
int shift = v->hdr.shift;
if (shift && shift < dst->len)
dst->len -= shift;
}
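/* Example of the shift modifier mentioned in the file header: with an
 * rvalue of "ppp" (len 3) and a shift of 1 on the lvalue, a collected
 * device name such as "ppp0" (len 4) is trimmed to a 3 byte
 * comparison, so ppp0..ppp9 all match while "ppp10" (len 5) does not.
 */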
static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->val && v->len &&
nla_put(skb, tlv, v->len, (void *) v->val))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
/**************************************************************************
* Type specific operations for TCF_META_TYPE_INT
**************************************************************************/
static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
	/* Let gcc optimize it; the unlikely() is not backed by
	 * measurements, but jump free code for the (common) mismatch
	 * case seems more logical. */
if (unlikely(a->value == b->value))
return 0;
else if (a->value < b->value)
return -1;
else
return 1;
}
static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(unsigned long)) {
dst->val = *(unsigned long *) nla_data(nla);
dst->len = sizeof(unsigned long);
} else if (nla_len(nla) == sizeof(u32)) {
dst->val = nla_get_u32(nla);
dst->len = sizeof(u32);
} else
return -EINVAL;
return 0;
}
static void meta_int_apply_extras(struct meta_value *v,
struct meta_obj *dst)
{
if (v->hdr.shift)
dst->value >>= v->hdr.shift;
if (v->val)
dst->value &= v->val;
}
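/* Example: with hdr.shift = 4 and a mask of 0xf0 stored in v->val, a
 * collected value of 0x1234 is reduced to (0x1234 >> 4) & 0xf0 = 0x20
 * before the comparison takes place.
 */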
static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->len == sizeof(unsigned long)) {
if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
goto nla_put_failure;
} else if (v->len == sizeof(u32)) {
if (nla_put_u32(skb, tlv, v->val))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -1;
}
/**************************************************************************
* Type specific operations table
**************************************************************************/
struct meta_type_ops {
void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *);
void (*apply_extras)(struct meta_value *, struct meta_obj *);
int (*dump)(struct sk_buff *, struct meta_value *, int);
};
static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy,
.compare = meta_var_compare,
.change = meta_var_change,
.apply_extras = meta_var_apply_extras,
.dump = meta_var_dump
},
[TCF_META_TYPE_INT] = {
.compare = meta_int_compare,
.change = meta_int_change,
.apply_extras = meta_int_apply_extras,
.dump = meta_int_dump
}
};
static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
return &__meta_type_ops[meta_type(v)];
}
/**************************************************************************
* Core
**************************************************************************/
static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
struct meta_value *v, struct meta_obj *dst)
{
int err = 0;
if (meta_id(v) == TCF_META_ID_VALUE) {
dst->value = v->val;
dst->len = v->len;
return 0;
}
meta_ops(v)->get(skb, info, v, dst, &err);
if (err < 0)
return err;
if (meta_type_ops(v)->apply_extras)
meta_type_ops(v)->apply_extras(v, dst);
return 0;
}
static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
int r;
struct meta_match *meta = (struct meta_match *) m->data;
struct meta_obj l_value, r_value;
if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
meta_get(skb, info, &meta->rvalue, &r_value) < 0)
return 0;
r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
switch (meta->lvalue.hdr.op) {
case TCF_EM_OPND_EQ:
return !r;
case TCF_EM_OPND_LT:
return r < 0;
case TCF_EM_OPND_GT:
return r > 0;
}
return 0;
}
static void meta_delete(struct meta_match *meta)
{
if (meta) {
struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
if (ops && ops->destroy) {
ops->destroy(&meta->lvalue);
ops->destroy(&meta->rvalue);
}
}
kfree(meta);
}
static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
if (nla) {
if (nla_len(nla) == 0)
return -EINVAL;
return meta_type_ops(dst)->change(dst, nla);
}
return 0;
}
static inline int meta_is_supported(struct meta_value *val)
{
return !meta_id(val) || meta_ops(val)->get;
}
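/* TCF_META_ID_VALUE (meta id 0) needs no collector since meta_get()
 * serves it directly from the stored value; every other id must have
 * a get() callback in __meta_ops to be usable.
 */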
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
[TCA_EM_META_HDR] = { .len = sizeof(struct tcf_meta_hdr) },
};
static int em_meta_change(struct net *net, void *data, int len,
struct tcf_ematch *m)
{
int err;
struct nlattr *tb[TCA_EM_META_MAX + 1];
struct tcf_meta_hdr *hdr;
struct meta_match *meta = NULL;
err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy);
if (err < 0)
goto errout;
err = -EINVAL;
if (tb[TCA_EM_META_HDR] == NULL)
goto errout;
hdr = nla_data(tb[TCA_EM_META_HDR]);
if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
goto errout;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (meta == NULL) {
err = -ENOMEM;
goto errout;
}
memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
if (!meta_is_supported(&meta->lvalue) ||
!meta_is_supported(&meta->rvalue)) {
err = -EOPNOTSUPP;
goto errout;
}
if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
goto errout;
m->datalen = sizeof(*meta);
m->data = (unsigned long) meta;
err = 0;
errout:
if (err && meta)
meta_delete(meta);
return err;
}
static void em_meta_destroy(struct tcf_ematch *m)
{
if (m)
meta_delete((struct meta_match *) m->data);
}
static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
struct meta_match *meta = (struct meta_match *) em->data;
struct tcf_meta_hdr hdr;
struct meta_type_ops *ops;
memset(&hdr, 0, sizeof(hdr));
memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
goto nla_put_failure;
ops = meta_type_ops(&meta->lvalue);
if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct tcf_ematch_ops em_meta_ops = {
.kind = TCF_EM_META,
.change = em_meta_change,
.match = em_meta_match,
.destroy = em_meta_destroy,
.dump = em_meta_dump,
.owner = THIS_MODULE,
.link = LIST_HEAD_INIT(em_meta_ops.link)
};
static int __init init_em_meta(void)
{
return tcf_em_register(&em_meta_ops);
}
static void __exit exit_em_meta(void)
{
tcf_em_unregister(&em_meta_ops);
}
MODULE_LICENSE("GPL");
module_init(init_em_meta);
module_exit(exit_em_meta);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);