net: Introduce sk_tx_queue_mapping

Introduce sk_tx_queue_mapping, along with functions that set, test
and get this value. Reset sk_tx_queue_mapping to -1 whenever the dst
cache is set or reset, and on socket allocation. With -1 meaning
"not recorded" and valid queue numbers running from 0 to n-1, the tx
path can use the value of sk_tx_queue_mapping directly instead of
subtracting 1 on every transmit.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e022f0b4a0
parent 748879776e
Author:    Krishna Kumar <krkumar2@in.ibm.com>
Date:      2009-10-19 23:46:20 +0000
Committer: David S. Miller <davem@davemloft.net>

2 changed files, 30 insertions(+), 1 deletion(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -107,6 +107,7 @@ struct net;
  * @skc_node: main hash linkage for various protocol lookup tables
  * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  * @skc_refcnt: reference count
+ * @skc_tx_queue_mapping: tx queue number for this connection
  * @skc_hash: hash value used with various protocol lookup tables
  * @skc_family: network address family
  * @skc_state: Connection state
@ -128,6 +129,7 @@ struct sock_common {
struct hlist_nulls_node skc_nulls_node;
};
atomic_t skc_refcnt;
int skc_tx_queue_mapping;
unsigned int skc_hash;
unsigned short skc_family;
@@ -215,6 +217,7 @@ struct sock {
 #define sk_node                 __sk_common.skc_node
 #define sk_nulls_node           __sk_common.skc_nulls_node
 #define sk_refcnt               __sk_common.skc_refcnt
+#define sk_tx_queue_mapping     __sk_common.skc_tx_queue_mapping

 #define sk_copy_start           __sk_common.skc_hash
 #define sk_hash                 __sk_common.skc_hash
@@ -1094,8 +1097,29 @@ static inline void sock_put(struct sock *sk)
 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
                           const int nested);

+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+        sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+        sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+        return sk->sk_tx_queue_mapping;
+}
+
+static inline bool sk_tx_queue_recorded(const struct sock *sk)
+{
+        return (sk && sk->sk_tx_queue_mapping >= 0);
+}
+
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
+        sk_tx_queue_clear(sk);
         sk->sk_socket = sock;
 }
@@ -1152,6 +1176,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
         struct dst_entry *old_dst;

+        sk_tx_queue_clear(sk);
         old_dst = sk->sk_dst_cache;
         sk->sk_dst_cache = dst;
         dst_release(old_dst);
@@ -1170,6 +1195,7 @@ __sk_dst_reset(struct sock *sk)
 {
         struct dst_entry *old_dst;

+        sk_tx_queue_clear(sk);
         old_dst = sk->sk_dst_cache;
         sk->sk_dst_cache = NULL;
         dst_release(old_dst);

--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -357,6 +357,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
         struct dst_entry *dst = sk->sk_dst_cache;

         if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+                sk_tx_queue_clear(sk);
                 sk->sk_dst_cache = NULL;
                 dst_release(dst);
                 return NULL;
@@ -953,7 +954,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
         void *sptr = nsk->sk_security;
 #endif
         BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-                     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+                     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+                     sizeof(osk->sk_tx_queue_mapping));
         memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
                osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
@@ -997,6 +999,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                 if (!try_module_get(prot->owner))
                         goto out_free_sec;
+                sk_tx_queue_clear(sk);
         }

         return sk;