From 26af336d1236899f92eb8b9ebb8c4e9898e5564e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 12 Jul 2011 15:38:34 +0200
Subject: [PATCH] use skbufhead with raw lock

Signed-off-by: Thomas Gleixner
---
 include/linux/netdevice.h |  1 +
 include/linux/skbuff.h    |  7 +++++++
 net/core/dev.c            | 26 ++++++++++++++++++++------
 3 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bf46cc813451..73bccf460b49 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2031,6 +2031,7 @@ struct softnet_data {
 	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
+	struct sk_buff_head	tofree_queue;
 
 #ifdef CONFIG_NET_FLOW_LIMIT
 	struct sd_flow_limit __rcu *flow_limit;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ab3133797ff7..114130674add 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -152,6 +152,7 @@ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	raw_spinlock_t	raw_lock;
 };
 
 struct sk_buff;
@@ -1151,6 +1152,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
 	__skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+	raw_spin_lock_init(&list->raw_lock);
+	__skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index 265f5933984a..2d8a8ed8c01e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -202,14 +202,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&sd->input_pkt_queue.lock);
+	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
 static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&sd->input_pkt_queue.lock);
+	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
 #endif
 }
 
@@ -3760,7 +3760,7 @@ static void flush_backlog(void *arg)
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
 			input_queue_head_incr(sd);
 		}
 	}
@@ -3769,10 +3769,13 @@ static void flush_backlog(void *arg)
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->process_queue);
-			kfree_skb(skb);
+			__skb_queue_tail(&sd->tofree_queue, skb);
 			input_queue_head_incr(sd);
 		}
 	}
+
+	if (!skb_queue_empty(&sd->tofree_queue))
+		raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -4353,10 +4356,17 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
+	struct sk_buff *skb;
 	void *have;
 
 	local_irq_disable();
 
+	while ((skb = __skb_dequeue(&sd->tofree_queue))) {
+		local_irq_enable();
+		kfree_skb(skb);
+		local_irq_disable();
+	}
+
 	while (!list_empty(&sd->poll_list)) {
 		struct napi_struct *n;
 		int work, weight;
@@ -6842,6 +6852,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
+	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
+		kfree_skb(skb);
+	}
 
 	return NOTIFY_OK;
 }
@@ -7151,8 +7164,9 @@ static int __init net_dev_init(void)
 	for_each_possible_cpu(i) {
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-		skb_queue_head_init(&sd->input_pkt_queue);
-		skb_queue_head_init(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->input_pkt_queue);
+		skb_queue_head_init_raw(&sd->process_queue);
+		skb_queue_head_init_raw(&sd->tofree_queue);
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
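
[Illustration, not part of the patch] A minimal sketch of the deferred-free
pattern the net/core/dev.c hunks implement, assuming the softnet_data
tofree_queue member added above. With the backlog queue lock turned into a
raw spinlock, the rps_lock() region stays truly atomic even on PREEMPT_RT,
so the patch avoids calling kfree_skb() there: skbs are parked on the
per-CPU tofree_queue and NET_RX_SOFTIRQ is raised so that net_rx_action()
frees them later with interrupts enabled. The helper name defer_kfree_skb()
below is hypothetical.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Caller runs with interrupts disabled on this CPU, e.g. under rps_lock(). */
static void defer_kfree_skb(struct softnet_data *sd, struct sk_buff *skb)
{
	/* Queue manipulation only; no memory is freed while atomic. */
	__skb_queue_tail(&sd->tofree_queue, skb);

	/* Have net_rx_action() drain tofree_queue and kfree_skb() the entries. */
	raise_softirq_irqoff(NET_RX_SOFTIRQ);
}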