use skbufhead with raw lock

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Thomas Gleixner 2011-07-12 15:38:34 +02:00 committed by Alibek Omarov
parent 0eb8f356f4
commit 26af336d12
3 changed files with 28 additions and 6 deletions

View File

@ -2031,6 +2031,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
struct sk_buff_head tofree_queue;
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;

View File

@ -152,6 +152,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
raw_spinlock_t raw_lock;
};
struct sk_buff;
@ -1151,6 +1152,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
/*
 * skb_queue_head_init_raw - initialize a queue head using the raw spinlock.
 *
 * Initializes list->raw_lock instead of list->lock, then performs the
 * common (lockless) head initialization via __skb_queue_head_init().
 * NOTE(review): presumably the PREEMPT_RT counterpart of
 * skb_queue_head_init() for queues that must be manipulated from
 * contexts where sleeping spinlocks are not allowed (cf. the commit
 * title "use skbufhead with raw lock") — confirm against the full
 * skbuff.h in the tree.
 */
static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
{
raw_spin_lock_init(&list->raw_lock);
__skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{

View File

@ -202,14 +202,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
/*
 * rps_lock - acquire the per-CPU softnet_data input queue lock.
 *
 * Only compiled in when CONFIG_RPS is enabled; otherwise a no-op.
 * NOTE(review): this is a rendered diff with the +/- markers stripped —
 * both the removed spin_lock() line and its raw_spin_lock() replacement
 * appear below. In the patched source only the raw_spin_lock() call on
 * input_pkt_queue.raw_lock remains.
 */
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_lock(&sd->input_pkt_queue.lock);
raw_spin_lock(&sd->input_pkt_queue.raw_lock);
#endif
}
/*
 * rps_unlock - release the per-CPU softnet_data input queue lock.
 *
 * Mirror of rps_lock(); a no-op unless CONFIG_RPS is enabled.
 * NOTE(review): rendered diff with +/- markers stripped — the
 * spin_unlock() line is the removed original and the raw_spin_unlock()
 * line is its replacement; only the latter exists in the patched source.
 */
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_unlock(&sd->input_pkt_queue.lock);
raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
#endif
}
@ -3760,7 +3760,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
kfree_skb(skb);
__skb_queue_tail(&sd->tofree_queue, skb);
input_queue_head_incr(sd);
}
}
@ -3769,10 +3769,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
kfree_skb(skb);
__skb_queue_tail(&sd->tofree_queue, skb);
input_queue_head_incr(sd);
}
}
if (!skb_queue_empty(&sd->tofree_queue))
raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
static int napi_gro_complete(struct sk_buff *skb)
@ -4353,10 +4356,17 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
struct sk_buff *skb;
void *have;
local_irq_disable();
while ((skb = __skb_dequeue(&sd->tofree_queue))) {
local_irq_enable();
kfree_skb(skb);
local_irq_disable();
}
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@ -6842,6 +6852,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
kfree_skb(skb);
}
return NOTIFY_OK;
}
@ -7151,8 +7164,9 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
skb_queue_head_init_raw(&sd->input_pkt_queue);
skb_queue_head_init_raw(&sd->process_queue);
skb_queue_head_init_raw(&sd->tofree_queue);
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS