From 6897fc22ea01b562b55c6168592bcbd3ee62b006 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 30 Jan 2014 15:45:47 -0800
Subject: [PATCH] kernel: use lockless list for smp_call_function_single

Make smp_call_function_single and friends more efficient by using a
lockless list.

Signed-off-by: Christoph Hellwig
Reviewed-by: Jan Kara
Cc: Jens Axboe
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/blkdev.h |  5 +----
 include/linux/smp.h    |  6 ++++-
 kernel/smp.c           | 51 +++++++++++-------------------------------
 3 files changed, 19 insertions(+), 43 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..8678c4322b44 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
  * as well!
  */
 struct request {
-	union {
-		struct list_head queuelist;
-		struct llist_node ll_list;
-	};
+	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
 		struct work_struct mq_flush_data;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..3834f43f9993 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 
 extern void cpu_idle(void);
 
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-	struct list_head list;
+	union {
+		struct list_head list;
+		struct llist_node llist;
+	};
 	smp_call_func_t func;
 	void *info;
 	u16 flags;
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..4ad913e7c253 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -28,12 +28,7 @@ struct call_function_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head list;
-	raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -85,12 +80,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +132,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +146,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
@@ -177,27 +159,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
@@ -411,17 +392,11 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 
 	/* Send a message to all CPUs in the map */
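
For experimenting with the queueing scheme outside the kernel tree, here is a
minimal userspace sketch of the pattern the patch relies on, using C11 atomics
in place of <linux/llist.h>. The names (node, lockless_push, pop_all, reverse)
are made up for illustration: lockless_push mirrors llist_add() and reports
whether the list was empty (the cue to send the IPI), pop_all mirrors
llist_del_all(), and reverse mirrors llist_reverse_order(). Sequentially
consistent atomics stand in for the ordering the kernel gets from its
cmpxchg-based llist primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int data;
};

/* Mirrors llist_add(): push onto a lockless LIFO and return true if
 * the list was empty, which is the producer's cue to send the IPI. */
static bool lockless_push(struct node *_Atomic *head, struct node *n)
{
	struct node *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));

	return first == NULL;
}

/* Mirrors llist_del_all(): detach every queued entry in one exchange. */
static struct node *pop_all(struct node *_Atomic *head)
{
	return atomic_exchange(head, NULL);
}

/* Mirrors llist_reverse_order(): LIFO pushes leave the batch
 * newest-first, so reverse it to process in submission order. */
static struct node *reverse(struct node *n)
{
	struct node *prev = NULL;

	while (n) {
		struct node *next = n->next;

		n->next = prev;
		prev = n;
		n = next;
	}
	return prev;
}

int main(void)
{
	static struct node *_Atomic head;
	struct node a = { .data = 1 }, b = { .data = 2 }, c = { .data = 3 };

	if (lockless_push(&head, &a))
		puts("queue was empty: would send IPI here");
	lockless_push(&head, &b);
	lockless_push(&head, &c);

	/* Consumer side, shaped like
	 * generic_smp_call_function_single_interrupt(). */
	for (struct node *n = reverse(pop_all(&head)); n;) {
		struct node *next = n->next;	/* read before n is reused */

		printf("processing %d\n", n->data);
		n = next;
	}
	return 0;
}

The property generic_exec_single() exploits is visible in lockless_push():
only the push that finds an empty list returns true, so one IPI is sent per
batch of queued entries, and the handler drains the whole batch with a single
atomic exchange instead of taking a per-queue spinlock for every entry.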