diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9a7be0db5820..88455b412deb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -83,6 +83,7 @@ struct hotplug_pcp {
 	int refcount;
 	int grab_lock;
 	struct completion synced;
+	struct completion unplug_wait;
 #ifdef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
@@ -186,6 +187,7 @@ static int sync_unplug_thread(void *data)
 {
 	struct hotplug_pcp *hp = data;
 
+	wait_for_completion(&hp->unplug_wait);
 	preempt_disable();
 	hp->unplug = current;
 	wait_for_pinned_cpus(hp);
@@ -251,6 +253,14 @@ static void __cpu_unplug_sync(struct hotplug_pcp *hp)
 	wait_for_completion(&hp->synced);
 }
 
+static void __cpu_unplug_wait(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	complete(&hp->unplug_wait);
+	wait_for_completion(&hp->synced);
+}
+
 /*
  * Start the sync_unplug_thread on the target cpu and wait for it to
  * complete.
@@ -274,6 +284,7 @@ static int cpu_unplug_begin(unsigned int cpu)
 	tell_sched_cpu_down_begin(cpu);
 
 	init_completion(&hp->synced);
+	init_completion(&hp->unplug_wait);
 
 	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
 	if (IS_ERR(hp->sync_tsk)) {
@@ -289,8 +300,7 @@ static int cpu_unplug_begin(unsigned int cpu)
 	 * wait for tasks that are going to enter these sections and
 	 * we must not have them block.
 	 */
-	__cpu_unplug_sync(hp);
-
+	wake_up_process(hp->sync_tsk);
 	return 0;
 }
 
@@ -615,6 +625,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 #endif
 	synchronize_rcu();
 
+	__cpu_unplug_wait(cpu);
 	smpboot_park_threads(cpu);
 
 	/* Notifiers are done.  Don't let any more tasks pin this CPU. */
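
For reference, a minimal user-space sketch of the handshake this patch introduces: cpu_unplug_begin() now only creates and wakes the sync thread, which parks on unplug_wait; _cpu_down() later calls __cpu_unplug_wait(), which completes unplug_wait and only then waits for synced, so the drain of CPU pinners is deferred until after synchronize_rcu(). The pthread-based "completion" stand-in, the thread body, and main() below are illustrative assumptions, not kernel code.

/*
 * User-space model of the unplug_wait / synced ordering (build with
 * gcc -pthread). struct completion here is a condvar stand-in for the
 * kernel's completion API; names mirror the patch for readability.
 */
#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static struct completion unplug_wait, synced;

/* Stand-in for sync_unplug_thread(): parked until _cpu_down() releases it. */
static void *sync_unplug_thread(void *arg)
{
	(void)arg;
	wait_for_completion(&unplug_wait);
	printf("sync thread: draining tasks pinning the CPU\n");
	complete(&synced);
	return NULL;
}

int main(void)
{
	pthread_t tsk;

	/* cpu_unplug_begin(): set up completions, start the thread, return. */
	init_completion(&unplug_wait);
	init_completion(&synced);
	pthread_create(&tsk, NULL, sync_unplug_thread, NULL);

	/* ... remaining _cpu_down() work (notifiers, synchronize_rcu()) ... */
	printf("_cpu_down: notifiers done\n");

	/* __cpu_unplug_wait(): release the sync thread, wait for the drain. */
	complete(&unplug_wait);
	wait_for_completion(&synced);

	pthread_join(tsk, NULL);
	return 0;
}

The design point modeled here is the split: creating the sync thread no longer implies waiting for the drain inside cpu_unplug_begin(); the wait now happens in _cpu_down() via __cpu_unplug_wait(), after synchronize_rcu() and before smpboot_park_threads().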