sched: Generic migrate_disable

Make migrate_disable() be a preempt_disable() for !rt kernels. This
allows generic code to use it but still enforces that these code
sections stay relatively small.

A preemptible migrate_disable() accessible for general use would allow
people to grow arbitrary per-cpu crap instead of cleaning these things
up.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
Peter Zijlstra 2011-08-11 15:14:58 +02:00, committed by Alibek Omarov
parent d921689f21, commit f03a600884
6 changed files with 30 additions and 23 deletions
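
To make the intended usage concrete, here is a minimal, hypothetical sketch
(not part of this commit) of how generic code is expected to call
migrate_disable(): on a !rt kernel the section runs with preemption disabled,
on an -rt kernel it stays preemptible but the task cannot change CPUs.

	static void example_show_cpu(void)
	{
		migrate_disable();	/* !rt: preempt_disable(); rt: pin task to this CPU */
		/* smp_processor_id() is stable until migrate_enable() */
		pr_debug("running on CPU %d\n", smp_processor_id());
		migrate_enable();	/* !rt: preempt_enable(); rt: unpin */
	}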

include/linux/preempt.h

@@ -150,28 +150,25 @@ do { \
set_preempt_need_resched(); \
} while (0)
#ifdef CONFIG_SMP
extern void migrate_disable(void);
extern void migrate_enable(void);
#else
# define migrate_disable() barrier()
# define migrate_enable() barrier()
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
# define preempt_enable_nort() barrier()
# define migrate_disable_rt() migrate_disable()
# define migrate_enable_rt() migrate_enable()
# ifdef CONFIG_SMP
extern void migrate_disable(void);
extern void migrate_enable(void);
# else /* CONFIG_SMP */
# define migrate_disable() barrier()
# define migrate_enable() barrier()
# endif /* CONFIG_SMP */
#else
# define preempt_disable_rt() barrier()
# define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
# define migrate_disable_rt() barrier()
# define migrate_enable_rt() barrier()
# define migrate_disable() preempt_disable()
# define migrate_enable() preempt_enable()
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
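
The *_rt/*_nort helpers above let shared code choose per-configuration
behaviour. As a hedged, hypothetical illustration (not from this commit),
preempt_disable_nort() suits a section that must not be preempted on a stock
kernel but must remain preemptible on -rt, where sleeping locks may be taken:

	static void example_flush(void)
	{
		preempt_disable_nort();	/* !rt: preemption off; rt: no-op, stays preemptible */
		/* short critical section; on -rt it may use sleeping locks */
		preempt_enable_nort();
	}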

include/linux/sched.h

@@ -1202,7 +1202,9 @@ struct task_struct {
#endif
unsigned int policy;
#ifdef CONFIG_PREEMPT_RT_FULL
int migrate_disable;
#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -2978,11 +2980,22 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
static inline int __migrate_disabled(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RT_FULL
return p->migrate_disable;
#else
return 0;
#endif
}
/* Future-safe accessor for struct task_struct's cpus_allowed. */
static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RT_FULL
if (p->migrate_disable)
return cpumask_of(task_cpu(p));
#endif
return &p->cpus_allowed;
}
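
A sketch of what the new accessors buy a (hypothetical) mask-based placement
loop: while a task is inside migrate_disable(), tsk_cpus_allowed() collapses
to the CPU the task is currently on, so iteration naturally keeps it pinned.

	static int example_first_allowed(struct task_struct *p)
	{
		int cpu;

		for_each_cpu(cpu, tsk_cpus_allowed(p)) {
			/* with p->migrate_disable != 0, only task_cpu(p) is visited */
			return cpu;
		}
		return -1;
	}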

include/linux/smp.h

@@ -182,13 +182,8 @@ static inline void kick_all_cpus_sync(void) { }
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
#ifndef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light() get_cpu()
# define put_cpu_light() put_cpu()
#else
# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light() migrate_enable()
#endif
#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light() migrate_enable()
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
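
With the conditional gone, get_cpu_light()/put_cpu_light() have a single
definition and inherit their semantics from migrate_disable(): preemption
disabled on !rt, pinned but still preemptible on -rt. A hypothetical use
(example_work is a made-up work item):

	static void example_queue_local(struct work_struct *example_work)
	{
		int cpu = get_cpu_light();	/* task stays on 'cpu' for this section */

		queue_work_on(cpu, system_wq, example_work);
		put_cpu_light();
	}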

kernel/sched/core.c

@@ -4654,7 +4654,7 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
if (!p->migrate_disable) {
if (!__migrate_disabled(p)) {
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
@@ -4705,7 +4705,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -4724,6 +4724,7 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
#ifdef CONFIG_PREEMPT_RT_FULL
void migrate_disable(void)
{
struct task_struct *p = current;
@@ -4816,6 +4817,7 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
#endif /* CONFIG_PREEMPT_RT_FULL */
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
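
The hunks above route affinity changes through __migrate_disabled() and make
the heavyweight migrate_disable()/migrate_enable() implementation rt-only. An
illustrative, hypothetical sequence (comments describe the rt behaviour; on
!rt both calls are just preempt_disable()/preempt_enable()):

	static void example_pinned_section(const struct cpumask *new_mask)
	{
		migrate_disable();			/* rt: current->migrate_disable 0 -> 1 */
		set_cpus_allowed_ptr(current, new_mask);/* new mask is recorded, but the early
							   'goto out' above defers the actual move */
		migrate_enable();			/* rt: back to 0; task is migratable again */
	}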

kernel/trace/trace.c

@@ -1566,7 +1566,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

lib/smp_processor_id.c

@@ -40,7 +40,7 @@ notrace unsigned int debug_smp_processor_id(void)
printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
"code: %s/%d\n", preempt_count() - 1,
current->migrate_disable, current->comm, current->pid);
__migrate_disabled(current), current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
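
The diagnostic above fires when smp_processor_id() is called while the task
could still move between CPUs, and it now reports the migrate_disable depth
via __migrate_disabled(). A hypothetical offender and one conventional fix:

	static void example_bad(void)
	{
		int cpu = smp_processor_id();	/* preemptible context: prints the BUG message */

		(void)cpu;
	}

	static void example_good(void)
	{
		int cpu = get_cpu();		/* preemption disabled: check is satisfied */

		(void)cpu;
		put_cpu();
	}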