---
 kernel/sched/bfs.c    |   31 ++++++++-----------------------
 kernel/stop_machine.c |    3 ++-
 2 files changed, 10 insertions(+), 24 deletions(-)

Index: linux-3.9-bfs/kernel/sched/bfs.c
===================================================================
--- linux-3.9-bfs.orig/kernel/sched/bfs.c	2013-05-03 20:30:13.943541321 +1000
+++ linux-3.9-bfs/kernel/sched/bfs.c	2013-05-03 22:14:05.290989446 +1000
@@ -5486,14 +5486,16 @@ out:
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern struct task_struct *cpu_stopper_task;
 /* Run through task list and find tasks affined to just the dead cpu, then
  * allocate a new affinity */
 static void break_sole_affinity(int src_cpu, struct task_struct *idle)
 {
-	struct task_struct *p, *t;
+	struct task_struct *p, *t, *stopper;
 
+	stopper = per_cpu(cpu_stopper_task, src_cpu);
 	do_each_thread(t, p) {
-		if (p != idle && !online_cpus(p)) {
+		if (p != stopper && !online_cpus(p)) {
 			cpumask_copy(tsk_cpus_allowed(p), cpu_possible_mask);
 			/*
 			 * Don't tell them about moving exiting tasks or
@@ -5511,22 +5513,6 @@ static void break_sole_affinity(int src_
 }
 
 /*
- * Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible.
- * Used by CPU offline code.
- */
-void sched_idle_next(struct rq *rq, int this_cpu, struct task_struct *idle)
-{
-	/* cpu has to be offline */
-	BUG_ON(cpu_online(this_cpu));
-
-	__setscheduler(idle, rq, SCHED_FIFO, STOP_PRIO);
-
-	activate_idle_task(idle);
-	set_tsk_need_resched(rq->curr);
-}
-
-/*
  * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
  */
@@ -5543,8 +5529,8 @@ void idle_task_exit(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
-	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-	struct sched_param start_param = { .sched_priority = MAX_USER_RT_PRIO - 1 };
+	struct sched_param stop_param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct sched_param start_param = { .sched_priority = 0 };
 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
 
 	if (stop) {
@@ -5563,10 +5549,10 @@ void sched_set_stop_task(int cpu, struct
 
 	if (old_stop) {
 		/*
-		 * Reset it back to a normal rt scheduling prio so that
+		 * Reset it back to a normal scheduling class so that
 		 * it can die in pieces.
 		 */
-		sched_setscheduler_nocheck(old_stop, SCHED_FIFO, &start_param);
+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
 	}
 }
 
@@ -5799,7 +5785,6 @@ migration_call(struct notifier_block *nf
 	case CPU_DYING:
 		/* Update our root-domain */
 		grq_lock_irqsave(&flags);
-		sched_idle_next(rq, cpu, idle);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
Index: linux-3.9-bfs/kernel/stop_machine.c
===================================================================
--- linux-3.9-bfs.orig/kernel/stop_machine.c	2013-05-02 21:04:15.156959021 +1000
+++ linux-3.9-bfs/kernel/stop_machine.c	2013-05-03 20:30:22.710431719 +1000
@@ -40,7 +40,8 @@ struct cpu_stopper {
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
-static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
+DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
+
 static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
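
A note on the declaration style: the patch open-codes "extern struct task_struct
*cpu_stopper_task;" in bfs.c, which links once stop_machine.c drops the
"static", but the idiomatic way to reference a per-CPU variable from another
file is DECLARE_PER_CPU, which also keeps sparse's __percpu address-space
checking intact. A minimal sketch of that alternative (the header placement is
hypothetical, not part of this patch):

	/* kernel/stop_machine.c: definition, non-static as of this patch */
	DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);

	/* shared header (hypothetical), e.g. include/linux/stop_machine.h */
	DECLARE_PER_CPU(struct task_struct *, cpu_stopper_task);

	/* consumer in kernel/sched/bfs.c: fetch the dying CPU's stopper */
	struct task_struct *stopper = per_cpu(cpu_stopper_task, src_cpu);

With sched_idle_next() gone, the stopper thread, not a priority-boosted idle
task, is what must keep running on the dying CPU, which is presumably why
break_sole_affinity() now skips the stopper rather than idle when reassigning
the affinities of tasks bound solely to that CPU.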