Need to set the task's CPU in wake_up_new_task() to prevent a freshly
affined task from running on the wrong CPU. We should use the indirect
smt_schedule() in check_smt_siblings() to avoid a rare crash at startup,
before the SMT functions are stable. We should _always_ resched in
__setscheduler() or we may miss an affinity change until the task is
descheduled. Go directly into __schedule() in __set_cpus_allowed_ptr()
to ensure we are the ones to move a task that has no affinity for the
current CPU, and don't get preempted before doing so.

-ck

---
 kernel/sched/bfs.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

Index: linux-4.7-bfs/kernel/sched/bfs.c
===================================================================
--- linux-4.7-bfs.orig/kernel/sched/bfs.c
+++ linux-4.7-bfs/kernel/sched/bfs.c
@@ -1790,6 +1790,8 @@ void wake_up_new_task(struct task_struct
 
 	parent = p->parent;
 	rq = task_grq_lock(p, &flags);
+	if (unlikely(needs_other_cpu(p, task_cpu(p))))
+		set_task_cpu(p, cpumask_any(tsk_cpus_allowed(p)));
 	rq_curr = rq->curr;
 
 	/*
@@ -3372,7 +3374,7 @@ static void check_smt_siblings(struct rq
 		if (unlikely(!rq->online))
 			continue;
 		p = rq->curr;
-		if (!smt_should_schedule(p, this_rq)) {
+		if (!smt_schedule(p, this_rq)) {
 			set_tsk_need_resched(p);
 			smp_send_reschedule(other_cpu);
 		}
@@ -4021,9 +4023,7 @@ static void __setscheduler(struct task_s
 
 	if (task_running(p)) {
 		reset_rq_task(rq, p);
-		/* Resched only if we might now be preempted */
-		if (p->prio > oldprio || p->rt_priority < oldrtprio)
-			resched_task(p);
+		resched_task(p);
 	} else if (task_queued(p)) {
 		dequeue_task(p);
 		enqueue_task(p, rq);
@@ -5455,10 +5455,14 @@ static int __set_cpus_allowed_ptr(struct
 out:
 	if (queued && !cpumask_subset(new_mask, &old_mask))
 		try_preempt(p, rq);
+	if (running_wrong)
+		preempt_disable();
 	task_grq_unlock(&flags);
 
-	if (running_wrong)
-		preempt_schedule_common();
+	if (running_wrong) {
+		__schedule(true);
+		preempt_enable();
+	}
 	return ret;
 }
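
For illustration, here is the idea behind the wake_up_new_task() hunk as a
toy userspace program. The cpumask helpers below are simplified stand-ins
for the kernel API, and needs_other_cpu()'s real definition lives in bfs.c;
this only sketches the "repair the CPU before the task is queued" logic,
assuming the check is a plain mask test:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel cpumask API. */
struct cpumask { unsigned long bits; };

static bool cpumask_test_cpu(int cpu, const struct cpumask *mask)
{
	return mask->bits & (1UL << cpu);
}

static int cpumask_any(const struct cpumask *mask)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (cpumask_test_cpu(cpu, mask))
			return cpu;
	return -1;
}

int main(void)
{
	struct cpumask allowed = { .bits = 1UL << 2 };	/* only CPU 2 allowed */
	int task_cpu = 0;				/* stale placement */

	/* As in the hunk: fix the placement before queueing, rather than
	 * letting the new task run on a disallowed CPU first. */
	if (!cpumask_test_cpu(task_cpu, &allowed))
		task_cpu = cpumask_any(&allowed);
	printf("task will run on CPU %d\n", task_cpu);
	return 0;
}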
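
The smt_schedule() change relies on calling through a function pointer that
initially targets a safe stub and is only switched to the real check once
SMT setup has completed. A minimal sketch of that pattern, using
illustrative names rather than the actual bfs.c symbols:

#include <stdbool.h>
#include <stdio.h>

/* Real check: only meaningful once topology setup has run. */
static bool real_check(int cpu)
{
	return cpu % 2 == 0; /* pretend only even siblings may schedule */
}

/* Safe stub used until initialisation completes. */
static bool stub_check(int cpu)
{
	(void)cpu;
	return true; /* allow everything before SMT data is stable */
}

/* Callers always go through the pointer, never the real function,
 * so an early call cannot hit uninitialised state. */
static bool (*check)(int cpu) = stub_check;

int main(void)
{
	printf("early: cpu 1 -> %d\n", check(1));	/* stub: allowed */
	check = real_check;				/* topology ready */
	printf("late:  cpu 1 -> %d\n", check(1));	/* real: denied */
	return 0;
}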
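
The point of the __set_cpus_allowed_ptr() hunk is ordering: preemption is
disabled while the grq lock is still held, so there is no window between
dropping the lock and entering __schedule() in which something else could
preempt us and leave the task running on a disallowed CPU. Below is a toy
model of that invariant with mock lock and preemption primitives; it is
not kernel code, just the shape of the sequence:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int lock_held = 1;	/* stands in for the held grq lock */
static int preempt_count;	/* > 0 means "cannot be preempted" */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }
static void task_grq_unlock(void) { lock_held = 0; }

static void __schedule(bool preempt)
{
	/* The invariant the reordering buys us: we arrive here with the
	 * lock dropped but preemption still off. */
	assert(preempt_count > 0 && !lock_held);
	printf("__schedule(%d): we move ourselves, nobody preempts us\n",
	       preempt);
}

int main(void)
{
	bool running_wrong = true;

	if (running_wrong)
		preempt_disable();	/* before dropping the lock */
	task_grq_unlock();
	if (running_wrong) {
		__schedule(true);	/* direct call, not via preemption */
		preempt_enable();
	}
	return 0;
}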