try_preempt needs special casing of !sched_interactive.

-ck

---
 kernel/sched/bfs.c | 52 +++++++++++++++++++++++++++-------------------------
 1 file changed, 27 insertions(+), 25 deletions(-)

Index: linux-4.7-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck4.orig/kernel/sched/bfs.c	2016-09-13 17:21:51.170274468 +1000
+++ linux-4.7-ck4/kernel/sched/bfs.c	2016-09-13 17:21:51.168274514 +1000
@@ -1373,13 +1373,10 @@ static inline bool needs_other_cpu(struc
 	return false;
 }
 
-/*
- * When all else is equal, still prefer this_rq.
- */
 static void try_preempt(struct task_struct *p, struct rq *this_rq)
 {
+	int cpu, pcpu, highest_prio, highest_cpu;
 	struct rq *highest_prio_rq = NULL;
-	int cpu, highest_prio;
 	u64 latest_deadline;
 	cpumask_t tmp;
 
@@ -1403,13 +1400,13 @@ static void try_preempt(struct task_stru
 		return;
 
 	/* See if this task can preempt the task on the current CPU first. */
-	cpu = cpu_of(this_rq);
-	if (cpumask_test_cpu(cpu, &tmp)) {
-		if (smt_schedule(p, cpu) && can_preempt(p, this_rq->rq_prio, this_rq->rq_deadline)) {
+	pcpu = cpu_of(this_rq);
+	if (!sched_interactive && cpumask_test_cpu(pcpu, &tmp)) {
+		if (smt_schedule(p, pcpu) && can_preempt(p, this_rq->rq_prio, this_rq->rq_deadline)) {
 			resched_curr(this_rq);
 			return;
 		}
-		cpumask_clear_cpu(cpu, &tmp);
+		cpumask_clear_cpu(pcpu, &tmp);
 	}
 
 	highest_prio = latest_deadline = 0;
@@ -1418,35 +1415,40 @@ static void try_preempt(struct task_stru
 	for_each_cpu(cpu, &tmp) {
 		struct rq *rq;
 		int rq_prio;
+		u64 dl;
 
 		rq = cpu_rq(cpu);
 		rq_prio = rq->rq_prio;
 		if (rq_prio < highest_prio)
 			continue;
 
+		dl = rq->rq_deadline;
+		if (!sched_interactive && pcpu != cpu)
+			dl <<= locality_diff(pcpu, rq);
 		if (rq_prio > highest_prio ||
-		    deadline_after(rq->rq_deadline, latest_deadline)) {
-			latest_deadline = rq->rq_deadline;
+		    deadline_after(dl, latest_deadline)) {
+			latest_deadline = dl;
 			highest_prio = rq_prio;
+			highest_cpu = cpu;
 			highest_prio_rq = rq;
 		}
 	}
-	if (likely(highest_prio_rq)) {
-		cpu = cpu_of(highest_prio_rq);
-		if (!smt_schedule(p, cpu))
-			return;
-		if (can_preempt(p, highest_prio, latest_deadline)) {
-			/*
-			 * If we have decided this task should preempt this CPU,
-			 * set the task's CPU to match so there is no discrepancy
-			 * in earliest_deadline_task which biases away tasks with
-			 * a different CPU set. This means waking tasks are
-			 * treated differently to rescheduling tasks.
-			 */
-			set_task_cpu(p, cpu);
-			resched_curr(highest_prio_rq);
-		}
+	if (unlikely(!highest_prio_rq))
+		return;
+	if (!smt_schedule(p, highest_cpu))
+		return;
+	if (can_preempt(p, highest_prio, latest_deadline)) {
+		/*
+		 * If we have decided this task should preempt this CPU,
+		 * set the task's CPU to match so there is no discrepancy
+		 * in earliest_deadline_task which biases away tasks with
+		 * a different CPU set. This means waking tasks are
+		 * treated differently to rescheduling tasks in
+		 * interactive mode.
+		 */
+		set_task_cpu(p, highest_cpu);
+		resched_curr(highest_prio_rq);
 	}
 }
 
 static int __set_cpus_allowed_ptr(struct task_struct *p,