Random microoptimisations.

-ck

---
 kernel/sched/bfs.c | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

Index: linux-4.7-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck4.orig/kernel/sched/bfs.c	2016-09-13 17:21:53.912211598 +1000
+++ linux-4.7-ck4/kernel/sched/bfs.c	2016-09-13 17:21:53.909211666 +1000
@@ -684,7 +684,7 @@ static int best_smt_bias(struct rq *this
 
 		if (rq_idle(rq))
 			continue;
-		if (!rq->online)
+		if (unlikely(!rq->online))
 			continue;
 		if (!rq->rq_mm)
 			continue;
@@ -1320,7 +1320,7 @@ static inline bool needs_other_cpu(struc
 static void try_preempt(struct task_struct *p, struct rq *this_rq)
 {
 	int cpu, pcpu, highest_prio, highest_cpu;
-	struct rq *highest_prio_rq = NULL;
+	struct rq *highest_prio_rq;
 	u64 latest_deadline;
 	cpumask_t tmp;
 
@@ -1331,10 +1331,7 @@ static void try_preempt(struct task_stru
 	if (p->policy == SCHED_IDLEPRIO)
 		return;
 
-	if (likely(online_cpus(p)))
-		cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
-	else
-		return;
+	cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
 
 	/* See if this task can preempt the task on the current CPU first. */
 	pcpu = cpu_of(this_rq);
@@ -1347,6 +1344,7 @@ static void try_preempt(struct task_stru
 	}
 
 	highest_prio = latest_deadline = 0;
+	highest_prio_rq = NULL;
 
 	/* Now look for the CPU with the latest deadline */
 	for_each_cpu(cpu, &tmp) {
@@ -1377,13 +1375,10 @@ static void try_preempt(struct task_stru
 		return;
 	if (can_preempt(p, highest_prio, latest_deadline)) {
 		/*
-		 * If we have decided this task should preempt this CPU,
-		 * set the task's CPU to match so there is no discrepancy
-		 * in earliest_deadline_task which biases away tasks with
-		 * a different CPU set. This means waking tasks are
-		 * treated differently to rescheduling tasks in
-		 * interactive mode.
-		 */
+		 * If we have decided this task should preempt this CPU,
+		 * set the task's CPU to match thereby speeding up matching
+		 * this task in earliest_deadline_task.
+		 */
 		set_task_cpu(p, highest_cpu);
 		resched_curr(highest_prio_rq);
 	}
@@ -3382,7 +3377,7 @@ static void check_smt_siblings(struct rq
 		rq = cpu_rq(other_cpu);
 		if (rq_idle(rq))
 			continue;
-		if (!rq->online)
+		if (unlikely(!rq->online))
 			continue;
 		p = rq->curr;
 		if (!smt_should_schedule(p, this_rq)) {
@@ -3403,7 +3398,7 @@ static void wake_smt_siblings(struct rq
 		struct rq *rq;
 
 		rq = cpu_rq(other_cpu);
-		if (!rq->online)
+		if (unlikely(!rq->online))
 			continue;
 		if (rq_idle(rq)) {
 			struct task_struct *p = rq->curr;
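
Note on the unlikely() annotations above: rq->online is false only around CPU hotplug, so hinting the offline test as the cold path lets the compiler keep the common online case on the fall-through path. A minimal userspace sketch of the same pattern follows; the macro definitions are simplified approximations of the kernel's include/linux/compiler.h wrappers, the helper is hypothetical, and none of this is part of the patch itself.

/* Illustrative only: simplified stand-ins for the kernel's likely()/unlikely(). */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical helper: bail out in the rare offline case, keeping the
 * online case as the predicted, fall-through path. */
static int queue_work_on_cpu(int cpu_online)
{
	if (unlikely(!cpu_online))
		return -1;	/* cold path */
	return 0;		/* hot path */
}

int main(void)
{
	printf("%d %d\n", queue_work_on_cpu(1), queue_work_on_cpu(0));
	return 0;
}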