It turns out that taking into account load from other threads is unnecessary, as one core with two threads ends up running at the speed of its highest-load thread anyway, so simplify the load average sent for logical cores to be identical to that of physical cores.

-ck

---
 kernel/sched/bfs.c | 42 ++++++------------------------------------
 1 file changed, 6 insertions(+), 36 deletions(-)

Index: linux-4.7-ck5/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck5.orig/kernel/sched/bfs.c	2016-09-23 08:33:01.078719073 +1000
+++ linux-4.7-ck5/kernel/sched/bfs.c	2016-09-23 08:33:01.075719092 +1000
@@ -668,6 +668,11 @@ static inline int queued_notrunning(void
 	return grq.qnr;
 }
 
+static unsigned long rq_load_avg(struct rq *rq)
+{
+	return rq->soft_affined * SCHED_CAPACITY_SCALE;
+}
+
 #ifdef CONFIG_SMT_NICE
 static const cpumask_t *thread_cpumask(int cpu);
 
@@ -740,41 +745,7 @@ static bool smt_should_schedule(struct t
 	/* Sorry, you lose */
 	return false;
 }
-
-static unsigned long cpu_load_avg(struct rq *rq)
-{
-	return rq->soft_affined * SCHED_CAPACITY_SCALE;
-}
-
-/*
- * This is the proportion of SCHED_CAPACITY_SCALE (1024) used when each thread
- * of a CPU with SMT siblings is in use.
- */
-#define SCHED_SMT_LOAD (890)
-
-/*
- * Load of a CPU with smt siblings should be considered to be the load from all
- * the SMT siblings, thus will be >1 if both threads are in use since they are
- * not full cores.
- */
-static unsigned long smt_load_avg(struct rq *rq)
-{
-	unsigned long load = rq->soft_affined * SCHED_SMT_LOAD;
-	int cpu;
-
-	for_each_cpu(cpu, thread_cpumask(rq->cpu))
-		load += cpu_rq(cpu)->soft_affined * SCHED_SMT_LOAD;
-	return load;
-}
-
-static unsigned long (*rq_load_avg)(struct rq *rq) = &cpu_load_avg;
-#else
-#define smt_schedule(p, this_rq) (true)
-static inline unsigned long rq_load_avg(struct rq *rq)
-{
-	return rq->soft_affined * SCHED_CAPACITY_SCALE;
-}
-#endif
+#endif /* CONFIG_SMT_NICE */
 #ifdef CONFIG_SMP
 /*
  * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
@@ -7116,7 +7087,6 @@ void __init sched_init_smp(void)
 			check_siblings = &check_smt_siblings;
 			wake_siblings = &wake_smt_siblings;
 			smt_schedule = &smt_should_schedule;
-			rq_load_avg = &smt_load_avg;
 		}
 #endif
 	grq_unlock_irq();
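
For reference, a minimal standalone userspace sketch (not part of the patch) of the before/after arithmetic: it plugs the constants from the hunks above into the removed SMT-aware formula and the simplified one for a core with two busy threads. The toy_rq struct, the single explicit sibling argument and the example task counts are stand-ins of mine for the real struct rq and the thread_cpumask() iteration.

/*
 * Illustrative sketch only: compares the value the removed smt_load_avg()
 * would have reported for an SMT thread against the value the simplified
 * rq_load_avg() now reports.  Constants are taken from the patch; the
 * struct and the sibling handling are simplified stand-ins.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define SCHED_SMT_LOAD		890	/* removed per-thread SMT scaling factor */

struct toy_rq {
	unsigned long soft_affined;	/* runnable tasks accounted to this CPU */
};

/* New behaviour: logical cores report load the same way as physical cores. */
static unsigned long rq_load_avg(const struct toy_rq *rq)
{
	return rq->soft_affined * SCHED_CAPACITY_SCALE;
}

/* Removed behaviour: fold in the SMT sibling's load at 890/1024 per thread. */
static unsigned long smt_load_avg(const struct toy_rq *rq,
				  const struct toy_rq *sibling)
{
	unsigned long load = rq->soft_affined * SCHED_SMT_LOAD;

	load += sibling->soft_affined * SCHED_SMT_LOAD;
	return load;
}

int main(void)
{
	struct toy_rq cpu0 = { .soft_affined = 1 };
	struct toy_rq cpu1 = { .soft_affined = 1 };	/* cpu0's SMT sibling */

	printf("old smt_load_avg: %lu\n", smt_load_avg(&cpu0, &cpu1));
	printf("new rq_load_avg:  %lu\n", rq_load_avg(&cpu0));
	return 0;
}

With both threads loaded the old formula reports well over SCHED_CAPACITY_SCALE for a single logical CPU, while the simplified formula reports 1024 per runnable task regardless of sibling activity, matching the reasoning in the commit message.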