Add a tunable to test a variety of different ways of setting load in smt.

-ck

---
 kernel/sched/bfs.c |   38 +++++++++++++++++++++++++++++++++++---
 kernel/sysctl.c    |    8 ++++++++
 2 files changed, 43 insertions(+), 3 deletions(-)

Index: linux-4.7.4-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7.4-ck4.orig/kernel/sched/bfs.c	2016-09-16 15:41:51.408265873 +1000
+++ linux-4.7.4-ck4/kernel/sched/bfs.c	2016-09-16 16:00:58.817245964 +1000
@@ -151,6 +151,7 @@ int rr_interval __read_mostly = 6;
  * binary yes or no
  */
 int sched_interactive __read_mostly = 1;
+int sched_smt_load;
 
 /*
  * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
@@ -751,6 +752,7 @@ static unsigned long cpu_load_avg(struct
  * of a CPU with SMT siblings is in use.
  */
 #define SCHED_SMT_LOAD	(890)
+#define SCHED_REMST_LOAD	(SCHED_CAPACITY_SCALE - SCHED_SMT_LOAD)
 
 /*
  * Load of a CPU with smt siblings should be considered to be the load from all
@@ -759,11 +761,41 @@ static unsigned long cpu_load_avg(struct
  */
 static unsigned long smt_load_avg(struct rq *rq)
 {
-	unsigned long load = rq->soft_affined * SCHED_SMT_LOAD;
+	unsigned long load;
 	int cpu;
 
-	for_each_cpu(cpu, thread_cpumask(rq->cpu))
-		load += cpu_rq(cpu)->soft_affined * SCHED_SMT_LOAD;
+	switch (sched_smt_load) {
+	default:
+	case 0:
+		load = cpu_load_avg(rq);
+		goto out;
+	case 1:
+		load = rq->soft_affined * SCHED_SMT_LOAD;
+		for_each_cpu(cpu, thread_cpumask(rq->cpu))
+			load += cpu_rq(cpu)->soft_affined * SCHED_REMST_LOAD;
+		goto out;
+	case 2:
+		load = rq->soft_affined * SCHED_SMT_LOAD;
+		for_each_cpu(cpu, thread_cpumask(rq->cpu))
+			load += cpu_rq(cpu)->soft_affined * SCHED_SMT_LOAD;
+		goto out;
+	case 3:
+		load = cpu_load_avg(rq);
+		for_each_cpu(cpu, thread_cpumask(rq->cpu))
+			load += cpu_rq(cpu)->soft_affined * SCHED_REMST_LOAD;
+		goto out;
+	case 4:
+		load = cpu_load_avg(rq);
+		for_each_cpu(cpu, thread_cpumask(rq->cpu))
+			load += cpu_rq(cpu)->soft_affined * SCHED_SMT_LOAD;
+		goto out;
+	case 5:
+		load = cpu_load_avg(rq);
+		for_each_cpu(cpu, thread_cpumask(rq->cpu))
+			load += cpu_rq(cpu)->soft_affined * SCHED_CAPACITY_SCALE;
+		goto out;
+	}
+out:
 	return load;
 }
 
Index: linux-4.7.4-ck4/kernel/sysctl.c
===================================================================
--- linux-4.7.4-ck4.orig/kernel/sysctl.c	2016-09-16 15:41:32.374369688 +1000
+++ linux-4.7.4-ck4/kernel/sysctl.c	2016-09-16 15:46:13.608943622 +1000
@@ -131,6 +131,7 @@ static int __read_mostly one_thousand =
 extern int rr_interval;
 extern int sched_interactive;
 extern int sched_iso_cpu;
+extern int sched_smt_load;
 #endif
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
@@ -1041,6 +1042,13 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+	{
+		.procname	= "smt_load",
+		.data		= &sched_smt_load,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 #if defined(CONFIG_S390) && defined(CONFIG_SMP)
 	{