With the cpufreq rewrite, trying to make scheduling decisions based on
whether a cpu is throttled or not only worsens behaviour, so remove it.

-ck

---
 drivers/cpufreq/cpufreq.c      |    6 ----
 drivers/cpufreq/intel_pstate.c |    9 +-----
 kernel/sched/bfs.c             |   57 +++--------------------------------------
 3 files changed, 8 insertions(+), 64 deletions(-)

Index: linux-4.7-ck4/drivers/cpufreq/cpufreq.c
===================================================================
--- linux-4.7-ck4.orig/drivers/cpufreq/cpufreq.c	2016-09-13 17:21:53.652217526 +1000
+++ linux-4.7-ck4/drivers/cpufreq/cpufreq.c	2016-09-13 17:21:53.649217595 +1000
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -1931,11 +1930,8 @@ int __cpufreq_driver_target(struct cpufr
 		return -ENODEV;
 
 	/* Make sure that target_freq is within supported range */
-	if (target_freq >= policy->max) {
+	if (target_freq > policy->max)
 		target_freq = policy->max;
-		cpu_nonscaling(policy->cpu);
-	} else
-		cpu_scaling(policy->cpu);
 	if (target_freq < policy->min)
 		target_freq = policy->min;
 
Index: linux-4.7-ck4/drivers/cpufreq/intel_pstate.c
===================================================================
--- linux-4.7-ck4.orig/drivers/cpufreq/intel_pstate.c	2016-09-13 17:21:53.652217526 +1000
+++ linux-4.7-ck4/drivers/cpufreq/intel_pstate.c	2016-09-13 17:21:53.649217595 +1000
@@ -853,13 +853,8 @@ static u64 atom_get_val(struct cpudata *
 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
 	vid = ceiling_fp(vid_fp);
 
-	if (pstate < cpudata->pstate.max_pstate)
-		cpu_scaling(cpudata->cpu);
-	else {
-		if (pstate > cpudata->pstate.max_pstate)
-			vid = cpudata->vid.turbo;
-		cpu_nonscaling(cpudata->cpu);
-	}
+	if (pstate > cpudata->pstate.max_pstate)
+		vid = cpudata->vid.turbo;
 
 	return val | vid;
 }
Index: linux-4.7-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck4.orig/kernel/sched/bfs.c	2016-09-13 17:21:53.652217526 +1000
+++ linux-4.7-ck4/kernel/sched/bfs.c	2016-09-13 17:21:53.650217572 +1000
@@ -809,10 +809,7 @@ static bool suitable_idle_cpus(struct ta
 #define CPUIDLE_CACHE_BUSY	(4)
 #define CPUIDLE_DIFF_CPU	(8)
 #define CPUIDLE_THREAD_BUSY	(16)
-#define CPUIDLE_THROTTLED	(32)
-#define CPUIDLE_DIFF_NODE	(64)
-
-static inline bool scaling_rq(struct rq *rq);
+#define CPUIDLE_DIFF_NODE	(32)
 
 /*
  * The best idle CPU is chosen according to the CPUIDLE ranking above where the
@@ -831,9 +828,9 @@ static inline bool scaling_rq(struct rq
  */
 static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
 {
-	int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THROTTLED |
-		CPUIDLE_THREAD_BUSY | CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY |
-		CPUIDLE_DIFF_CORE | CPUIDLE_DIFF_THREAD;
+	int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
+		CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
+		CPUIDLE_DIFF_THREAD;
 	int cpu_tmp;
 
 	if (cpumask_test_cpu(best_cpu, tmpmask))
@@ -866,9 +863,6 @@ static int best_mask_cpu(int best_cpu, s
 		if (!(tmp_rq->siblings_idle(tmp_rq)))
 			ranking |= CPUIDLE_THREAD_BUSY;
 #endif
-		if (scaling_rq(tmp_rq))
-			ranking |= CPUIDLE_THROTTLED;
-
 		if (ranking < best_ranking) {
 			best_cpu = cpu_tmp;
 			best_ranking = ranking;
@@ -911,25 +905,6 @@ static inline void resched_suitable_idle
 	if (suitable_idle_cpus(p))
 		resched_best_idle(p);
 }
-/*
- * Flags to tell us whether this CPU is running a CPU frequency governor that
- * has slowed its speed or not. No locking required as the very rare wrongly
- * read value would be harmless.
- */
-void cpu_scaling(int cpu)
-{
-	cpu_rq(cpu)->scaling = true;
-}
-
-void cpu_nonscaling(int cpu)
-{
-	cpu_rq(cpu)->scaling = false;
-}
-
-static inline bool scaling_rq(struct rq *rq)
-{
-	return rq->scaling;
-}
 
 static inline int locality_diff(int cpu, struct rq *rq)
 {
@@ -953,30 +928,11 @@ static inline void resched_suitable_idle
 {
 }
 
-void cpu_scaling(int __unused)
-{
-}
-
-void cpu_nonscaling(int __unused)
-{
-}
-
-/*
- * Although CPUs can scale in UP, there is nowhere else for tasks to go so this
- * always returns 0.
- */
-static inline bool scaling_rq(struct rq *rq)
-{
-	return false;
-}
-
 static inline int locality_diff(int cpu, struct rq *rq)
 {
 	return 0;
 }
 #endif /* CONFIG_SMP */
-EXPORT_SYMBOL_GPL(cpu_scaling);
-EXPORT_SYMBOL_GPL(cpu_nonscaling);
 
 static inline int normal_prio(struct task_struct *p)
 {
@@ -3401,11 +3357,8 @@ task_struct *earliest_deadline_task(stru
 				continue;
 
 			if (!sched_interactive && (tcpu = task_cpu(p)) != cpu) {
-				u64 dl;
+				u64 dl = p->deadline << locality_diff(tcpu, rq);
 
-				if (task_sticky(p) && scaling_rq(rq))
-					continue;
-				dl = p->deadline << locality_diff(tcpu, rq);
 				if (unlikely(!deadline_before(dl, earliest_deadline)))
 					continue;
 				earliest_deadline = dl;