---
 kernel/sched/bfs.c               |  449 ++++++++++++++++++++-------------------
 kernel/sched/bfs_sched.h         |   15 +
 kernel/sched/cpufreq_schedutil.c |    4 
 3 files changed, 250 insertions(+), 218 deletions(-)

Index: linux-4.7-bfs/kernel/sched/bfs.c
===================================================================
--- linux-4.7-bfs.orig/kernel/sched/bfs.c	2016-07-29 12:17:21.042808770 +1000
+++ linux-4.7-bfs/kernel/sched/bfs.c	2016-07-29 14:46:28.009111652 +1000
@@ -136,7 +136,7 @@
 
 void print_scheduler_version(void)
 {
-	printk(KERN_INFO "BFS CPU scheduler v0.471 by Con Kolivas.\n");
+	printk(KERN_INFO "BFS CPU scheduler v0.472 by Con Kolivas.\n");
 }
 
 /*
@@ -755,7 +755,6 @@ bool cpus_share_cache(int this_cpu, int
 	return (this_rq->cpu_locality[that_cpu] < 3);
 }
 
-#ifdef CONFIG_SCHED_SMT
 #ifdef CONFIG_SMT_NICE
 static const cpumask_t *thread_cpumask(int cpu);
 
@@ -824,7 +823,8 @@ static bool smt_should_schedule(struct t
 	/* Sorry, you lose */
 	return false;
 }
-#endif
+#else
+#define smt_should_schedule(p, cpu)	(1)
 #endif
 
 static bool resched_best_idle(struct task_struct *p)
@@ -970,7 +970,7 @@ static void activate_task(struct task_st
 	p->on_rq = 1;
 	grq.nr_running++;
 	inc_qnr();
-	cpufreq_trigger(grq.niffies);
+	cpufreq_trigger(grq.niffies, rq->soft_affined);
 }
 
 static inline void clear_sticky(struct task_struct *p);
@@ -987,7 +987,7 @@ static inline void deactivate_task(struc
 	p->on_rq = 0;
 	grq.nr_running--;
 	clear_sticky(p);
-	cpufreq_trigger(grq.niffies);
+	cpufreq_trigger(grq.niffies, rq->soft_affined);
 }
 
 #ifdef CONFIG_SMP
@@ -1166,11 +1166,6 @@ inline int task_curr(const struct task_s
 }
 
 #ifdef CONFIG_SMP
-struct migration_req {
-	struct task_struct *task;
-	int dest_cpu;
-};
-
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
@@ -1731,9 +1726,11 @@ int sched_fork(unsigned long __maybe_unu
 	return 0;
 }
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
 	if (enabled)
@@ -1756,11 +1753,16 @@ static int __init setup_schedstats(char
 	if (!str)
 		goto out;
 
+	/*
+	 * This code is called before jump labels have been set up, so we can't
+	 * change the static branch directly just yet. Instead set a temporary
+	 * variable so init_schedstats() can do it later.
+	 */
 	if (!strcmp(str, "enable")) {
-		set_schedstats(true);
+		__sched_schedstats = true;
 		ret = 1;
 	} else if (!strcmp(str, "disable")) {
-		set_schedstats(false);
+		__sched_schedstats = false;
 		ret = 1;
 	}
 out:
@@ -1771,6 +1773,11 @@ out:
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+	set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
 		      void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1791,8 +1798,10 @@ int sysctl_schedstats(struct ctl_table *
 		set_schedstats(state);
 	return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
 
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
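The schedstats rework above is the mainline two-stage bring-up: "schedstats=" is parsed from the command line before jump_label_init() has run, so the early parameter can only latch a plain variable that init_schedstats() later feeds to set_schedstats() once static-key patching is legal. A minimal userspace sketch of the same deferral pattern (the jump-label stand-ins are invented for illustration; only the flow mirrors the kernel):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the jump-label machinery, which is not usable at
     * the time early boot parameters are parsed. */
    static bool jump_labels_ready;
    static bool sched_schedstats_key;

    static bool __sched_schedstats;     /* temporary, consumed once */

    static void set_schedstats(bool enabled)
    {
        /* In the kernel this patches a static branch, which is only
         * safe after jump_label_init() has run. */
        if (!jump_labels_ready) {
            fprintf(stderr, "too early to patch code!\n");
            return;
        }
        sched_schedstats_key = enabled;
    }

    /* Models setup_schedstats(): may only record the request. */
    static void setup_schedstats(const char *str)
    {
        __sched_schedstats = (strcmp(str, "enable") == 0);
    }

    /* Models init_schedstats(): applies the request once it is safe. */
    static void init_schedstats(void)
    {
        set_schedstats(__sched_schedstats);
    }

    int main(void)
    {
        setup_schedstats("enable"); /* early: just latches the value */
        jump_labels_ready = true;   /* jump_label_init() has run */
        init_schedstats();          /* now the "static key" flips */
        printf("schedstats: %d\n", sched_schedstats_key);
        return 0;
    }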
@@ -2108,7 +2117,7 @@ context_switch(struct rq *rq, struct tas
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
-		switch_mm(oldmm, mm, next);
+		switch_mm_irqs_off(oldmm, mm, next);
 
 	if (!prev->mm) {
 		prev->active_mm = NULL;
@@ -2242,9 +2251,13 @@ void get_avenrun(unsigned long *loads, u
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
-	load *= exp;
-	load += active * (FIXED_1 - exp);
-	return load >> FSHIFT;
+	unsigned long newload;
+
+	newload = load * exp + active * (FIXED_1 - exp);
+	if (active >= load)
+		newload += FIXED_1-1;
+
+	return newload / FIXED_1;
 }
 
 /*
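The calc_load() change swaps the truncating shift for a division that rounds up whenever the average is climbing (active >= load). With pure truncation the fixed-point average stalls just below its target, so a machine running exactly one task forever reports a load of 0.99 rather than 1.00. The arithmetic can be checked standalone, using the kernel's fixed-point constants from include/linux/sched.h:

    #include <stdio.h>

    #define FSHIFT  11                  /* bits of precision */
    #define FIXED_1 (1 << FSHIFT)      /* 1.0 in fixed point */
    #define EXP_1   1884               /* 1/exp(5sec/1min) in fixed point */

    static unsigned long calc_load_old(unsigned long load, unsigned long exp,
                                       unsigned long active)
    {
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;          /* always rounds down */
    }

    static unsigned long calc_load_new(unsigned long load, unsigned long exp,
                                       unsigned long active)
    {
        unsigned long newload;

        newload = load * exp + active * (FIXED_1 - exp);
        if (active >= load)
            newload += FIXED_1 - 1;     /* round up while climbing */

        return newload / FIXED_1;
    }

    int main(void)
    {
        /* One task runs forever: the true 1-minute load is 1.00. */
        unsigned long active = FIXED_1, old = 0, fixed = 0;
        int i;

        for (i = 0; i < 200; i++) {
            old = calc_load_old(old, EXP_1, active);
            fixed = calc_load_new(fixed, EXP_1, active);
        }
        /* old stalls at 2036 (shown as 0.99); fixed reaches 2048 (1.00) */
        printf("old=%lu fixed=%lu\n", old, fixed);
        return 0;
    }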
@@ -3061,6 +3074,21 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
+/*
+ * If the value passed in is equal to the current preempt count
+ * then we just disabled preemption. Start timing the latency.
+ */
+static inline void preempt_latency_start(int val)
+{
+	if (preempt_count() == val) {
+		unsigned long ip = get_lock_parent_ip();
+#ifdef CONFIG_DEBUG_PREEMPT
+		current->preempt_disable_ip = ip;
+#endif
+		trace_preempt_off(CALLER_ADDR0, ip);
+	}
+}
+
 void preempt_count_add(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
@@ -3078,17 +3106,21 @@ void preempt_count_add(int val)
 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
 				PREEMPT_MASK - 10);
 #endif
-	if (preempt_count() == val) {
-		unsigned long ip = get_lock_parent_ip();
-#ifdef CONFIG_DEBUG_PREEMPT
-		current->preempt_disable_ip = ip;
-#endif
-		trace_preempt_off(CALLER_ADDR0, ip);
-	}
+	preempt_latency_start(val);
 }
 EXPORT_SYMBOL(preempt_count_add);
 NOKPROBE_SYMBOL(preempt_count_add);
 
+/*
+ * If the value passed in equals the current preempt count
+ * then we just enabled preemption. Stop timing the latency.
+ */
+static inline void preempt_latency_stop(int val)
+{
+	if (preempt_count() == val)
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+
 void preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
@@ -3105,12 +3137,15 @@ void preempt_count_sub(int val)
 		return;
 #endif
 
-	if (preempt_count() == val)
-		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+	preempt_latency_stop(val);
 	__preempt_count_sub(val);
 }
 EXPORT_SYMBOL(preempt_count_sub);
 NOKPROBE_SYMBOL(preempt_count_sub);
+
+#else
+static inline void preempt_latency_start(int val) { }
+static inline void preempt_latency_stop(int val) { }
 #endif
 
 /*
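Factoring the tracing out into preempt_latency_start()/preempt_latency_stop() lets __schedule() callers further down time the preempt-off window themselves. The pair only fires on the outermost disable/enable, which this toy model demonstrates (printf stands in for the trace points; the counter logic mirrors the hunk above):

    #include <stdio.h>

    static int preempt_count;

    /* Fires only when this add made preemption disabled, i.e. the
     * counter was zero before: nested disables stay silent. */
    static void preempt_latency_start(int val)
    {
        if (preempt_count == val)
            printf("trace: preemption off\n");
    }

    static void preempt_latency_stop(int val)
    {
        if (preempt_count == val)
            printf("trace: preemption on\n");
    }

    static void preempt_count_add(int val)
    {
        preempt_count += val;
        preempt_latency_start(val);
    }

    static void preempt_count_sub(int val)
    {
        preempt_latency_stop(val);
        preempt_count -= val;
    }

    int main(void)
    {
        preempt_count_add(1);   /* traces: outermost disable */
        preempt_count_add(1);   /* silent: already disabled */
        preempt_count_sub(1);   /* silent: still disabled */
        preempt_count_sub(1);   /* traces: preemption back on */
        return 0;
    }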
@@ -3364,7 +3399,8 @@ static noinline void __schedule_bug(stru
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-	BUG_ON(task_stack_end_corrupted(prev));
+	if (task_stack_end_corrupted(prev))
+		panic("corrupted stack end detected inside scheduler\n");
 #endif
 
 	if (unlikely(in_atomic_preempt_off())) {
@@ -3720,8 +3756,23 @@ void __sched schedule_preempt_disabled(v
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
 		preempt_disable_notrace();
+		preempt_latency_start(1);
 		__schedule(true);
+		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 
 		/*
@@ -3773,7 +3824,21 @@ asmlinkage __visible void __sched notrac
 		return;
 
 	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
 		preempt_disable_notrace();
+		preempt_latency_start(1);
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -3783,6 +3848,7 @@ asmlinkage __visible void __sched notrac
 		__schedule(true);
 		exception_exit(prev_ctx);
 
+		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 	} while (need_resched());
 }
@@ -5152,14 +5218,16 @@ void show_state_filter(unsigned long sta
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
 		 * console might take a lot of time:
+		 * Also, reset softlockup watchdogs on all CPUs, because
+		 * another CPU might be blocked waiting for us to process
+		 * an IPI.
 		 */
 		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
 		if (!state_filter || (p->state & state_filter))
 			sched_show_task(p);
 	}
 
-	touch_all_softlockup_watchdogs();
-
 	rcu_read_unlock();
 	/*
 	 * Only show locks if all tasks are dumped:
@@ -5278,7 +5346,7 @@ void wake_q_add(struct wake_q_head *head
 	 * wakeup due to that.
 	 *
 	 * This cmpxchg() implies a full barrier, which pairs with the write
-	 * barrier implied by the wakeup in wake_up_list().
+	 * barrier implied by the wakeup in wake_up_q().
 	 */
 	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
 		return;
@@ -5390,7 +5458,11 @@ int get_nohz_timer_target(void)
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		for_each_cpu(i, sched_domain_span(sd)) {
-			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
+			if (cpu == i)
+				continue;
+
+			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
+				cpu = i;
 				goto unlock;
 			}
@@ -5441,6 +5513,7 @@ void wake_up_nohz_cpu(int cpu)
 static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask, bool check)
 {
+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool running_wrong = false;
 	bool queued = false;
 	unsigned long flags;
@@ -5449,6 +5522,13 @@ static int __set_cpus_allowed_ptr(struct
 
 	rq = task_grq_lock(p, &flags);
 
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * Kernel threads are allowed on online && !active CPUs
+		 */
+		cpu_valid_mask = cpu_online_mask;
+	}
+
 	/*
	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
@@ -5461,7 +5541,7 @@ static int __set_cpus_allowed_ptr(struct
 	if (cpumask_equal(tsk_cpus_allowed(p), new_mask))
 		goto out;
 
-	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -5470,6 +5550,16 @@ static int __set_cpus_allowed_ptr(struct
 
 	do_set_cpus_allowed(p, new_mask);
 
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * For kernel threads that do indeed end up on online &&
+		 * !active we want to ensure they are strict per-cpu threads.
+		 */
+		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
+			!cpumask_intersects(new_mask, cpu_active_mask) &&
+			tsk_nr_cpus_allowed(p) != 1);
+	}
+
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
@@ -5482,7 +5572,7 @@ static int __set_cpus_allowed_ptr(struct
 		} else
 			resched_task(p);
 	} else
-		set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask));
+		set_task_cpu(p, cpumask_any_and(cpu_valid_mask, new_mask));
 
 out:
 	if (queued)
@@ -5501,6 +5591,8 @@ int set_cpus_allowed_ptr(struct task_str
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+static bool sched_smp_initialized __read_mostly;
+
 #ifdef CONFIG_HOTPLUG_CPU
 /* Run through task list and find tasks affined to the dead cpu, then remove
  * that cpu from the list, enable cpu0 and set the zerobound flag. */
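The cpu_valid_mask distinction above is the heart of this hunk: during hotplug a CPU can be online but no longer active, and only kernel threads may still be placed there; user tasks must take the -EINVAL path. A toy model of just the mask selection (the 8-bit masks and the helper are invented for illustration; PF_KTHREAD is the real flag value from include/linux/sched.h):

    #include <stdio.h>

    #define PF_KTHREAD 0x00200000   /* I am a kernel thread */

    /* Toy 8-bit "cpumasks": bit n set = CPU n is in the mask. */
    typedef unsigned char cpumask_t;

    static cpumask_t cpu_online_mask = 0x0f;    /* CPUs 0-3 online */
    static cpumask_t cpu_active_mask = 0x07;    /* CPU 3 going down */

    /* Mirrors the __set_cpus_allowed_ptr() policy: user tasks may only
     * be placed on active CPUs, kernel threads on any online CPU. */
    static cpumask_t valid_mask_for(unsigned int task_flags)
    {
        return (task_flags & PF_KTHREAD) ? cpu_online_mask
                                         : cpu_active_mask;
    }

    int main(void)
    {
        cpumask_t new_mask = 0x08;      /* request CPU 3 only */

        /* A user task is refused: CPU 3 is no longer active. */
        printf("user task ok: %d\n", !!(new_mask & valid_mask_for(0)));
        /* A per-cpu kthread is still allowed while CPU 3 winds down. */
        printf("kthread ok:   %d\n",
               !!(new_mask & valid_mask_for(PF_KTHREAD)));
        return 0;
    }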
@@ -5574,7 +5666,7 @@ void idle_task_exit(void)
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm(mm, &init_mm, current);
+		switch_mm_irqs_off(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
@@ -5799,120 +5891,6 @@ static void set_rq_offline(struct rq *rq
 	}
 }
 
-/*
- * migration_call - callback that gets triggered when a CPU is added.
- */
-static int
-migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-	unsigned long flags;
-	struct rq *rq = cpu_rq(cpu);
-#ifdef CONFIG_HOTPLUG_CPU
-	struct task_struct *idle = rq->idle;
-#endif
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		return NOTIFY_OK;
-	case CPU_UP_PREPARE:
-		break;
-
-	case CPU_ONLINE:
-		/* Update our root-domain */
-		grq_lock_irqsave(&flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-
-			set_rq_online(rq);
-		}
-		unbind_zero(cpu);
-		grq.noc = num_online_cpus();
-		grq_unlock_irqrestore(&flags);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-		grq_lock_irq();
-		set_rq_task(rq, idle);
-		update_clocks(rq);
-		grq_unlock_irq();
-		break;
-
-	case CPU_DYING:
-		/* Update our root-domain */
-		grq_lock_irqsave(&flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-			set_rq_offline(rq);
-		}
-		bind_zero(cpu);
-		grq.noc = num_online_cpus();
-		grq_unlock_irqrestore(&flags);
-		break;
-#endif
-	}
-	return NOTIFY_OK;
-}
-
-/*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
- */
-static struct notifier_block migration_notifier = {
-	.notifier_call = migration_call,
-	.priority = CPU_PRI_MIGRATION,
-};
-
-static int sched_cpu_active(struct notifier_block *nfb,
-			    unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		return NOTIFY_OK;
-
-	case CPU_DOWN_FAILED:
-		set_cpu_active(cpu, true);
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static int sched_cpu_inactive(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		set_cpu_active((long)hcpu, false);
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-int __init migration_init(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-	int err;
-
-	/* Initialise migration for the boot CPU */
-	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-	BUG_ON(err == NOTIFY_BAD);
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
-
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
-	return 0;
-}
-early_initcall(migration_init);
-
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -6630,8 +6608,8 @@ static void sched_init_numa(void)
 
 static void sched_domains_numa_masks_set(int cpu)
 {
-	int i, j;
 	int node = cpu_to_node(cpu);
+	int i, j;
 
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++) {
@@ -6644,48 +6622,17 @@ static void sched_domains_numa_masks_set
 static void sched_domains_numa_masks_clear(int cpu)
 {
 	int i, j;
+
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++)
 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
 	}
 }
 
-/*
- * Update sched_domains_numa_masks[level][node] array when new cpus
- * are onlined.
- */
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		sched_domains_numa_masks_set(cpu);
-		break;
-
-	case CPU_DEAD:
-		sched_domains_numa_masks_clear(cpu);
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
 #else
-static inline void sched_init_numa(void)
-{
-}
-
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	return 0;
-}
+static inline void sched_init_numa(void) { }
+static void sched_domains_numa_masks_set(unsigned int cpu) { }
+static void sched_domains_numa_masks_clear(unsigned int cpu) { }
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
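All of the removed plumbing multiplexed hotplug phases through switch statements on notifier action codes; the replacement below gives the scheduler named entry points (sched_cpu_activate() and friends) that the hotplug core calls directly at well-defined states. For comparison, this is roughly how out-of-tree code registers for the same events on the 4.6+ state machine; a hedged sketch, with the "demo" names invented:

    #include <linux/cpuhotplug.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    /* Called on each CPU as it comes up (replaces CPU_ONLINE). */
    static int demo_cpu_online(unsigned int cpu)
    {
        pr_info("demo: cpu %u online\n", cpu);
        return 0;               /* non-zero would veto the bring-up */
    }

    /* Called as each CPU goes down (replaces CPU_DOWN_PREPARE). */
    static int demo_cpu_offline(unsigned int cpu)
    {
        pr_info("demo: cpu %u going away\n", cpu);
        return 0;
    }

    static enum cpuhp_state demo_state;

    static int __init demo_init(void)
    {
        int ret;

        /* One call replaces register_cpu_notifier() plus the switch()
         * on action codes; the core also invokes the online callback
         * for every CPU that is already up. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                demo_cpu_online, demo_cpu_offline);
        if (ret < 0)
            return ret;
        demo_state = ret;       /* dynamic state id, kept for teardown */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cpuhp_remove_state(demo_state);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");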
@@ -7023,13 +6970,9 @@ static int num_cpus_frozen;	/* used to m
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+static void cpuset_cpu_active(void)
 {
-	switch (action) {
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED_FROZEN:
-
+	if (cpuhp_tasks_frozen) {
 		/*
 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
 		 * resume sequence. As long as this is not the last online
@@ -7039,41 +6982,118 @@ static int cpuset_cpu_noti
 		num_cpus_frozen--;
 		if (likely(num_cpus_frozen)) {
 			partition_sched_domains(1, NULL, NULL);
-			break;
+			return;
 		}
-
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
-
-	case CPU_ONLINE:
-		cpuset_update_active_cpus(true);
-		break;
-
-	default:
-		return NOTIFY_DONE;
 	}
-	return NOTIFY_OK;
+
+	cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
-			       void *hcpu)
+static int cpuset_cpu_inactive(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_DOWN_PREPARE:
+	if (!cpuhp_tasks_frozen) {
 		cpuset_update_active_cpus(false);
-		break;
-	case CPU_DOWN_PREPARE_FROZEN:
+	} else {
 		num_cpus_frozen++;
 		partition_sched_domains(1, NULL, NULL);
-		break;
-	default:
-		return NOTIFY_DONE;
 	}
-	return NOTIFY_OK;
+	return 0;
+}
+
+int sched_cpu_activate(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	set_cpu_active(cpu, true);
+
+	if (sched_smp_initialized) {
+		sched_domains_numa_masks_set(cpu);
+		cpuset_cpu_active();
+	}
+
+	/*
+	 * Put the rq online, if not already. This happens:
+	 *
+	 * 1) In the early boot process, because we build the real domains
+	 *    after all cpus have been brought up.
+	 *
+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+	 *    domains.
+	 */
+	grq_lock_irqsave(&flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	unbind_zero(cpu);
+	grq.noc = num_online_cpus();
+	grq_unlock_irqrestore(&flags);
+
+	return 0;
 }
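Worth noting between these two entry points: suspend/resume is now detected through the core's cpuhp_tasks_frozen flag rather than *_FROZEN action variants, and num_cpus_frozen defers the expensive cpuset-driven domain rebuild to the last CPU of the resume sequence. A toy resume loop showing that counting logic (plain C, printf in place of the real rebuild calls):

    #include <stdio.h>

    static int num_cpus_frozen;

    /* Called once per CPU as it is brought back during resume. Only
     * the last one triggers the cpuset-driven rebuild; earlier ones
     * keep the single temporary domain. */
    static void cpu_active_frozen(void)
    {
        num_cpus_frozen--;
        if (num_cpus_frozen) {
            printf("keep single sched domain\n");
            return;
        }
        printf("last CPU up: restore cpuset sched domains\n");
    }

    int main(void)
    {
        int cpus = 4;

        num_cpus_frozen = cpus;     /* suspend froze 4 CPUs ... */
        while (cpus--)              /* ... resume restores them */
            cpu_active_frozen();
        return 0;
    }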
 
+int sched_cpu_deactivate(unsigned int cpu)
+{
+	int ret;
+
+	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+	 * not imply sync_sched(), so wait for both.
+	 *
+	 * Do sync before parking smpboot threads to take care of the rcu
+	 * boost case.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
+
+	if (!sched_smp_initialized)
+		return 0;
+
+	ret = cpuset_cpu_inactive(cpu);
+	if (ret) {
+		set_cpu_active(cpu, true);
+		return ret;
+	}
+	sched_domains_numa_masks_clear(cpu);
+	return 0;
+}
+
+int sched_cpu_starting(unsigned int __maybe_unused cpu)
+{
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int sched_cpu_dying(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	grq_lock_irqsave(&flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	bind_zero(cpu);
+	grq.noc = num_online_cpus();
+	grq_unlock_irqrestore(&flags);
+
+	return 0;
+}
+#endif
+
 #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
 /*
  * Cheaper version of the below functions in case support for SMT and MC is
@@ -7142,10 +7162,6 @@ void __init sched_init_smp(void)
 	cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 
-	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
-
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
@@ -7206,6 +7222,7 @@ void __init sched_init_smp(void)
 			printk(KERN_DEBUG "BFS LOCALITY CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
 		}
 	}
+	sched_smp_initialized = true;
 }
 #else
 void __init sched_init_smp(void)
@@ -7321,6 +7338,8 @@ void __init sched_init(void)
 	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 	idle_thread_set_boot_cpu();
 #endif /* SMP */
+
+	init_schedstats();
 }
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
Index: linux-4.7-bfs/kernel/sched/bfs_sched.h
===================================================================
--- linux-4.7-bfs.orig/kernel/sched/bfs_sched.h	2016-07-29 12:15:37.107601749 +1000
+++ linux-4.7-bfs/kernel/sched/bfs_sched.h	2016-07-29 14:07:56.550689128 +1000
@@ -195,17 +195,26 @@ static inline struct cpuidle_state *idle
 #ifdef CONFIG_CPU_FREQ
 DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
-static inline void cpufreq_trigger(u64 time)
+static inline void cpufreq_trigger(u64 time, unsigned long util)
 {
 	struct update_util_data *data;
 
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
-		data->func(data, time, ULONG_MAX, 0);
+		data->func(data, time, util, 0);
 }
 #else
-static inline void cpufreq_trigger(u64 __maybe_unused time)
+static inline void cpufreq_trigger(u64 time, unsigned long util)
 {
 }
 #endif /* CONFIG_CPU_FREQ */
+
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant()	(true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant()	(false)
+#endif
+
 #endif /* BFS_SCHED_H */
Index: linux-4.7-bfs/kernel/sched/cpufreq_schedutil.c
===================================================================
--- linux-4.7-bfs.orig/kernel/sched/cpufreq_schedutil.c	2016-07-26 10:17:36.067881684 +1000
+++ linux-4.7-bfs/kernel/sched/cpufreq_schedutil.c	2016-07-29 13:35:52.076892385 +1000
@@ -16,7 +16,11 @@
 #include <linux/slab.h>
 #include <trace/events/power.h>
 
+#ifdef CONFIG_SCHED_BFS
+#include "bfs_sched.h"
+#else
 #include "sched.h"
+#endif
 
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
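The cpufreq_trigger() change ties the two ends of this patch together: instead of unconditionally passing ULONG_MAX (which a governor reads as "go to maximum frequency"), BFS now hands over rq->soft_affined as a crude measure of how busy the runqueue is, and the schedutil governor is taught to include bfs_sched.h so it can build against BFS. A standalone sketch of how a schedutil-style callback distinguishes the two cases; the function shape and the 1.25x headroom factor follow 4.7's cpufreq_schedutil.c, while the max value of 8 here is an illustrative stand-in, not something the patch defines:

    #include <limits.h>
    #include <stdio.h>

    /* Simplified shape of get_next_freq(): util == ULONG_MAX is a
     * "no measurement, run at max" request; otherwise the frequency
     * scales with util/max plus 25% headroom. */
    static unsigned int next_freq(unsigned int max_freq,
                                  unsigned long util, unsigned long max)
    {
        if (util == ULONG_MAX)
            return max_freq;

        return (max_freq + (max_freq >> 2)) * util / max;
    }

    int main(void)
    {
        /* Old BFS hook: always ULONG_MAX, i.e. pinned at max. */
        printf("%u\n", next_freq(2000000, ULONG_MAX, 8));
        /* New hook: rq->soft_affined as the utilization signal,
         * here 2 busy units out of an assumed scale of 8. */
        printf("%u\n", next_freq(2000000, 2, 8));
        return 0;
    }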