---
 kernel/sched/bfs.c | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

Index: linux-4.1-bfs/kernel/sched/bfs.c
===================================================================
--- linux-4.1-bfs.orig/kernel/sched/bfs.c	2015-08-02 16:54:54.628772671 +1000
+++ linux-4.1-bfs/kernel/sched/bfs.c	2015-08-02 17:34:13.340388825 +1000
@@ -639,15 +639,15 @@ static inline int queued_notrunning(void
 static inline void set_cpuidle_map(int cpu)
 {
 	if (likely(cpu_online(cpu))) {
-		cpu_set(cpu, grq.cpu_idle_map);
+		cpumask_set_cpu(cpu, &grq.cpu_idle_map);
 		grq.idle_cpus = true;
 	}
 }
 
 static inline void clear_cpuidle_map(int cpu)
 {
-	cpu_clear(cpu, grq.cpu_idle_map);
-	if (cpus_empty(grq.cpu_idle_map))
+	cpumask_clear_cpu(cpu, &grq.cpu_idle_map);
+	if (cpumask_empty(&grq.cpu_idle_map))
 		grq.idle_cpus = false;
 }
 
@@ -655,7 +655,7 @@ static bool suitable_idle_cpus(struct ta
 {
 	if (!grq.idle_cpus)
 		return false;
-	return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));
+	return (cpumask_intersects(&p->cpus_allowed, &grq.cpu_idle_map));
 }
 
 #define CPUIDLE_DIFF_THREAD	(1)
@@ -690,10 +690,10 @@ static int best_mask_cpu(int best_cpu, s
 			CPUIDLE_DIFF_CORE | CPUIDLE_DIFF_THREAD;
 	int cpu_tmp;
 
-	if (cpu_isset(best_cpu, *tmpmask))
+	if (cpumask_test_cpu(best_cpu, tmpmask))
 		goto out;
 
-	for_each_cpu_mask(cpu_tmp, *tmpmask) {
+	for_each_cpu(cpu_tmp, tmpmask) {
 		int ranking, locality;
 		struct rq *tmp_rq;
 
@@ -757,7 +757,7 @@ static int best_smt_bias(int cpu)
 {
 	int other_cpu, best_bias = 0;
 
-	for_each_cpu_mask(other_cpu, *thread_cpumask(cpu)) {
+	for_each_cpu(other_cpu, thread_cpumask(cpu)) {
 		struct rq *rq;
 
 		if (other_cpu == cpu)
@@ -822,7 +822,7 @@ static bool resched_best_idle(struct tas
 	cpumask_t tmpmask;
 	int best_cpu;
 
-	cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map);
+	cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
 	best_cpu = best_mask_cpu(task_cpu(p), task_rq(p), &tmpmask);
 #ifdef CONFIG_SMT_NICE
 	if (!smt_should_schedule(p, best_cpu))
@@ -1021,9 +1021,9 @@ resched_closest_idle(struct rq *rq, int
 {
 	cpumask_t tmpmask;
 
-	cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map);
-	cpu_clear(cpu, tmpmask);
-	if (cpus_empty(tmpmask))
+	cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
+	cpumask_clear_cpu(cpu, &tmpmask);
+	if (cpumask_empty(&tmpmask))
 		return;
 	resched_best_mask(cpu, rq, &tmpmask);
 }
@@ -1317,7 +1317,7 @@ can_preempt(struct task_struct *p, int p
  */
 static inline bool online_cpus(struct task_struct *p)
 {
-	return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed)));
+	return (likely(cpumask_intersects(&cpu_online_map, &p->cpus_allowed)));
 }
 #else /* CONFIG_HOTPLUG_CPU */
 /* All available CPUs are always online without hotplug. */
@@ -1333,7 +1333,7 @@ static inline bool online_cpus(struct ta
  */
 static inline bool needs_other_cpu(struct task_struct *p, int cpu)
 {
-	if (unlikely(!cpu_isset(cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
 		return true;
 	return false;
 }
@@ -1363,13 +1363,13 @@ static void try_preempt(struct task_stru
 		return;
 
 	if (likely(online_cpus(p)))
-		cpus_and(tmp, cpu_online_map, p->cpus_allowed);
+		cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
 	else
 		return;
 
 	highest_prio = latest_deadline = 0;
 
-	for_each_cpu_mask(cpu, tmp) {
+	for_each_cpu(cpu, &tmp) {
 		struct rq *rq;
 		int rq_prio;
 
@@ -3271,7 +3271,7 @@ static void check_smt_siblings(int cpu)
 {
 	int other_cpu;
 
-	for_each_cpu_mask(other_cpu, *thread_cpumask(cpu)) {
+	for_each_cpu(other_cpu, thread_cpumask(cpu)) {
 		struct task_struct *p;
 		struct rq *rq;
 
@@ -3297,7 +3297,7 @@ static void wake_smt_siblings(int cpu)
 	if (!queued_notrunning())
 		return;
 
-	for_each_cpu_mask(other_cpu, *thread_cpumask(cpu)) {
+	for_each_cpu(other_cpu, thread_cpumask(cpu)) {
 		struct rq *rq;
 
 		if (other_cpu == cpu)
@@ -5053,7 +5053,7 @@ void init_idle(struct task_struct *idle,
 	idle->smt_bias = 0;
 #endif
 	set_rq_task(rq, idle);
-	do_set_cpus_allowed(idle, &cpumask_of_cpu(cpu));
+	do_set_cpus_allowed(idle, get_cpu_mask(cpu));
 	/* Silence PROVE_RCU */
 	rcu_read_lock();
 	set_task_cpu(idle, cpu);
@@ -5281,7 +5281,7 @@ static void bind_zero(int src_cpu)
 		stopper = per_cpu(cpu_stopper_task, src_cpu);
 
 	do_each_thread(t, p) {
-		if (p != stopper && cpu_isset(src_cpu, *tsk_cpus_allowed(p))) {
+		if (p != stopper && cpumask_test_cpu(src_cpu, tsk_cpus_allowed(p))) {
 			cpumask_clear_cpu(src_cpu, tsk_cpus_allowed(p));
 			cpumask_set_cpu(0, tsk_cpus_allowed(p));
 			p->zerobound = true;
@@ -5955,7 +5955,7 @@ cpu_attach_domain(struct sched_domain *s
 }
 
 /* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
+cpumask_var_t cpu_isolated_map;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
@@ -6930,7 +6930,7 @@ void __init sched_init_smp(void)
 			if (sd->level > SD_LV_NODE)
 				continue;
 			/* Set locality to local node if not already found lower */
-			for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) {
+			for_each_cpu(other_cpu, sched_domain_span(sd)) {
 				if (rq->cpu_locality[other_cpu] > 3)
 					rq->cpu_locality[other_cpu] = 3;
 			}
@@ -6941,17 +6941,17 @@ void __init sched_init_smp(void)
 		 * siblings of its own allowing mixed topologies.
 		 */
 #ifdef CONFIG_SCHED_MC
-		for_each_cpu_mask(other_cpu, *core_cpumask(cpu)) {
+		for_each_cpu(other_cpu, core_cpumask(cpu)) {
 			if (rq->cpu_locality[other_cpu] > 2)
 				rq->cpu_locality[other_cpu] = 2;
 		}
-		if (cpus_weight(*core_cpumask(cpu)) > 1)
+		if (cpumask_weight(core_cpumask(cpu)) > 1)
 			rq->cache_idle = cache_cpu_idle;
 #endif
 #ifdef CONFIG_SCHED_SMT
-		for_each_cpu_mask(other_cpu, *thread_cpumask(cpu))
+		for_each_cpu(other_cpu, thread_cpumask(cpu))
 			rq->cpu_locality[other_cpu] = 1;
-		if (cpus_weight(*thread_cpumask(cpu)) > 1)
+		if (cpumask_weight(thread_cpumask(cpu)) > 1)
 			rq->siblings_idle = siblings_cpu_idle;
 #endif
 	}
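
Note on the conversion pattern (editor's illustration, not part of the patch): every hunk above replaces the old value-style cpumask operators (cpu_set(), cpu_clear(), cpus_and(), cpus_empty(), cpu_isset(), cpus_intersects(), cpus_weight(), for_each_cpu_mask(), cpumask_of_cpu()) with the pointer-based helpers from <linux/cpumask.h> (cpumask_set_cpu(), cpumask_clear_cpu(), cpumask_and(), cpumask_empty(), cpumask_test_cpu(), cpumask_intersects(), cpumask_weight(), for_each_cpu(), get_cpu_mask()). The sketch below shows that pattern in isolation; the demo_* struct and function names are hypothetical stand-ins, not symbols from bfs.c, and it assumes a 4.1-era kernel build environment.

/*
 * Minimal sketch of the pointer-based cpumask API this patch converts to.
 * The demo_* names are illustrative only; just the cpumask_* helpers and
 * for_each_cpu() are real kernel API from <linux/cpumask.h>.
 */
#include <linux/types.h>
#include <linux/cpumask.h>

struct demo_grq {
	cpumask_t cpu_idle_map;		/* mask embedded by value in a struct */
	bool idle_cpus;
};

static struct demo_grq demo_grq;

static void demo_set_cpuidle(int cpu)
{
	/* was: cpu_set(cpu, demo_grq.cpu_idle_map); */
	cpumask_set_cpu(cpu, &demo_grq.cpu_idle_map);
	demo_grq.idle_cpus = true;
}

static void demo_clear_cpuidle(int cpu)
{
	/* was: cpu_clear(cpu, ...) and cpus_empty(...) */
	cpumask_clear_cpu(cpu, &demo_grq.cpu_idle_map);
	if (cpumask_empty(&demo_grq.cpu_idle_map))
		demo_grq.idle_cpus = false;
}

static int demo_count_idle_and_allowed(const cpumask_t *allowed)
{
	cpumask_t tmp;
	int cpu, count = 0;

	/* was: cpus_and(tmp, *allowed, demo_grq.cpu_idle_map); */
	cpumask_and(&tmp, allowed, &demo_grq.cpu_idle_map);

	/* was: for_each_cpu_mask(cpu, tmp) -- iterated the mask as an lvalue */
	for_each_cpu(cpu, &tmp)
		count++;

	return count;
}

The difference throughout is that the removed operators took cpumask_t lvalues (hence the dropped *tmpmask and *thread_cpumask(cpu) dereferences above), while the replacements all take struct cpumask pointers, which is why the patch adds & in front of grq.cpu_idle_map and p->cpus_allowed and is also what lets the same calls work with cpumask_var_t under CONFIG_CPUMASK_OFFSTACK.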