Improve the preempt call by explicitly setting a task that will be used for the lookup in earliest_deadline_task, to avoid iterating over all tasks. Set the effective runqueue priority for further calls to try_preempt based on the preempting task's priority. Thus, on the small chance that something else tries to preempt the runqueue before the new task gets the CPU, it will compare against the previously successful preempting task. This should prevent a preempting task from rescheduling a runqueue only to have that CPU take a different task instead of the preempting one.

Clean up a number of variables from unnecessary longs to ints, and from ints to bools.

Micro-optimise around try_preempt by setting highest_prio to that of the task trying to preempt, to avoid unnecessary comparisons.

Break sole affinity on cpu offline -after- the cpu has been set to be offline.

-ck
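[Editor's note: before the patch itself, a minimal userspace sketch of the preempt hand-off described above may help when reading the diff. It is not kernel code: the grq locking, the priority bitmap and the deadline scan are all omitted, and the struct task / struct runq types below are simplified stand-ins for the kernel's task_struct and rq. It only illustrates the two halves of the mechanism in the patch — preempt_rq() recording the preempting task and advertising its priority, and the next pick on that CPU taking the recorded task directly rather than rescanning every queued task.]

/* Simplified model of the preempt hand-off (illustration only, not kernel code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int prio; const char *name; bool queued; };

struct runq {
	struct task *curr;	/* task currently on the CPU */
	struct task *preempt;	/* task that successfully preempted this rq, if any */
	int rq_prio;		/* apparent priority seen by later try_preempt calls */
};

/* Mirrors preempt_rq(): remember who preempted us and advertise its priority. */
static void preempt_rq(struct runq *rq, struct task *p)
{
	rq->preempt = p;
	rq->rq_prio = p->prio;	/* later preempt attempts compare against p, not curr */
	/* resched_task(rq->curr) would be called here in the kernel */
}

/* Mirrors the new fast path at the top of earliest_deadline_task(). */
static struct task *pick_next(struct runq *rq, struct task *idle)
{
	struct task *p = rq->preempt;

	rq->preempt = NULL;
	if (p && p->queued)
		return p;	/* take the preempting task without scanning the queue */
	/* ... otherwise fall back to the full earliest-deadline scan ... */
	return idle;
}

int main(void)
{
	struct task idle = { 140, "idle", false };
	struct task waker = { 100, "waker", true };
	struct runq rq = { &idle, NULL, 140 };

	preempt_rq(&rq, &waker);	/* resched_best_mask() chose this runqueue */
	printf("next: %s\n", pick_next(&rq, &idle)->name);	/* prints "next: waker" */
	return 0;
}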
Index: linux-3.0.0-bfs/include/linux/sched.h
===================================================================
--- linux-3.0.0-bfs.orig/include/linux/sched.h	2011-10-17 14:10:51.684150117 +1100
+++ linux-3.0.0-bfs/include/linux/sched.h	2011-10-17 14:14:51.402150079 +1100
@@ -1233,10 +1233,12 @@ struct task_struct {
 	struct task_struct *wake_entry;
 #endif
 #if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS)
-	int on_cpu;
+	bool on_cpu;
 #endif
 #endif
-	int on_rq;
+#ifndef CONFIG_SCHED_BFS
+	bool on_rq;
+#endif
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1247,7 +1249,7 @@ struct task_struct {
 	u64 last_ran;
 	u64 sched_time; /* sched_clock time spent running */
 #ifdef CONFIG_SMP
-	int sticky; /* Soft affined flag */
+	bool sticky; /* Soft affined flag */
 #endif
 	unsigned long rt_timeout;
 #else /* CONFIG_SCHED_BFS */
@@ -1596,7 +1598,7 @@ struct task_struct {
 };
 
 #ifdef CONFIG_SCHED_BFS
-extern int grunqueue_is_locked(void);
+extern bool grunqueue_is_locked(void);
 extern void grq_unlock_wait(void);
 extern void cpu_scaling(int cpu);
 extern void cpu_nonscaling(int cpu);
@@ -1611,14 +1613,14 @@ static inline void tsk_cpus_current(stru
 
 static inline void print_scheduler_version(void)
 {
-	printk(KERN_INFO"BFS CPU scheduler v0.406 by Con Kolivas.\n");
+	printk(KERN_INFO"BFS CPU scheduler v0.413 by Con Kolivas.\n");
 }
 
-static inline int iso_task(struct task_struct *p)
+static inline bool iso_task(struct task_struct *p)
 {
 	return (p->policy == SCHED_ISO);
 }
-extern void remove_cpu(unsigned long cpu);
+extern void remove_cpu(int cpu);
 #else /* CFS */
 extern int runqueue_is_locked(int cpu);
 static inline void cpu_scaling(int cpu)
@@ -1641,12 +1643,12 @@ static inline void print_scheduler_versi
 	printk(KERN_INFO"CFS CPU scheduler.\n");
 }
 
-static inline int iso_task(struct task_struct *p)
+static inline bool iso_task(struct task_struct *p)
 {
-	return 0;
+	return false;
 }
 
-static inline void remove_cpu(unsigned long cpu)
+static inline void remove_cpu(int cpu)
 {
 }
 #endif /* CONFIG_SCHED_BFS */
@@ -2658,21 +2660,21 @@ extern void signal_wake_up(struct task_s
  */
 #ifdef CONFIG_SMP
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
 {
 	return task_thread_info(p)->cpu;
 }
 
-extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
+extern void set_task_cpu(struct task_struct *p, int cpu);
 
 #else
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
 {
 	return 0;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+static inline void set_task_cpu(struct task_struct *p, int cpu)
 {
 }
 
Index: linux-3.0.0-bfs/kernel/sched_bfs.c
===================================================================
--- linux-3.0.0-bfs.orig/kernel/sched_bfs.c	2011-10-17 14:10:51.684150117 +1100
+++ linux-3.0.0-bfs/kernel/sched_bfs.c	2011-10-17 17:07:02.812148498 +1100
@@ -168,7 +168,7 @@ struct global_rq {
 #ifdef CONFIG_SMP
 	unsigned long qnr; /* queued not running */
 	cpumask_t cpu_idle_map;
-	int idle_cpus;
+	bool idle_cpus;
 #endif
 	int noc; /* num_online_cpus stored and updated when it changes */
 	u64 niffies; /* Nanosecond jiffies */
@@ -227,7 +227,7 @@ struct rq {
 #endif
 #endif
 
-	struct task_struct *curr, *idle, *stop;
+	struct task_struct *curr, *idle, *stop, *preempt;
 	struct mm_struct *prev_mm;
 
 	/* Stored data about rq->curr to work outside grq lock */
@@ -236,7 +236,7 @@ struct rq {
 	int rq_time_slice;
 	u64 rq_last_ran;
 	int rq_prio;
-	int rq_running; /* There is a task running */
+	bool rq_running; /* There is a task running */
 
 	/* Accurate timekeeping data */
 	u64 timekeep_clock;
@@ -247,20 +247,20 @@ struct rq {
 
 #ifdef CONFIG_SMP
 	int cpu;		/* cpu of this runqueue */
-	int online;
-	int scaling; /* This CPU is managed by a scaling CPU freq governor */
+	bool online;
+	bool scaling; /* This CPU is managed by a scaling CPU freq governor */
 	struct task_struct *sticky_task;
 
 	struct root_domain *rd;
 	struct sched_domain *sd;
-	unsigned long *cpu_locality; /* CPU relative cache distance */
+	int *cpu_locality; /* CPU relative cache distance */
 #ifdef CONFIG_SCHED_SMT
-	int (*siblings_idle)(unsigned long cpu);
+	bool (*siblings_idle)(int cpu);
 	/* See if all smt siblings are idle */
 	cpumask_t smt_siblings;
 #endif
 #ifdef CONFIG_SCHED_MC
-	int (*cache_idle)(unsigned long cpu);
+	bool (*cache_idle)(int cpu);
 	/* See if all cache siblings are idle */
 	cpumask_t cache_siblings;
 #endif
@@ -271,7 +271,7 @@ struct rq {
 #endif
 	u64 clock, old_clock, last_tick;
 	u64 clock_task;
-	int dither;
+	bool dither;
 
 #ifdef CONFIG_SCHEDSTATS
 
@@ -439,7 +439,7 @@ static inline void update_rq_clock(struc
 	update_rq_clock_task(rq, delta);
 }
 
-static inline int task_running(struct task_struct *p)
+static inline bool task_running(struct task_struct *p)
 {
 	return p->on_cpu;
 }
@@ -537,7 +537,7 @@ static inline void task_grq_unlock(unsig
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-inline int grunqueue_is_locked(void)
+inline bool grunqueue_is_locked(void)
 {
 	return raw_spin_is_locked(&grq.lock);
 }
@@ -610,12 +610,12 @@ static inline void finish_lock_switch(st
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
-static inline int deadline_before(u64 deadline, u64 time)
+static inline bool deadline_before(u64 deadline, u64 time)
 {
 	return (deadline < time);
 }
 
-static inline int deadline_after(u64 deadline, u64 time)
+static inline bool deadline_after(u64 deadline, u64 time)
 {
 	return (deadline > time);
 }
@@ -626,7 +626,7 @@ static inline int deadline_after(u64 dea
 * A task that is currently running will have ->on_cpu set but not on the
 * grq run list.
 */
-static inline int task_queued(struct task_struct *p)
+static inline bool task_queued(struct task_struct *p)
 {
 	return (!list_empty(&p->run_list));
 }
@@ -645,7 +645,7 @@ static void dequeue_task(struct task_str
 * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
 * an idle task, we ensure none of the following conditions are met.
 */
-static int idleprio_suitable(struct task_struct *p)
+static bool idleprio_suitable(struct task_struct *p)
 {
 	return (!freezing(p) && !signal_pending(p) &&
 		!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));
@@ -655,7 +655,7 @@ static int idleprio_suitable(struct task
 * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
 * that the iso_refractory flag is not set.
 */
-static int isoprio_suitable(void)
+static bool isoprio_suitable(void)
 {
 	return !grq.iso_refractory;
 }
@@ -710,6 +710,18 @@ static inline int task_timeslice(struct
 	return (rr_interval * task_prio_ratio(p) / 128);
 }
 
+static void resched_task(struct task_struct *p);
+
+static inline void preempt_rq(struct rq *rq, struct task_struct *p)
+{
+	rq->preempt = p;
+	/* We set the runqueue's apparent priority to the task that will
+	 * replace the current one in case something else tries to preempt
+	 * this runqueue before p gets scheduled */
+	rq->rq_prio = p->prio;
+	resched_task(rq->curr);
+}
+
 #ifdef CONFIG_SMP
 /*
  * qnr is the "queued but not running" count which is the total number of
@@ -737,30 +749,28 @@ static inline int queued_notrunning(void
  * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the
  * idle_cpus variable than to do a full bitmask check when we are busy.
  */
-static inline void set_cpuidle_map(unsigned long cpu)
+static inline void set_cpuidle_map(int cpu)
 {
 	if (likely(cpu_online(cpu))) {
 		cpu_set(cpu, grq.cpu_idle_map);
-		grq.idle_cpus = 1;
+		grq.idle_cpus = true;
 	}
 }
 
-static inline void clear_cpuidle_map(unsigned long cpu)
+static inline void clear_cpuidle_map(int cpu)
 {
 	cpu_clear(cpu, grq.cpu_idle_map);
 	if (cpus_empty(grq.cpu_idle_map))
-		grq.idle_cpus = 0;
+		grq.idle_cpus = false;
 }
 
-static int suitable_idle_cpus(struct task_struct *p)
+static bool suitable_idle_cpus(struct task_struct *p)
 {
 	if (!grq.idle_cpus)
-		return 0;
+		return false;
 	return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));
 }
 
-static void resched_task(struct task_struct *p);
-
 #define CPUIDLE_DIFF_THREAD	(1)
 #define CPUIDLE_DIFF_CORE	(2)
 #define CPUIDLE_CACHE_BUSY	(4)
@@ -785,14 +795,16 @@ static void resched_task(struct task_str
 * Other node, other CPU, busy threads.
 */
 static void
-resched_best_mask(unsigned long best_cpu, struct rq *rq, cpumask_t *tmpmask)
+resched_best_mask(cpumask_t *tmpmask, struct task_struct *p)
 {
-	unsigned long cpu_tmp, best_ranking;
-
-	best_ranking = ~0UL;
+	unsigned int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
+		CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
+		CPUIDLE_DIFF_THREAD;
+	int cpu_tmp, best_cpu = task_cpu(p);
+	struct rq *rq = task_rq(p);
 
 	for_each_cpu_mask(cpu_tmp, *tmpmask) {
-		unsigned long ranking;
+		unsigned int ranking;
 		struct rq *tmp_rq;
 
 		ranking = 0;
@@ -825,7 +837,7 @@ resched_best_mask(unsigned long best_cpu
 		}
 	}
 
-	resched_task(cpu_rq(best_cpu)->curr);
+	preempt_rq(cpu_rq(best_cpu), p);
 }
 
 static void resched_best_idle(struct task_struct *p)
@@ -833,7 +845,7 @@ static void resched_best_idle(struct tas
 	cpumask_t tmpmask;
 
 	cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map);
-	resched_best_mask(task_cpu(p), task_rq(p), &tmpmask);
+	resched_best_mask(&tmpmask, p);
 }
 
 static inline void resched_suitable_idle(struct task_struct *p)
@@ -848,15 +860,15 @@ static inline void resched_suitable_idle
 */
 void cpu_scaling(int cpu)
 {
-	cpu_rq(cpu)->scaling = 1;
+	cpu_rq(cpu)->scaling = true;
 }
 
 void cpu_nonscaling(int cpu)
 {
-	cpu_rq(cpu)->scaling = 0;
+	cpu_rq(cpu)->scaling = false;
 }
 
-static inline int scaling_rq(struct rq *rq)
+static inline bool scaling_rq(struct rq *rq)
 {
 	return rq->scaling;
 }
@@ -874,15 +886,15 @@ static inline int queued_notrunning(void
 	return grq.nr_running;
 }
 
-static inline void set_cpuidle_map(unsigned long cpu)
+static inline void set_cpuidle_map(int cpu)
 {
 }
 
-static inline void clear_cpuidle_map(unsigned long cpu)
+static inline void clear_cpuidle_map(int cpu)
 {
 }
 
-static inline int suitable_idle_cpus(struct task_struct *p)
+static inline bool suitable_idle_cpus(struct task_struct *p)
 {
 	return uprq->curr == uprq->idle;
 }
@@ -903,9 +915,9 @@ void cpu_nonscaling(int __unused)
 * Although CPUs can scale in UP, there is nowhere else for tasks to go so this
 * always returns 0.
 */
-static inline int scaling_rq(struct rq *rq)
+static inline bool scaling_rq(struct rq *rq)
 {
-	return 0;
+	return false;
 }
 #endif /* CONFIG_SMP */
 EXPORT_SYMBOL_GPL(cpu_scaling);
@@ -989,7 +1001,7 @@ static inline void deactivate_task(struc
 }
 
 #ifdef CONFIG_SMP
-void set_task_cpu(struct task_struct *p, unsigned int cpu)
+void set_task_cpu(struct task_struct *p, int cpu)
 {
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -1012,17 +1024,17 @@ void set_task_cpu(struct task_struct *p,
 
 static inline void clear_sticky(struct task_struct *p)
 {
-	p->sticky = 0;
+	p->sticky = false;
 }
 
-static inline int task_sticky(struct task_struct *p)
+static inline bool task_sticky(struct task_struct *p)
 {
 	return p->sticky;
 }
 
 /* Reschedule the best idle CPU that is not this one. */
 static void
-resched_closest_idle(struct rq *rq, unsigned long cpu, struct task_struct *p)
+resched_closest_idle(struct rq *rq, int cpu, struct task_struct *p)
 {
 	cpumask_t tmpmask;
 
@@ -1030,7 +1042,7 @@ resched_closest_idle(struct rq *rq, unsi
 	cpu_clear(cpu, tmpmask);
 	if (cpus_empty(tmpmask))
 		return;
-	resched_best_mask(cpu, rq, &tmpmask);
+	resched_best_mask(&tmpmask, p);
 }
 
 /*
@@ -1041,7 +1053,7 @@ resched_closest_idle(struct rq *rq, unsi
 * latency at all times.
 */
 static inline void
-swap_sticky(struct rq *rq, unsigned long cpu, struct task_struct *p)
+swap_sticky(struct rq *rq, int cpu, struct task_struct *p)
 {
 	if (rq->sticky_task) {
 		if (rq->sticky_task == p) {
@@ -1072,13 +1084,13 @@ static inline void clear_sticky(struct t
 {
 }
 
-static inline int task_sticky(struct task_struct *p)
+static inline bool task_sticky(struct task_struct *p)
 {
-	return 0;
+	return false;
 }
 
 static inline void
-swap_sticky(struct rq *rq, unsigned long cpu, struct task_struct *p)
+swap_sticky(struct rq *rq, int cpu, struct task_struct *p)
 {
 }
 
@@ -1091,9 +1103,9 @@ static inline void unstick_task(struct r
 * Move a task off the global queue and take it to a cpu for it will
 * become the running task.
 */
-static inline void take_task(struct rq *rq, struct task_struct *p)
+static inline void take_task(int cpu, struct task_struct *p)
 {
-	set_task_cpu(p, cpu_of(rq));
+	set_task_cpu(p, cpu);
 	dequeue_task(p);
 	clear_sticky(p);
 	dec_qnr();
@@ -1189,7 +1201,7 @@ struct migration_req {
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
-	int running, on_rq;
+	bool running, on_rq;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1319,19 +1331,20 @@ EXPORT_SYMBOL_GPL(kick_process);
 * between themselves, they cooperatively multitask. An idle rq scores as
 * prio PRIO_LIMIT so it is always preempted.
 */
-static inline int
+static inline bool
 can_preempt(struct task_struct *p, int prio, u64 deadline)
 {
 	/* Better static priority RT task or better policy preemption */
 	if (p->prio < prio)
-		return 1;
+		return true;
 	if (p->prio > prio)
-		return 0;
+		return false;
 	/* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */
 	if (!deadline_before(p->deadline, deadline))
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_HOTPLUG_CPU
 /*
@@ -1339,15 +1352,15 @@ can_preempt(struct task_struct *p, int p
 * still wants runtime. This happens to kernel threads during suspend/halt and
 * disabling of CPUs.
 */
-static inline int online_cpus(struct task_struct *p)
+static inline bool online_cpus(struct task_struct *p)
 {
 	return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed)));
 }
 #else /* CONFIG_HOTPLUG_CPU */
 /* All available CPUs are always online without hotplug. */
-static inline int online_cpus(struct task_struct *p)
+static inline bool online_cpus(struct task_struct *p)
 {
-	return 1;
+	return true;
 }
 #endif
 
@@ -1355,11 +1368,11 @@ static inline int online_cpus(struct tas
 * Check to see if p can run on cpu, and if not, whether there are any online
 * CPUs it can run on instead.
 */
-static inline int needs_other_cpu(struct task_struct *p, int cpu)
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
 {
 	if (unlikely(!cpu_isset(cpu, p->cpus_allowed)))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /*
@@ -1369,9 +1382,8 @@ static inline int needs_other_cpu(struct
 static void try_preempt(struct task_struct *p, struct rq *this_rq)
 {
 	struct rq *highest_prio_rq = this_rq;
+	int cpu, highest_prio;
 	u64 latest_deadline;
-	unsigned long cpu;
-	int highest_prio;
 	cpumask_t tmp;
 
 	/*
@@ -1396,7 +1408,7 @@ static void try_preempt(struct task_stru
 		return;
 
 	latest_deadline = 0;
-	highest_prio = -1;
+	highest_prio = p->prio;
 
 	for_each_cpu_mask(cpu, tmp) {
 		struct rq *rq;
@@ -1421,9 +1433,9 @@ static void try_preempt(struct task_stru
 	resched_task(highest_prio_rq->curr);
 }
 #else /* CONFIG_SMP */
-static inline int needs_other_cpu(struct task_struct *p, int cpu)
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
 {
-	return 0;
+	return false;
 }
 
 static void try_preempt(struct task_struct *p, struct rq *this_rq)
@@ -1516,12 +1528,13 @@ void scheduler_ipi(void)
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
-static int try_to_wake_up(struct task_struct *p, unsigned int state,
+static bool try_to_wake_up(struct task_struct *p, unsigned int state,
			  int wake_flags)
 {
+	bool success = false;
 	unsigned long flags;
-	int cpu, success = 0;
 	struct rq *rq;
+	int cpu;
 
 	get_cpu();
 
@@ -1668,7 +1681,7 @@ void sched_fork(struct task_struct *p)
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 
-	p->on_cpu = 0;
+	p->on_cpu = false;
 	clear_sticky(p);
 
 #ifdef CONFIG_PREEMPT
@@ -2683,7 +2696,7 @@ static inline void no_iso_tick(void)
 	}
 }
 
-static int rq_running_iso(struct rq *rq)
+static bool rq_running_iso(struct rq *rq)
 {
 	return rq->rq_prio == ISO_PRIO;
 }
@@ -2901,14 +2914,21 @@ static inline void check_deadline(struct
 * selected by the earliest deadline.
 */
 static inline struct
-task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle)
+task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
 {
-	u64 dl, earliest_deadline = 0; /* Initialise to silence compiler */
-	struct task_struct *p, *edt = idle;
-	unsigned int cpu = cpu_of(rq);
+	struct task_struct *p, *edt, *rqpreempt = rq->preempt;
+	u64 dl, uninitialized_var(earliest_deadline);
 	struct list_head *queue;
 	int idx = 0;
 
+	if (rqpreempt) {
+		rq->preempt = NULL;
+		if (task_queued(rqpreempt)) {
+			edt = rqpreempt;
+			goto out_take;
+		}
+	}
+	edt = idle;
 retry:
 	idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx);
 	if (idx >= PRIO_LIMIT)
@@ -2950,8 +2970,8 @@ retry:
 
 		/*
 		 * No rt tasks. Find the earliest deadline task. Now we're in
-		 * O(n) territory. This is what we silenced the compiler for:
-		 * edt will always start as idle.
+		 * O(n) territory. This is what we silenced the compiler for
+		 * with uninitialized_var(): edt will always start as idle.
		 */
		if (edt == idle ||
		    deadline_before(dl, earliest_deadline)) {
@@ -2965,7 +2985,7 @@ retry:
 		goto out;
 	}
 out_take:
-	take_task(rq, edt);
+	take_task(cpu, edt);
 out:
 	return edt;
 }
@@ -3022,9 +3042,9 @@ static inline void set_rq_task(struct rq
 	rq->rq_policy = p->policy;
 	rq->rq_prio = p->prio;
 	if (p != rq->idle)
-		rq->rq_running = 1;
+		rq->rq_running = true;
 	else
-		rq->rq_running = 0;
+		rq->rq_running = false;
 }
 
 static void reset_rq_task(struct rq *rq, struct task_struct *p)
@@ -3099,9 +3119,9 @@ need_resched:
 	update_clocks(rq);
 	update_cpu_clock(rq, prev, 0);
 	if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
-		rq->dither = 0;
+		rq->dither = false;
 	else
-		rq->dither = 1;
+		rq->dither = true;
 
 	clear_tsk_need_resched(prev);
 
@@ -3140,7 +3160,7 @@ need_resched:
 		schedstat_inc(rq, sched_goidle);
 		set_cpuidle_map(cpu);
 	} else {
-		next = earliest_deadline_task(rq, idle);
+		next = earliest_deadline_task(rq, cpu, idle);
 		if (likely(next->prio != PRIO_LIMIT)) {
 			prefetch(next);
 			prefetch_stack(next);
@@ -3158,8 +3178,8 @@ need_resched:
 			unstick_task(rq, prev);
 		set_rq_task(rq, next);
 		grq.nr_switches++;
-		prev->on_cpu = 0;
-		next->on_cpu = 1;
+		prev->on_cpu = false;
+		next->on_cpu = true;
 		rq->curr = next;
 		++*switch_count;
 
@@ -4457,7 +4477,7 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-static inline int should_resched(void)
+static inline bool should_resched(void)
 {
 	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
 }
@@ -5045,8 +5065,6 @@ void sched_idle_next(struct rq *rq, int
 	/* cpu has to be offline */
 	BUG_ON(cpu_online(this_cpu));
 
-	break_sole_affinity(this_cpu, idle);
-
 	__setscheduler(idle, rq, SCHED_FIFO, STOP_PRIO);
 
 	activate_idle_task(idle);
@@ -5268,7 +5286,7 @@ static void set_rq_online(struct rq *rq)
 {
 	if (!rq->online) {
 		cpumask_set_cpu(cpu_of(rq), rq->rd->online);
-		rq->online = 1;
+		rq->online = true;
 	}
 }
 
@@ -5276,7 +5294,7 @@ static void set_rq_offline(struct rq *rq
 {
 	if (rq->online) {
 		cpumask_clear_cpu(cpu_of(rq), rq->rd->online);
-		rq->online = 0;
+		rq->online = false;
 	}
 }
 
@@ -5331,6 +5349,7 @@ migration_call(struct notifier_block *nf
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
+		break_sole_affinity(cpu, idle);
 		grq.noc = num_online_cpus();
 		grq_unlock_irqrestore(&flags);
 		break;
@@ -6663,14 +6682,14 @@ static int cpuset_cpu_inactive(struct no
 * Cheaper version of the below functions in case support for SMT and MC is
 * compiled in but CPUs have no siblings.
 */
-static int sole_cpu_idle(unsigned long cpu)
+static bool sole_cpu_idle(int cpu)
 {
 	return rq_idle(cpu_rq(cpu));
 }
 #endif
 #ifdef CONFIG_SCHED_SMT
 /* All this CPU's SMT siblings are idle */
-static int siblings_cpu_idle(unsigned long cpu)
+static bool siblings_cpu_idle(int cpu)
 {
 	return cpumask_subset(&(cpu_rq(cpu)->smt_siblings),
 			      &grq.cpu_idle_map);
@@ -6678,7 +6697,7 @@ static int siblings_cpu_idle(unsigned lo
 #endif
 #ifdef CONFIG_SCHED_MC
 /* All this CPU's shared cache siblings are idle */
-static int cache_cpu_idle(unsigned long cpu)
+static bool cache_cpu_idle(int cpu)
 {
 	return cpumask_subset(&(cpu_rq(cpu)->cache_siblings),
 			      &grq.cpu_idle_map);
@@ -6736,8 +6755,7 @@ void __init sched_init_smp(void)
 	for_each_online_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 		for_each_domain(cpu, sd) {
-			unsigned long locality;
-			int other_cpu;
+			int locality, other_cpu;
 
 #ifdef CONFIG_SCHED_SMT
 			if (sd->level == SD_LV_SIBLING) {
@@ -6823,13 +6841,14 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
			      rq->iowait_pc = rq->idle_pc = 0;
-		rq->dither = 0;
+		rq->dither = false;
+		rq->preempt = NULL;
 #ifdef CONFIG_SMP
 		rq->sticky_task = NULL;
 		rq->last_niffy = 0;
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq->online = 0;
+		rq->online = false;
 		rq->cpu = i;
 		rq_attach_root(rq, &def_root_domain);
 #endif
@@ -6858,8 +6877,7 @@ void __init sched_init(void)
 		rq->cache_idle = sole_cpu_idle;
 		cpumask_set_cpu(i, &rq->cache_siblings);
 #endif
-		rq->cpu_locality = kmalloc(nr_cpu_ids * sizeof(unsigned long),
-					   GFP_NOWAIT);
+		rq->cpu_locality = kmalloc(nr_cpu_ids * sizeof(int), GFP_ATOMIC);
 		for_each_possible_cpu(j) {
 			if (i == j)
				rq->cpu_locality[j] = 0;