Remove the rq->preempt entry. It could lead to some rare races on fork &
process exit.

-ck

---
 kernel/sched_bfs.c | 37 +++++++++----------------------------
 1 file changed, 9 insertions(+), 28 deletions(-)

Index: linux-3.1-ck2/kernel/sched_bfs.c
===================================================================
--- linux-3.1-ck2.orig/kernel/sched_bfs.c	2011-11-11 13:28:24.021507491 +1100
+++ linux-3.1-ck2/kernel/sched_bfs.c	2011-11-11 13:28:24.197507478 +1100
@@ -230,7 +230,7 @@ struct rq {
 #endif
 #endif
 
-	struct task_struct *curr, *idle, *stop, *preempt;
+	struct task_struct *curr, *idle, *stop;
 	struct mm_struct *prev_mm;
 
 	/* Stored data about rq->curr to work outside grq lock */
@@ -739,18 +739,6 @@ static inline int task_timeslice(struct
 	return (rr_interval * task_prio_ratio(p) / 128);
 }
 
-static void resched_task(struct task_struct *p);
-
-static inline void preempt_rq(struct rq *rq, struct task_struct *p)
-{
-	rq->preempt = p;
-	/* We set the runqueue's apparent priority to the task that will
-	 * replace the current one in case something else tries to preempt
-	 * this runqueue before p gets scheduled */
-	rq->rq_prio = p->prio;
-	resched_task(rq->curr);
-}
-
 #ifdef CONFIG_SMP
 /*
  * qnr is the "queued but not running" count which is the total number of
@@ -807,6 +795,8 @@ static bool suitable_idle_cpus(struct ta
 #define CPUIDLE_THREAD_BUSY	(16)
 #define CPUIDLE_DIFF_NODE	(32)
 
+static void resched_task(struct task_struct *p);
+
 /*
  * The best idle CPU is chosen according to the CPUIDLE ranking above where the
  * lowest value would give the most suitable CPU to schedule p onto next. The
@@ -866,7 +856,7 @@ resched_best_mask(cpumask_t *tmpmask, st
 		}
 	}
 
-	preempt_rq(cpu_rq(best_cpu), p);
+	resched_task(cpu_rq(best_cpu)->curr);
 }
 
 static void resched_best_idle(struct task_struct *p)
@@ -1405,12 +1395,11 @@ static inline bool needs_other_cpu(struc
 }
 
 /*
- * latest_deadline and highest_prio_rq are initialised only to silence the
- * compiler. When all else is equal, still prefer this_rq.
+ * When all else is equal, still prefer this_rq.
  */
 static void try_preempt(struct task_struct *p, struct rq *this_rq)
 {
-	struct rq *highest_prio_rq = this_rq;
+	struct rq *highest_prio_rq;
 	int cpu, highest_prio;
 	u64 latest_deadline;
 	cpumask_t tmp;
@@ -1436,8 +1425,9 @@ static void try_preempt(struct task_stru
 	else
 		return;
 
-	latest_deadline = 0;
 	highest_prio = p->prio;
+	highest_prio_rq = this_rq;
+	latest_deadline = this_rq->rq_deadline;
 
 	for_each_cpu_mask(cpu, tmp) {
 		struct rq *rq;
@@ -2975,19 +2965,11 @@ static inline void check_deadline(struct
 static inline struct
 task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
 {
-	struct task_struct *p, *edt, *rqpreempt = rq->preempt;
 	u64 dl, uninitialized_var(earliest_deadline);
+	struct task_struct *p, *edt = idle;
 	struct list_head *queue;
 	int idx = 0;
 
-	if (rqpreempt) {
-		rq->preempt = NULL;
-		if (task_queued(rqpreempt)) {
-			edt = rqpreempt;
-			goto out_take;
-		}
-	}
-	edt = idle;
 retry:
 	idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx);
 	if (idx >= PRIO_LIMIT)
@@ -6893,7 +6875,6 @@ void __init sched_init(void)
 		rq->user_pc = rq->nice_pc = rq->softirq_pc =
 			      rq->system_pc = rq->iowait_pc = rq->idle_pc = 0;
 		rq->dither = false;
-		rq->preempt = NULL;
 #ifdef CONFIG_SMP
 		rq->sticky_task = NULL;
 		rq->last_niffy = 0;
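
For reference, a sketch of the sort of interleaving the changelog alludes to. This is illustrative only and assumes BFS's usual locking, i.e. that the grq lock is dropped between the waker's try_preempt() and the target CPU's next pass through schedule(); it is not a verified trace of an observed race:

/*
 * CPU A (waker, e.g. at fork/wakeup)    CPU B (target runqueue)
 * ----------------------------------    -----------------------
 * try_preempt(p, this_rq)
 *   preempt_rq(cpu_rq(B), p)
 *     rq->preempt = p;
 *     resched_task(rq->curr);
 *                                       ... p is picked up elsewhere and
 *                                       exits (process exit) before B
 *                                       actually reschedules ...
 *                                       earliest_deadline_task()
 *                                         rqpreempt = rq->preempt;
 *                                         task_queued(rqpreempt)
 *                                           -> rqpreempt can name a task
 *                                              that is already being
 *                                              torn down
 *
 * After this patch CPU A only does resched_task(cpu_rq(B)->curr); CPU B
 * then selects its next task from the global queue under the grq lock,
 * so no cached task pointer can outlive the task it refers to.
 */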