resched_suitable_idle can end up dereferencing p in resched_best_mask, so pass the cpu and rq, rather than the task, to the function. -ck --- kernel/sched_bfs.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) Index: linux-3.1-ck2/kernel/sched_bfs.c =================================================================== --- linux-3.1-ck2.orig/kernel/sched_bfs.c 2011-11-11 13:28:24.543507454 +1100 +++ linux-3.1-ck2/kernel/sched_bfs.c 2011-11-11 13:28:24.720507442 +1100 @@ -814,13 +814,12 @@ static void resched_task(struct task_str * Other node, other CPU, busy threads. */ static void -resched_best_mask(cpumask_t *tmpmask, struct task_struct *p) +resched_best_mask(int best_cpu, struct rq *rq, cpumask_t *tmpmask) { unsigned int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY | CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE | CPUIDLE_DIFF_THREAD; - int cpu_tmp, best_cpu = task_cpu(p); - struct rq *rq = task_rq(p); + int cpu_tmp; for_each_cpu_mask(cpu_tmp, *tmpmask) { unsigned int ranking; @@ -864,7 +863,7 @@ static void resched_best_idle(struct tas cpumask_t tmpmask; cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map); - resched_best_mask(&tmpmask, p); + resched_best_mask(task_cpu(p), task_rq(p), &tmpmask); } static inline void resched_suitable_idle(struct task_struct *p) @@ -1061,7 +1060,7 @@ resched_closest_idle(struct rq *rq, int cpu_clear(cpu, tmpmask); if (cpus_empty(tmpmask)) return; - resched_best_mask(&tmpmask, p); + resched_best_mask(cpu, rq, &tmpmask); } /*