Remove an unnecessary check from wait_task_inactive(); it is no longer
present in mainline either. As reported by Alfred Chen.

-ck

---
 kernel/sched/bfs.c | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

Index: linux-3.15.5-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.15.5-ck1.orig/kernel/sched/bfs.c	2014-07-29 13:25:22.557185875 +1000
+++ linux-3.15.5-ck1/kernel/sched/bfs.c	2014-07-29 13:25:22.556185875 +1000
@@ -1149,21 +1149,8 @@ unsigned long wait_task_inactive(struct
 	struct rq *rq;
 
 	for (;;) {
-		/*
-		 * We do the initial early heuristics without holding
-		 * any task-queue locks at all. We'll only try to get
-		 * the runqueue lock when things look like they will
-		 * work out! In the unlikely event rq is dereferenced
-		 * since we're lockless, grab it again.
-		 */
-#ifdef CONFIG_SMP
-retry_rq:
-		rq = task_rq(p);
-		if (unlikely(!rq))
-			goto retry_rq;
-#else /* CONFIG_SMP */
 		rq = task_rq(p);
-#endif
+
 		/*
 		 * If the task is actively running on another CPU
 		 * still, just relax and busy-wait without holding
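
For readers wondering why the check is dead code: in bfs.c, assuming the
usual cpu_rq()/per_cpu() definitions, task_rq(p) ultimately takes the
address of a statically allocated per-CPU runqueue variable, so it can
never evaluate to NULL and the retry can never trigger. Below is a
minimal user-space sketch of the removed pattern; the struct layouts,
the NR_CPUS value, and the task_rq_sketch()/get_task_rq() helpers are
hypothetical stand-ins for illustration, not the kernel's actual
definitions.

#include <stddef.h>

#define NR_CPUS 4

struct rq {
	int cpu;
};

/* Stand-in for the kernel's statically allocated per-CPU runqueues. */
static struct rq runqueues[NR_CPUS];

struct task_struct {
	int cpu;
};

/*
 * Stand-in for task_rq(p): it returns the address of an element of a
 * static array, which is never NULL.
 */
static struct rq *task_rq_sketch(struct task_struct *p)
{
	return &runqueues[p->cpu];
}

/* The shape of the SMP branch this patch deletes. */
static struct rq *get_task_rq(struct task_struct *p)
{
	struct rq *rq;

retry_rq:
	rq = task_rq_sketch(p);
	if (rq == NULL)		/* always false: &runqueues[i] != NULL */
		goto retry_rq;

	return rq;
}

int main(void)
{
	struct task_struct p = { .cpu = 1 };

	return get_task_rq(&p) == &runqueues[1] ? 0 : 1;
}

With task_rq() unable to return NULL, the SMP branch of the removed
#ifdef collapses to the same plain assignment as the non-SMP branch,
which is exactly the single line the patch keeps.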