Reinstate the old BFS schedule flush plug behaviour after locking in
schedule().

-ck

---
 kernel/sched/bfs.c | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

Index: linux-4.1.4-ck1/kernel/sched/bfs.c
===================================================================
--- linux-4.1.4-ck1.orig/kernel/sched/bfs.c	2015-08-06 09:31:07.317487340 +1000
+++ linux-4.1.4-ck1/kernel/sched/bfs.c	2015-08-06 09:38:39.367987643 +1000
@@ -3368,10 +3368,12 @@ static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next, *idle;
 	unsigned long *switch_count;
-	bool deactivate = false;
+	bool deactivate;
 	struct rq *rq;
 	int cpu;
 
+need_resched:
+	deactivate = false;
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -3418,6 +3420,17 @@ static void __sched __schedule(void)
 		switch_count = &prev->nvcsw;
 	}
 
+	/*
+	 * If we are going to sleep and we have plugged IO queued, make
+	 * sure to submit it to avoid deadlocks.
+	 */
+	if (unlikely(deactivate && blk_needs_flush_plug(prev))) {
+		grq_unlock_irq();
+		preempt_enable_no_resched();
+		blk_schedule_flush_plug(prev);
+		goto need_resched;
+	}
+
 	update_clocks(rq);
 	update_cpu_clock_switch(rq, prev);
 	if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
@@ -3508,23 +3521,8 @@ rerun_prev_unlocked:
 	sched_preempt_enable_no_resched();
 }
 
-static inline void sched_submit_work(struct task_struct *tsk)
-{
-	if (!tsk->state || tsk_is_pi_blocked(tsk))
-		return;
-	/*
-	 * If we are going to sleep and we have plugged IO queued,
-	 * make sure to submit it to avoid deadlocks.
-	 */
-	if (blk_needs_flush_plug(tsk))
-		blk_schedule_flush_plug(tsk);
-}
-
 asmlinkage __visible void __sched schedule(void)
 {
-	struct task_struct *tsk = current;
-
-	sched_submit_work(tsk);
 	do {
 		__schedule();
 	} while (need_resched());
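
For readers outside the kernel tree, the pattern this patch restores is
"unlock, flush, retry from the top": __schedule() notices, while holding
the grq lock, that the task about to sleep still has plugged IO queued;
it drops the lock, submits the IO, and jumps back to need_resched so the
whole decision is re-evaluated under a fresh lock. The sketch below is a
minimal userspace analogue of that control flow, not kernel code; every
name in it (toy_schedule, flush_plug, the pthread mutex standing in for
the grq lock) is a hypothetical illustration, not a BFS or block-layer
API.

/*
 * Userspace sketch of the retry pattern restored by the patch above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t grq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool plugged_io = true;	/* pretend IO sits behind a plug */

static void flush_plug(void)
{
	/* Submit the queued IO; must run without holding grq_lock,
	 * since in the real kernel this path can itself block. */
	printf("flushing plugged IO\n");
	plugged_io = false;
}

static void toy_schedule(bool going_to_sleep)
{
need_resched:
	pthread_mutex_lock(&grq_lock);

	if (going_to_sleep && plugged_io) {
		/* Mirror the patch: drop the lock first, flush outside
		 * it, then restart from the top so the sleep decision
		 * is re-evaluated under a fresh lock. */
		pthread_mutex_unlock(&grq_lock);
		flush_plug();
		goto need_resched;
	}

	printf("context switch would happen here\n");
	pthread_mutex_unlock(&grq_lock);
}

int main(void)
{
	toy_schedule(true);	/* first pass flushes, second switches */
	return 0;
}

The design point the sketch tries to capture is why the goto exists at
all: flushing while still holding the lock could deadlock (submitting IO
may sleep and re-enter the scheduler), and simply continuing after the
flush would act on stale state, so the only safe option is to retry from
the top.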