Add set_preempt_need_resched() on every call to set_tsk_need_resched() where
an explicit reschedule does not occur.

---
 kernel/sched/bfs.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

Index: linux-3.16-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.16-ck1.orig/kernel/sched/bfs.c	2014-08-22 17:07:04.000000000 +1000
+++ linux-3.16-ck1/kernel/sched/bfs.c	2014-08-22 20:17:51.925549648 +1000
@@ -1179,6 +1179,13 @@ static inline void return_task(struct ta
 	}
 }
 
+/* Enter with grq lock held. We know p is on the local cpu */
+static inline void __set_tsk_resched(struct task_struct *p)
+{
+	set_tsk_need_resched(p);
+	set_preempt_need_resched();
+}
+
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
@@ -1788,7 +1795,7 @@ after_ts_init:
 			 * do child-runs-first in anticipation of an exec. This
 			 * usually avoids a lot of COW overhead.
 			 */
-			set_tsk_need_resched(parent);
+			__set_tsk_resched(parent);
 		} else
 			try_preempt(p, rq);
 	} else {
@@ -1800,7 +1807,7 @@ after_ts_init:
 			 * be slightly earlier.
 			 */
 			rq->rq_time_slice = 0;
-			set_tsk_need_resched(parent);
+			__set_tsk_resched(parent);
 		}
 		time_slice_expired(p);
 	}
@@ -2925,9 +2932,10 @@ static void task_running_tick(struct rq
 	/* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */
 	p = rq->curr;
+	grq_lock();
 	requeue_task(p);
-	set_tsk_need_resched(p);
+	__set_tsk_resched(p);
 	grq_unlock();
 }
@@ -4757,9 +4765,9 @@ EXPORT_SYMBOL(yield);
  */
 int __sched yield_to(struct task_struct *p, bool preempt)
 {
+	struct rq *rq, *p_rq;
 	unsigned long flags;
 	int yielded = 0;
-	struct rq *rq;
 
 	rq = this_rq();
 	grq_lock_irqsave(&flags);
@@ -4767,6 +4775,8 @@ int __sched yield_to(struct task_struct
 		yielded = -ESRCH;
 		goto out_unlock;
 	}
+
+	p_rq = task_rq(p);
 	yielded = 1;
 	if (p->deadline > rq->rq_deadline)
 		p->deadline = rq->rq_deadline;
@@ -4774,7 +4784,8 @@ int __sched yield_to(struct task_struct
 	rq->rq_time_slice = 0;
 	if (p->time_slice > timeslice())
 		p->time_slice = timeslice();
-	set_tsk_need_resched(rq->curr);
+	if (preempt && rq != p_rq)
+		resched_task(p_rq->curr);
 
 out_unlock:
 	grq_unlock_irqrestore(&flags);
@@ -5040,7 +5051,6 @@ void init_idle(struct task_struct *idle,
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
-	set_tsk_need_resched(idle);
 }
 
 void resched_cpu(int cpu)
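
Background on why the pairing is needed: on architectures with the folded
preempt count (x86 since 3.13), set_tsk_need_resched() only sets
TIF_NEED_RESCHED, while the preempt_enable() fast path tests the per-cpu
preempt count, into which PREEMPT_NEED_RESCHED is folded with inverted sense.
Until set_preempt_need_resched() clears that bit, a task marked for
rescheduling on the local cpu is never preempted on the preempt_enable()
path. Below is a minimal user-space sketch of that interaction; it is
illustrative only - the names mirror the kernel's, but the model is
stand-alone and not the kernel implementation.

	/*
	 * Minimal user-space sketch (NOT kernel code) of the folded
	 * preempt count that motivates __set_tsk_resched() above.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PREEMPT_NEED_RESCHED	0x80000000u

	/*
	 * Per-cpu preempt count: the NEED_RESCHED bit is stored
	 * *inverted*, so "count == 0" means "preemptible AND
	 * reschedule needed". It starts with the bit set.
	 */
	static unsigned int preempt_count = PREEMPT_NEED_RESCHED;

	/* Per-task TIF_NEED_RESCHED, as set by set_tsk_need_resched(). */
	static bool tif_need_resched;

	static void set_tsk_need_resched(void)
	{
		tif_need_resched = true;
	}

	static void set_preempt_need_resched(void)
	{
		preempt_count &= ~PREEMPT_NEED_RESCHED;
	}

	/*
	 * What preempt_enable()'s dec-and-test boils down to: the
	 * scheduler is only invoked when the whole folded count is zero.
	 */
	static bool preempt_enable_would_resched(void)
	{
		return preempt_count == 0;
	}

	/*
	 * The interrupt-return path, by contrast, tests TIF directly,
	 * so a TIF-only task is eventually rescheduled - just not on
	 * the far more frequent preempt_enable() path.
	 */
	static bool irq_return_would_resched(void)
	{
		return tif_need_resched;
	}

	int main(void)
	{
		/* TIF alone: the folded bit still masks the count. */
		set_tsk_need_resched();
		printf("TIF only:   preempt=%d irq-return=%d\n",
		       preempt_enable_would_resched(),
		       irq_return_would_resched());

		/* Paired, as __set_tsk_resched() does it. */
		set_preempt_need_resched();
		printf("TIF+folded: preempt=%d irq-return=%d\n",
		       preempt_enable_would_resched(),
		       irq_return_would_resched());
		return 0;
	}

Built with a plain cc invocation, this prints preempt=0 for the TIF-only
case and preempt=1 once the bit is folded in - the gap on the local cpu
that __set_tsk_resched() closes.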