Index: linux-3.16-ck2/arch/x86/Kconfig =================================================================== --- linux-3.16-ck2.orig/arch/x86/Kconfig 2014-08-25 09:45:57.910543881 +1000 +++ linux-3.16-ck2/arch/x86/Kconfig 2014-08-25 09:48:39.000000000 +1000 @@ -794,7 +794,7 @@ config SCHED_SMT config SMT_NICE bool "SMT (Hyperthreading) aware nice priority and policy support" - depends on X86_HT && SCHED_BFS + depends on X86_HT && SCHED_BFS && SCHED_SMT default y ---help--- Enabling Hyperthreading on Intel CPUs decreases the effectiveness Index: linux-3.16-ck2/kernel/sched/bfs.c =================================================================== --- linux-3.16-ck2.orig/kernel/sched/bfs.c 2014-08-25 09:45:58.002543872 +1000 +++ linux-3.16-ck2/kernel/sched/bfs.c 2014-08-25 09:45:58.285543846 +1000 @@ -150,7 +150,7 @@ EXPORT_SYMBOL(__smp_mb__after_atomic); void print_scheduler_version(void) { - printk(KERN_INFO "BFS CPU scheduler v0.450 by Con Kolivas.\n"); + printk(KERN_INFO "BFS CPU scheduler v0.456 by Con Kolivas.\n"); } /* @@ -1179,6 +1179,13 @@ static inline void return_task(struct ta } } +/* Enter with grq lock held. We know p is on the local cpu */ +static inline void __set_tsk_resched(struct task_struct *p) +{ + set_tsk_need_resched(p); + set_preempt_need_resched(); +} + /* * resched_task - mark a task 'to be rescheduled now'. * @@ -1505,6 +1512,18 @@ ttwu_stat(struct task_struct *p, int cpu #endif /* CONFIG_SCHEDSTATS */ } +#ifdef CONFIG_SMP +void scheduler_ipi(void) +{ + /* + * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting + * TIF_NEED_RESCHED remotely (for the first time) will also send + * this IPI. + */ + preempt_fold_need_resched(); +} +#endif + static inline void ttwu_activate(struct task_struct *p, struct rq *rq, bool is_sync) { @@ -1776,7 +1795,7 @@ after_ts_init: * do child-runs-first in anticipation of an exec. This * usually avoids a lot of COW overhead. 
 */ - set_tsk_need_resched(parent); + __set_tsk_resched(parent); } else try_preempt(p, rq); } else { @@ -1788,7 +1807,7 @@ after_ts_init: * be slightly earlier. */ rq->rq_time_slice = 0; - set_tsk_need_resched(parent); + __set_tsk_resched(parent); } time_slice_expired(p); } @@ -2913,9 +2932,10 @@ static void task_running_tick(struct rq /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */ p = rq->curr; + grq_lock(); requeue_task(p); - set_tsk_need_resched(p); + __set_tsk_resched(p); grq_unlock(); } @@ -4745,9 +4765,9 @@ EXPORT_SYMBOL(yield); */ int __sched yield_to(struct task_struct *p, bool preempt) { + struct rq *rq, *p_rq; unsigned long flags; int yielded = 0; - struct rq *rq; rq = this_rq(); grq_lock_irqsave(&flags); @@ -4755,6 +4775,8 @@ int __sched yield_to(struct task_struct yielded = -ESRCH; goto out_unlock; } + + p_rq = task_rq(p); yielded = 1; if (p->deadline > rq->rq_deadline) p->deadline = rq->rq_deadline; @@ -4762,7 +4784,8 @@ int __sched yield_to(struct task_struct rq->rq_time_slice = 0; if (p->time_slice > timeslice()) p->time_slice = timeslice(); - set_tsk_need_resched(rq->curr); + if (preempt && rq != p_rq) + resched_task(p_rq->curr); out_unlock: grq_unlock_irqrestore(&flags); @@ -5028,7 +5051,6 @@ void init_idle(struct task_struct *idle, #if defined(CONFIG_SMP) sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif - set_tsk_need_resched(idle); } void resched_cpu(int cpu) @@ -5195,7 +5217,7 @@ out: task_grq_unlock(&flags); if (running_wrong) - _cond_resched(); + __cond_resched(); return ret; } Index: linux-3.16-ck2/include/linux/sched.h =================================================================== --- linux-3.16-ck2.orig/include/linux/sched.h 2014-08-25 09:45:58.094543864 +1000 +++ linux-3.16-ck2/include/linux/sched.h 2014-08-25 09:45:58.092543864 +1000 @@ -2510,12 +2510,7 @@ static inline void set_task_comm(struct extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -#ifdef CONFIG_SCHED_BFS 
-/* scheduler_ipi does nothing on BFS */ -static inline void scheduler_ipi(void) { } -#else void scheduler_ipi(void); -#endif extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { }