---
 include/linux/init_task.h |    2 -
 kernel/sched/bfs.c        |   63 +++++++++++++++++++++++++++++++---------------
 kernel/sched/bfs_sched.h  |    1 
 3 files changed, 44 insertions(+), 22 deletions(-)

Index: linux-4.7-MuQSS/kernel/sched/bfs.c
===================================================================
--- linux-4.7-MuQSS.orig/kernel/sched/bfs.c	2016-10-02 03:06:50.066409715 +1100
+++ linux-4.7-MuQSS/kernel/sched/bfs.c	2016-10-02 14:10:09.685406258 +1100
@@ -137,7 +137,7 @@
 
 void print_scheduler_version(void)
 {
-	printk(KERN_INFO "MuQSS CPU scheduler v0.102 by Con Kolivas.\n");
+	printk(KERN_INFO "MuQSS CPU scheduler v0.103 by Con Kolivas.\n");
 }
 
 /*
@@ -1212,14 +1212,21 @@ static inline void take_task(struct rq *
  * Returns a descheduling task to the runqueue unless it is being
  * deactivated.
  */
-static inline void return_task(struct task_struct *p, struct rq *rq, bool deactivate)
+static inline bool return_task(struct task_struct *p, struct rq *rq,
+			       int cpu, bool deactivate)
 {
+	bool ret = true;
+
 	if (deactivate)
 		deactivate_task(p, rq);
 	else {
 		inc_qnr();
-		enqueue_task(p, rq);
+		if (unlikely(needs_other_cpu(p, cpu)))
+			ret = false;
+		else
+			enqueue_task(p, rq);
 	}
+	return ret;
 }
 
 /* Enter with rq lock held. We know p is on the local cpu */
@@ -1913,8 +1920,6 @@ void wake_up_new_task(struct task_struct
 	parent = p->parent;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	if (unlikely(needs_other_cpu(p, task_cpu(p))))
-		set_task_cpu(p, cpumask_any(tsk_cpus_allowed(p)));
 	rq = __task_rq_lock(p);
 	rq_curr = rq->curr;
 
@@ -3328,16 +3333,16 @@ found_middle:
  *
  * This iterates over runqueues in cache locality order. In interactive mode
  * it iterates over all CPUs and finds the task with the earliest deadline.
- * In non-interactive mode it grabs the first task it finds, being the closest
- * to the current CPU in cache locality.
+ * In non-interactive mode it grabs any task on the local runqueue or the
+ * busiest nearest cache CPU.
  */
 static inline struct
 task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
 {
 	struct task_struct *edt = idle;
 	u64 earliest_deadline = ~0ULL;
+	int busiest = 0, i;
 	cpumask_t locked;
-	int i;
 
 	lock_rqs(rq, &locked);
 
@@ -3360,8 +3365,15 @@ task_struct *earliest_deadline_task(stru
 			continue;
 
 		if (!sched_interactive) {
-			edt = p;
-			break;
+			if (rq == other_rq) {
+				edt = p;
+				break;
+			}
+			if (other_rq->sl->entries > busiest) {
+				edt = p;
+				busiest = other_rq->sl->entries;
+			}
+			continue;
 		}
 
 		if (!deadline_before(p->deadline, earliest_deadline))
@@ -3439,10 +3451,6 @@ static inline void set_rq_task(struct rq
 	rq->rq_mm = p->mm;
 	rq->rq_smt_bias = p->smt_bias;
 #endif
-	if (p != rq->idle)
-		rq->rq_running = true;
-	else
-		rq->rq_running = false;
 }
 
 static void reset_rq_task(struct rq *rq, struct task_struct *p)
@@ -3510,6 +3518,21 @@ static void wake_siblings(struct rq __ma
 #endif
 
 /*
+ * For when a running task has its affinity changed and can no longer run on
+ * the current runqueue and needs to be put on another out of __schedule().
+ */
+static void queue_other_rq(struct task_struct *p)
+{
+	unsigned long flags;
+	struct rq *rq;
+
+	rq = task_rq_lock(p, &flags);
+	if (likely(!task_queued(p)))
+		enqueue_task(p, rq);
+	task_rq_unlock(rq, p, &flags);
+}
+
+/*
  * schedule() is the main scheduler function.
  *
  * The main means of driving the scheduler and thus entering this function are:
@@ -3550,7 +3573,7 @@ static void wake_siblings(struct rq __ma
  */
 static void __sched notrace __schedule(bool preempt)
 {
-	struct task_struct *prev, *next, *idle;
+	struct task_struct *prev, *next, *idle, *queue = NULL;
 	unsigned long *switch_count;
 	bool deactivate = false;
 	struct rq *rq;
@@ -3631,14 +3654,11 @@ static void __sched notrace __schedule(b
 		prev->deadline = rq->rq_deadline;
 		check_deadline(prev, rq);
 		prev->last_ran = rq->clock_task;
-		return_task(prev, rq, deactivate);
+		if (!return_task(prev, rq, cpu, deactivate))
+			queue = prev;
 	}
 
 	if (unlikely(!queued_notrunning())) {
-		/*
-		 * This CPU is now truly idle as opposed to when idle is
-		 * scheduled as a high priority task in its own right.
-		 */
 		next = idle;
 		schedstat_inc(rq, sched_goidle);
 		set_cpuidle_map(cpu);
@@ -3669,6 +3689,8 @@ static void __sched notrace __schedule(b
 
 		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		if (unlikely(queue))
+			queue_other_rq(queue);
 	} else {
 		check_siblings(rq);
 		rq_unlock_irq(rq);
@@ -7355,6 +7377,7 @@ void __init sched_init(void)
 		rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
 			      rq->iowait_pc = rq->idle_pc = 0;
 		rq->dither = false;
+		set_rq_task(rq, &init_task);
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
Index: linux-4.7-MuQSS/kernel/sched/bfs_sched.h
===================================================================
--- linux-4.7-MuQSS.orig/kernel/sched/bfs_sched.h	2016-10-02 00:18:36.805241467 +1000
+++ linux-4.7-MuQSS/kernel/sched/bfs_sched.h	2016-10-02 12:55:48.946371982 +1100
@@ -22,7 +22,6 @@ struct rq {
 	int rq_time_slice;
 	u64 rq_last_ran;
 	int rq_prio;
-	bool rq_running; /* There is a task running */
 
 	u64 load_update; /* When we last updated load */
 	unsigned long load_avg; /* Rolling load average */
Index: linux-4.7-MuQSS/include/linux/init_task.h
===================================================================
--- linux-4.7-MuQSS.orig/include/linux/init_task.h	2016-10-01 20:56:27.463049684 +1000
+++ linux-4.7-MuQSS/include/linux/init_task.h	2016-10-02 13:40:42.856242002 +1100
@@ -204,7 +204,7 @@ extern struct task_group root_task_group
 	.restart_block = {						\
 		.fn = do_no_restart_syscall,				\
 	},								\
-	.time_slice	= HZ,						\
+	.time_slice	= 1000000,					\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	INIT_PUSHABLE_TASKS(tsk)					\
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
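For reference, the three cooperating changes in the bfs.c hunks above are: return_task() now reports when a descheduling task may no longer run on the local CPU, __schedule() stashes such a task in the new queue pointer, and queue_other_rq() enqueues it on a permitted runqueue only after the context switch, once the local runqueue lock has been dropped. The sketch below is a minimal userspace model of that control flow, not MuQSS code: the struct layouts, the allowed_cpu field and the runqueue selection inside queue_other_rq() are stand-ins for the real cpumask and locking machinery.

/*
 * Minimal userspace model of the deferred re-queue flow in this patch.
 * All types and helpers are stand-ins; only the control flow of the
 * return_task()/__schedule()/queue_other_rq() interaction is mirrored.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int allowed_cpu;	/* stand-in for the task's affinity mask */
	const char *name;
};

struct runqueue {
	int cpu;
	int entries;
};

/* Stand-in for needs_other_cpu(): the task may not run on this CPU. */
static bool needs_other_cpu(struct task *p, int cpu)
{
	return p->allowed_cpu != cpu;
}

static void enqueue_task(struct task *p, struct runqueue *rq)
{
	rq->entries++;
	printf("%s enqueued on cpu %d\n", p->name, rq->cpu);
}

/*
 * Mirrors the patched return_task(): instead of enqueueing a task whose
 * affinity no longer covers the local CPU, report failure to the caller.
 */
static bool return_task(struct task *p, struct runqueue *rq, int cpu,
			bool deactivate)
{
	bool ret = true;

	if (deactivate) {
		/* task is going to sleep; nothing to queue */
	} else if (needs_other_cpu(p, cpu)) {
		ret = false;
	} else {
		enqueue_task(p, rq);
	}
	return ret;
}

/* Mirrors queue_other_rq(): put the deferred task on a permitted runqueue. */
static void queue_other_rq(struct task *p, struct runqueue *rqs, int nr_cpus)
{
	enqueue_task(p, &rqs[p->allowed_cpu % nr_cpus]);
}

int main(void)
{
	struct runqueue rqs[2] = { { .cpu = 0 }, { .cpu = 1 } };
	struct task prev = { .allowed_cpu = 1, .name = "prev" };
	struct task *queue = NULL;
	int cpu = 0;

	/* __schedule()-like sequence running on cpu 0 */
	if (!return_task(&prev, &rqs[cpu], cpu, false))
		queue = &prev;

	/* ... the context switch happens here, with the rq lock dropped ... */

	if (queue)
		queue_other_rq(queue, rqs, 2);

	return 0;
}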