---
 kernel/sched/bfs.c |   62 ++++++++++++++++++++++++++---------------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

Index: linux-4.7-MuQSS/kernel/sched/bfs.c
===================================================================
--- linux-4.7-MuQSS.orig/kernel/sched/bfs.c	2016-10-01 21:21:40.227172439 +1000
+++ linux-4.7-MuQSS/kernel/sched/bfs.c	2016-10-02 14:10:19.000000000 +1100
@@ -137,7 +137,7 @@
 
 void print_scheduler_version(void)
 {
-	printk(KERN_INFO "MuQSS CPU scheduler v0.101 by Con Kolivas.\n");
+	printk(KERN_INFO "MuQSS CPU scheduler v0.102 by Con Kolivas.\n");
 }
 
 /*
@@ -174,9 +174,8 @@ static inline int timeslice(void)
 }
 
 /*
- * The global runqueue data that all CPUs work off. Data is protected either
- * by the global grq lock, or the discrete lock that precedes the data in this
- * struct.
+ * The global runqueue data that all CPUs work off. Contains either atomic
+ * variables or iso variables protected by iso_lock.
  */
 struct global_rq {
 	atomic_t nr_running;
@@ -448,10 +447,11 @@ static inline void lock_all_rqs(void)
 {
 	int cpu;
 
+	preempt_disable();
 	for_each_possible_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 
-		rq_lock(rq);
+		do_raw_spin_lock(&rq->lock);
 	}
 }
 
@@ -462,8 +462,9 @@ static inline void unlock_all_rqs(void)
 	for_each_possible_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 
-		rq_unlock(rq);
+		do_raw_spin_unlock(&rq->lock);
 	}
+	preempt_enable();
 }
 
 /*
@@ -479,8 +480,11 @@ static inline void lock_rqs(struct rq *t
 	for_each_online_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 
-		if (rq != this_rq && !rq_trylock(rq))
-			continue;
+		if (rq != this_rq) {
+			if (!do_raw_spin_trylock(&rq->lock))
+				continue;
+			spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
+		}
 		cpumask_set_cpu(cpu, mask);
 	}
 }
@@ -492,8 +496,12 @@ static inline void unlock_rqs(struct rq
 
 	cpumask_clear_cpu(this_rq->cpu, mask);
 
-	for_each_cpu(cpu, mask)
-		raw_spin_unlock(&cpu_rq(cpu)->lock);
+	for_each_cpu(cpu, mask) {
+		struct rq *rq = cpu_rq(cpu);
+
+		spin_release(&rq->lock.dep_map, 1, _RET_IP_);
+		do_raw_spin_unlock(&rq->lock);
+	}
 }
 
 static inline void rq_lock_irq(struct rq *rq)
@@ -1230,7 +1238,6 @@ static inline void __set_tsk_resched(str
  */
 void resched_task(struct task_struct *p)
 {
-	struct rq *rq = task_rq(p);
 	int cpu;
 
 	if (test_tsk_need_resched(p))
@@ -1637,36 +1644,33 @@ static bool try_to_wake_up(struct task_s
 	 * set_current_state() the waiting thread does.
	 */
 	smp_mb__before_spinlock();
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
+
+	/*
+	 * No need to do time_lock_grq as we only need to update the rq clock
+	 * if we activate the task
+	 */
+	rq = task_rq_lock(p, &flags);
+	cpu = task_cpu(p);
 
 	/* state is a volatile long, どうして、分からない */
 	if (!((unsigned int)p->state & state))
-		goto out;
+		goto out_unlock;
 
 	trace_sched_waking(p);
 
-	success = true;
-	cpu = task_cpu(p);
-
-	/*
-	 * No need to do time_lock_rq as we only need to update the rq clock
-	 * if we activate the task
-	 */
-	rq = __task_rq_lock(p);
-
 	if (task_queued(p) || task_running(p))
 		goto out_running;
 
 	ttwu_activate(p, rq, wake_flags & WF_SYNC);
+	success = true;
 
 out_running:
 	ttwu_post_activation(p, rq, success);
-	__task_rq_unlock(rq);
+out_unlock:
+	task_rq_unlock(rq, p, &flags);
 
 	if (schedstat_enabled())
 		ttwu_stat(p, cpu, wake_flags);
-out:
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	return success;
 }
@@ -3116,7 +3120,6 @@ static void task_running_tick(struct rq
 	} else if (rq->rq_time_slice >= RESCHED_US)
 			return;
 
-	/* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */
 	p = rq->curr;
 
 	rq_lock(rq);
@@ -3127,8 +3130,7 @@ static void task_running_tick(struct rq
 
 /*
  * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled. The data modified is all
- * local to struct rq so we don't need to grab grq lock.
+ * We call it with interrupts disabled.
  */
 void scheduler_tick(void)
 {
@@ -3136,7 +3138,6 @@ void scheduler_tick(void)
 	struct rq *rq = cpu_rq(cpu);
 
 	sched_clock_tick();
-	/* grq lock not grabbed, so only update rq clock */
 	update_rq_clock(rq);
 	update_cpu_clock_tick(rq, rq->curr);
 	update_load_avg(rq);
@@ -3425,8 +3426,7 @@ static inline void schedule_debug(struct
 
 /*
  * The currently running task's information is all stored in rq local data
- * which is only modified by the local CPU, thereby allowing the data to be
- * changed without grabbing the grq lock.
+ * which is only modified by the local CPU.
  */
 static inline void set_rq_task(struct rq *rq, struct task_struct *p)
 {