Use upgradeable read/write locks in place of the grq spinlock. Separate out the paths that require read-only access, write access, or are indeterminate, and use the appropriate lock subvariant in each case to decrease lock contention and improve scalability through more parallel read sections of code. Where a code path is not performance critical, just use the wlock variants.

-ck

---
 kernel/sched/bfs.c |  376 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 255 insertions(+), 121 deletions(-)

Index: linux-3.15.5-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.15.5-ck1.orig/kernel/sched/bfs.c	2014-07-27 20:40:07.205098825 +1000
+++ linux-3.15.5-ck1/kernel/sched/bfs.c	2014-07-27 21:30:02.862786582 +1000
@@ -71,6 +71,7 @@
 #include
 #include
 #include
+#include <linux/urwlock.h>
 #include
 #include
@@ -162,7 +163,7 @@ static inline int timeslice(void)
  * struct.
  */
 struct global_rq {
-	raw_spinlock_t lock;
+	urwlock_t urw;
 	unsigned long nr_running;
 	unsigned long nr_uninterruptible;
 	unsigned long long nr_switches;
@@ -356,90 +357,193 @@ static inline bool task_running(struct t
 	return p->on_cpu;
 }
 
-static inline void grq_lock(void)
-	__acquires(grq.lock)
+static inline void grq_wlock(void)
 {
-	raw_spin_lock(&grq.lock);
+	urw_wlock(&grq.urw);
 }
 
-static inline void grq_unlock(void)
-	__releases(grq.lock)
+static inline void grq_wunlock(void)
+{
+	urw_wunlock(&grq.urw);
+}
+
+static inline void grq_rlock(void)
+{
+	urw_rlock(&grq.urw);
+}
+
+static inline void grq_runlock(void)
+{
+	urw_runlock(&grq.urw);
+}
+
+static inline void grq_ulock(void)
+{
+	urw_ulock(&grq.urw);
+}
+
+static inline void grq_uunlock(void)
+{
+	urw_uunlock(&grq.urw);
+}
+
+static inline void grq_upgrade(void)
+{
+	urw_upgrade(&grq.urw);
+}
+
+static inline void grq_udowngrade(void)
+{
+	urw_udowngrade(&grq.urw);
+}
+
+static inline void grq_wdowngrade(void)
+{
+	urw_wdowngrade(&grq.urw);
+}
+
+/* Downgrade a write lock back to an intermediate one */
+static inline void grq_wudowngrade(void)
 {
-	raw_spin_unlock(&grq.lock);
+	urw_wudowngrade(&grq.urw);
 }
 
-static inline void grq_lock_irq(void)
-	__acquires(grq.lock)
+static inline void grq_wlock_irq(void)
 {
-	raw_spin_lock_irq(&grq.lock);
+	urw_wlock_irq(&grq.urw);
 }
 
-static inline void time_lock_grq(struct rq *rq)
-	__acquires(grq.lock)
+static inline void grq_ulock_irq(void)
 {
-	grq_lock();
+	urw_ulock_irq(&grq.urw);
+}
+
+static inline void grq_rlock_irq(void)
+{
+	urw_rlock_irq(&grq.urw);
+}
+
+static inline void time_wlock_grq(struct rq *rq)
+{
+	grq_wlock();
 	update_clocks(rq);
 }
 
-static inline void grq_unlock_irq(void)
-	__releases(grq.lock)
+static inline void grq_wunlock_irq(void)
 {
-	raw_spin_unlock_irq(&grq.lock);
+	urw_wunlock_irq(&grq.urw);
 }
 
-static inline void grq_lock_irqsave(unsigned long *flags)
-	__acquires(grq.lock)
+static inline void grq_uunlock_irq(void)
 {
-	raw_spin_lock_irqsave(&grq.lock, *flags);
+	urw_uunlock_irq(&grq.urw);
 }
 
-static inline void grq_unlock_irqrestore(unsigned long *flags)
-	__releases(grq.lock)
+static inline void grq_runlock_irq(void)
+{
+	urw_runlock_irq(&grq.urw);
+}
+
+static inline void grq_wlock_irqsave(unsigned long *flags)
+{
+	urw_wlock_irqsave(&grq.urw, flags);
+}
+
+static inline void grq_ulock_irqsave(unsigned long *flags)
+{
+	urw_ulock_irqsave(&grq.urw, flags);
+}
+
+static inline void grq_rlock_irqsave(unsigned long *flags)
+{
+	urw_rlock_irqsave(&grq.urw, flags);
+}
+
+static inline void grq_wunlock_irqrestore(unsigned long *flags)
+{
+	urw_wunlock_irqrestore(&grq.urw, flags);
+}
+
+static inline void grq_uunlock_irqrestore(unsigned long *flags)
 {
-	raw_spin_unlock_irqrestore(&grq.lock, *flags);
+	urw_uunlock_irqrestore(&grq.urw, flags);
+}
+
+static inline void grq_runlock_irqrestore(unsigned long *flags)
+{
+	urw_runlock_irqrestore(&grq.urw, flags);
+}
+
+static inline struct rq
+*task_grq_wlock(struct task_struct *p, unsigned long *flags)
+{
+	grq_wlock_irqsave(flags);
+	return task_rq(p);
+}
+
+static inline struct rq
+*task_grq_ulock(struct task_struct *p, unsigned long *flags)
+{
+	grq_ulock_irqsave(flags);
+	return task_rq(p);
 }
 
 static inline struct rq
-*task_grq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(grq.lock)
+*task_grq_rlock(struct task_struct *p, unsigned long *flags)
 {
-	grq_lock_irqsave(flags);
+	grq_rlock_irqsave(flags);
 	return task_rq(p);
 }
 
 static inline struct rq
-*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(grq.lock)
+*time_task_grq_wlock(struct task_struct *p, unsigned long *flags)
 {
-	struct rq *rq = task_grq_lock(p, flags);
+	struct rq *rq = task_grq_wlock(p, flags);
 	update_clocks(rq);
 	return rq;
 }
 
-static inline struct rq *task_grq_lock_irq(struct task_struct *p)
-	__acquires(grq.lock)
+static inline struct rq *task_grq_wlock_irq(struct task_struct *p)
 {
-	grq_lock_irq();
+	grq_wlock_irq();
 	return task_rq(p);
 }
 
-static inline void time_task_grq_lock_irq(struct task_struct *p)
-	__acquires(grq.lock)
+static inline struct rq *task_grq_ulock_irq(struct task_struct *p)
 {
-	struct rq *rq = task_grq_lock_irq(p);
+	grq_ulock_irq();
+	return task_rq(p);
+}
+
+static inline void time_task_grq_wlock_irq(struct task_struct *p)
+{
+	struct rq *rq = task_grq_wlock_irq(p);
 	update_clocks(rq);
 }
 
-static inline void task_grq_unlock_irq(void)
-	__releases(grq.lock)
+static inline void task_grq_wunlock_irq(void)
 {
-	grq_unlock_irq();
+	grq_wunlock_irq();
 }
 
-static inline void task_grq_unlock(unsigned long *flags)
-	__releases(grq.lock)
+static inline void task_grq_runlock_irq(void)
+{
+	grq_runlock_irq();
+}
+
+static inline void task_grq_wunlock(unsigned long *flags)
+{
+	grq_wunlock_irqrestore(flags);
+}
+
+static inline void task_grq_uunlock(unsigned long *flags)
 {
-	grq_unlock_irqrestore(flags);
+	grq_uunlock_irqrestore(flags);
+}
+
+static inline void task_grq_runlock(unsigned long *flags)
+{
+	grq_runlock_irqrestore(flags);
 }
 
 /**
@@ -451,34 +555,41 @@ static inline void task_grq_unlock(unsig
  */
 bool grunqueue_is_locked(void)
 {
-	return raw_spin_is_locked(&grq.lock);
+	return raw_spin_is_locked(&grq.urw.lock);
 }
 
 void grq_unlock_wait(void)
-	__releases(grq.lock)
 {
 	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-	raw_spin_unlock_wait(&grq.lock);
+	raw_spin_unlock_wait(&grq.urw.lock);
 }
 
-static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
-	__acquires(grq.lock)
+static inline void time_grq_wlock(struct rq *rq, unsigned long *flags)
 {
 	local_irq_save(*flags);
-	time_lock_grq(rq);
+	time_wlock_grq(rq);
 }
 
-static inline struct rq *__task_grq_lock(struct task_struct *p)
-	__acquires(grq.lock)
+static inline struct rq *__task_grq_wlock(struct task_struct *p)
 {
-	grq_lock();
+	grq_wlock();
 	return task_rq(p);
 }
 
-static inline void __task_grq_unlock(void)
-	__releases(grq.lock)
+static inline struct rq *__task_grq_ulock(struct task_struct *p)
+{
+	grq_ulock();
+	return task_rq(p);
+}
+
+static inline void __task_grq_wunlock(void)
+{
+	grq_wunlock();
+}
+
+static inline void __task_grq_uunlock(void)
 {
-	grq_unlock();
+	grq_uunlock();
 }
 
 /*
@@ -511,16 +622,18 @@ static inline void finish_lock_switch(st
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
-	grq.lock.owner = current;
+	grq.urw.lock.owner = current;
+	grq.urw.rwlock.owner = current;
 #endif
 	/*
 	 * If we are tracking spinlock dependencies then we have to
 	 * fix up the runqueue lock - which gets 'carried over' from
 	 * prev into current:
	 */
-	spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);
+	spin_acquire(&grq.urw.lock.dep_map, 0, 0, _THIS_IP_);
+	rwlock_acquire(&grq.urw.rwlock.dep_map, 0, 0, _THIS_IP_);
 
-	grq_unlock_irq();
+	grq_wunlock_irq();
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -528,9 +641,9 @@ static inline void finish_lock_switch(st
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	grq_unlock_irq();
+	grq_wunlock_irq();
 #else
-	grq_unlock();
+	grq_wunlock();
 #endif
 }
 
@@ -956,14 +1069,14 @@ void set_task_cpu(struct task_struct *p,
 	/*
 	 * The caller should hold grq lock.
 	 */
-	WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock));
+	WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.urw.lock));
 #endif
 	trace_sched_migrate_task(p, cpu);
 	if (task_cpu(p) != cpu)
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
 
 	/*
-	 * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be
+	 * After ->cpu is set up to a new value, task_grq_wlock(p, ...) can be
 	 * successfully executed on another CPU. We must ensure that updates of
 	 * per-task data have been completed by this moment.
 	 */
@@ -1089,7 +1202,7 @@ void resched_task(struct task_struct *p)
 {
 	int cpu;
 
-	lockdep_assert_held(&grq.lock);
+	lockdep_assert_held(&grq.urw.lock);
 
 	if (test_tsk_need_resched(p))
 		return;
@@ -1173,14 +1286,14 @@ unsigned long wait_task_inactive(struct
 		 * lock now, to be *sure*. If we're wrong, we'll
 		 * just go back and repeat.
 		 */
-		rq = task_grq_lock(p, &flags);
+		rq = task_grq_rlock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(p);
 		on_rq = task_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_grq_unlock(&flags);
+		task_grq_runlock(&flags);
 
 		/*
 		 * If it changed from the expected state, bail out now.
@@ -1480,7 +1593,7 @@ static bool try_to_wake_up(struct task_s
 	 * No need to do time_lock_grq as we only need to update the rq clock
 	 * if we activate the task
 	 */
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 	cpu = task_cpu(p);
 
 	/* state is a volatile long, どうして、分からない */
@@ -1490,13 +1603,17 @@ static bool try_to_wake_up(struct task_s
 	if (task_queued(p) || task_running(p))
 		goto out_running;
 
+	grq_upgrade();
 	ttwu_activate(p, rq, wake_flags & WF_SYNC);
 	success = true;
 
 out_running:
 	ttwu_post_activation(p, rq, success);
+	if (success)
+		grq_wudowngrade();
+
 out_unlock:
-	task_grq_unlock(&flags);
+	task_grq_uunlock(&flags);
 
 	ttwu_stat(p, cpu, wake_flags);
 
@@ -1518,7 +1635,7 @@ static void try_to_wake_up_local(struct
 	struct rq *rq = task_rq(p);
 	bool success = false;
 
-	lockdep_assert_held(&grq.lock);
+	lockdep_assert_held(&grq.urw.lock);
 
 	if (!(p->state & TASK_NORMAL))
 		return;
@@ -1632,7 +1749,7 @@ void wake_up_new_task(struct task_struct
 	struct rq *rq;
 
 	parent = p->parent;
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 
 	/*
 	 * Reinit new task deadline as its creator deadline could have changed
@@ -1652,6 +1769,7 @@ void wake_up_new_task(struct task_struct
 	 */
 	p->prio = rq->curr->normal_prio;
 
+	grq_upgrade();
 	activate_task(p, rq);
 	trace_sched_wakeup_new(p, 1);
 	if (unlikely(p->policy == SCHED_FIFO))
@@ -1693,7 +1811,7 @@ after_ts_init:
 		}
 		time_slice_expired(p);
 	}
-	task_grq_unlock(&flags);
+	task_grq_wunlock(&flags);
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2260,14 +2378,14 @@ void thread_group_cputime(struct task_st
 		goto out;
 
 	t = tsk;
-	grq_lock_irqsave(&flags);
+	grq_ulock_irqsave(&flags);
 	do {
 		task_cputime(t, &utime, &stime);
 		times->utime += utime;
 		times->stime += stime;
 		times->sum_exec_runtime += do_task_sched_runtime(t);
 	} while_each_thread(tsk, t);
-	grq_unlock_irqrestore(&flags);
+	grq_uunlock_irqrestore(&flags);
 out:
 	rcu_read_unlock();
 }
@@ -2474,14 +2592,20 @@ ts_account:
  * Return any ns on the sched_clock that have not yet been accounted in
  * @p in case that task is currently running.
  *
- * Called with task_grq_lock() held.
+ * Called with task_grq_ulock() held.
  */
 static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 {
 	u64 ns = 0;
 
 	if (p == rq->curr) {
+		/* We only need the write lock for updating the clocks which
+		 * will then inhibit anyone else getting the lock but only
+		 * grab it if we need it and then drop it. */
+		grq_upgrade();
 		update_clocks(rq);
+		grq_wudowngrade();
+
 		ns = rq->clock_task - rq->rq_last_ran;
 		if (unlikely((s64)ns < 0))
 			ns = 0;
@@ -2496,9 +2620,9 @@ unsigned long long task_delta_exec(struc
 	struct rq *rq;
 	u64 ns;
 
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 	ns = do_task_delta_exec(p, rq);
-	task_grq_unlock(&flags);
+	task_grq_uunlock(&flags);
 
 	return ns;
 }
@@ -2547,9 +2671,9 @@ unsigned long long task_sched_runtime(st
 		return tsk_seruntime(p);
 #endif
 
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 	ns = p->sched_time + do_task_delta_exec(p, rq);
-	task_grq_unlock(&flags);
+	task_grq_uunlock(&flags);
 
 	return ns;
 }
@@ -2814,10 +2938,11 @@ static void task_running_tick(struct rq
 
 	/* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */
 	p = rq->curr;
-	grq_lock();
+	grq_ulock();
 	requeue_task(p);
+	grq_upgrade();
 	set_tsk_need_resched(p);
-	grq_unlock();
+	grq_wunlock();
 }
 
 /*
@@ -3231,7 +3356,7 @@ need_resched:
 	 * done by the caller to avoid the race with signal_wake_up().
 	 */
 	smp_mb__before_spinlock();
-	grq_lock_irq();
+	grq_wlock_irq();
 
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -3266,7 +3391,7 @@ need_resched:
 		 * sure to submit it to avoid deadlocks.
 		 */
 		if (unlikely(deactivate && blk_needs_flush_plug(prev))) {
-			grq_unlock_irq();
+			grq_wunlock_irq();
 			preempt_enable_no_resched();
 			blk_schedule_flush_plug(prev);
 			goto need_resched;
@@ -3303,7 +3428,7 @@ need_resched:
 				 * again.
 				 */
 				set_rq_task(rq, prev);
-				grq_unlock_irq();
+				grq_wunlock_irq();
 				goto rerun_prev_unlocked;
 			} else
 				swap_sticky(rq, cpu, prev);
@@ -3353,7 +3478,7 @@ need_resched:
 		rq = cpu_rq(cpu);
 		idle = rq->idle;
 	} else
-		grq_unlock_irq();
+		grq_wunlock_irq();
 
 rerun_prev_unlocked:
 	sched_preempt_enable_no_resched();
@@ -3479,7 +3604,7 @@ void rt_mutex_setprio(struct task_struct
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -3502,6 +3627,8 @@ void rt_mutex_setprio(struct task_struct
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	queued = task_queued(p);
+
+	grq_upgrade();
 	if (queued)
 		dequeue_task(p);
 	p->prio = prio;
@@ -3511,9 +3638,10 @@ void rt_mutex_setprio(struct task_struct
 		enqueue_task(p);
 		try_preempt(p, rq);
 	}
+	grq_wudowngrade();
 
 out_unlock:
-	task_grq_unlock(&flags);
+	task_grq_uunlock(&flags);
 }
 #endif
 
@@ -3540,7 +3668,7 @@ void set_user_nice(struct task_struct *p
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	rq = time_task_grq_lock(p, &flags);
+	rq = time_task_grq_wlock(p, &flags);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3551,6 +3679,7 @@ void set_user_nice(struct task_struct *p
 		p->static_prio = new_static;
 		goto out_unlock;
 	}
+
 	queued = task_queued(p);
 	if (queued)
 		dequeue_task(p);
@@ -3570,7 +3699,7 @@ void set_user_nice(struct task_struct *p
 		resched_task(p);
 	}
 out_unlock:
-	task_grq_unlock(&flags);
+	task_grq_wunlock(&flags);
 }
 EXPORT_SYMBOL(set_user_nice);
 
@@ -3848,13 +3977,13 @@ recheck:
 	 * To be able to change p->policy safely, the grunqueue lock must be
 	 * held.
 	 */
-	rq = __task_grq_lock(p);
+	rq = __task_grq_ulock(p);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
 	 */
 	if (p == rq->stop) {
-		__task_grq_unlock();
+		__task_grq_uunlock();
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		return -EINVAL;
 	}
@@ -3865,7 +3994,7 @@ recheck:
 	if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
 
-		__task_grq_unlock();
+		__task_grq_uunlock();
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		return 0;
 	}
@@ -3873,10 +4002,11 @@ recheck:
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
-		__task_grq_unlock();
+		__task_grq_uunlock();
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
+	grq_upgrade();
 	update_clocks(rq);
 	p->sched_reset_on_fork = reset_on_fork;
 
@@ -3888,7 +4018,7 @@ recheck:
 		enqueue_task(p);
 		try_preempt(p, rq);
 	}
-	__task_grq_unlock();
+	__task_grq_wunlock();
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	rt_mutex_adjust_pi(p);
@@ -4386,9 +4516,9 @@ long sched_getaffinity(pid_t pid, cpumas
 	if (retval)
 		goto out_unlock;
 
-	grq_lock_irqsave(&flags);
+	grq_rlock_irqsave(&flags);
 	cpumask_and(mask, tsk_cpus_allowed(p), cpu_active_mask);
-	grq_unlock_irqrestore(&flags);
+	grq_runlock_irqrestore(&flags);
 
 out_unlock:
 	rcu_read_unlock();
@@ -4447,7 +4577,7 @@ SYSCALL_DEFINE0(sched_yield)
 	struct task_struct *p;
 
 	p = current;
-	grq_lock_irq();
+	grq_rlock_irq();
 	schedstat_inc(task_rq(p), yld_count);
 	requeue_task(p);
 
@@ -4455,9 +4585,8 @@ SYSCALL_DEFINE0(sched_yield)
 	 * Since we are going to call schedule() anyway, there's
 	 * no need to preempt or enable interrupts:
 	 */
-	__release(grq.lock);
-	spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
-	do_raw_spin_unlock(&grq.lock);
+	__urw_read_unlock(&grq.urw.rwlock);
+	__release(grq.urw.lock);
 	sched_preempt_enable_no_resched();
 
 	schedule();
@@ -4575,7 +4704,7 @@ bool __sched yield_to(struct task_struct
 	struct rq *rq;
 
 	rq = this_rq();
-	grq_lock_irqsave(&flags);
+	grq_ulock_irqsave(&flags);
 	if (task_running(p) || p->state) {
 		yielded = -ESRCH;
 		goto out_unlock;
@@ -4587,9 +4716,11 @@ bool __sched yield_to(struct task_struct
 		rq->rq_time_slice = 0;
 		if (p->time_slice > timeslice())
 			p->time_slice = timeslice();
+	grq_upgrade();
 	set_tsk_need_resched(rq->curr);
+	grq_wudowngrade();
 out_unlock:
-	grq_unlock_irqrestore(&flags);
+	grq_uunlock_irqrestore(&flags);
 
 	if (yielded > 0)
 		schedule();
@@ -4720,9 +4851,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, p
 	if (retval)
 		goto out_unlock;
 
-	grq_lock_irqsave(&flags);
+	grq_rlock_irqsave(&flags);
 	time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
-	grq_unlock_irqrestore(&flags);
+	grq_runlock_irqrestore(&flags);
 	rcu_read_unlock();
 
 	t = ns_to_timespec(time_slice);
@@ -4828,7 +4959,7 @@ void init_idle(struct task_struct *idle,
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	time_grq_lock(rq, &flags);
+	time_grq_wlock(rq, &flags);
 	idle->last_ran = rq->clock_task;
 	idle->state = TASK_RUNNING;
 	/* Setting prio to illegal value shouldn't matter when never queued */
@@ -4841,7 +4972,7 @@ void init_idle(struct task_struct *idle,
 	rcu_read_unlock();
 	rq->curr = rq->idle = idle;
 	idle->on_cpu = 1;
-	grq_unlock_irqrestore(&flags);
+	grq_wunlock_irqrestore(&flags);
 
 	/* Set the preempt count _outside_ the spinlocks!
 	 */
 	init_idle_preempt_count(idle, cpu);
@@ -4857,9 +4988,9 @@ void resched_cpu(int cpu)
 {
 	unsigned long flags;
 
-	grq_lock_irqsave(&flags);
+	grq_wlock_irqsave(&flags);
 	resched_task(cpu_curr(cpu));
-	grq_unlock_irqrestore(&flags);
+	grq_wunlock_irqrestore(&flags);
 }
 
 #ifdef CONFIG_SMP
@@ -5008,20 +5139,21 @@ int set_cpus_allowed_ptr(struct task_str
 	struct rq *rq;
 	int ret = 0;
 
-	rq = task_grq_lock(p, &flags);
+	rq = task_grq_ulock(p, &flags);
 
 	if (cpumask_equal(tsk_cpus_allowed(p), new_mask))
-		goto out;
+		goto out_unlock;
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
-		goto out;
+		goto out_unlock;
 	}
 
 	queued = task_queued(p);
 
 	do_set_cpus_allowed(p, new_mask);
 
+	grq_upgrade();
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
@@ -5035,11 +5167,13 @@ int set_cpus_allowed_ptr(struct task_str
 			resched_task(p);
 	} else
 		set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask));
-
 out:
 	if (queued)
 		try_preempt(p, rq);
-	task_grq_unlock(&flags);
+	grq_wudowngrade();
+
+out_unlock:
+	task_grq_uunlock(&flags);
 
 	if (running_wrong)
 		_cond_resched();
@@ -5367,7 +5501,7 @@ migration_call(struct notifier_block *nf
 
 	case CPU_ONLINE:
 		/* Update our root-domain */
-		grq_lock_irqsave(&flags);
+		grq_wlock_irqsave(&flags);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
@@ -5375,27 +5509,27 @@ migration_call(struct notifier_block *nf
 		}
 		unbind_zero(cpu);
 		grq.noc = num_online_cpus();
-		grq_unlock_irqrestore(&flags);
+		grq_wunlock_irqrestore(&flags);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
-		grq_lock_irq();
+		grq_wlock_irq();
 		set_rq_task(rq, idle);
 		update_clocks(rq);
-		grq_unlock_irq();
+		grq_wunlock_irq();
 		break;
 
 	case CPU_DYING:
 		/* Update our root-domain */
-		grq_lock_irqsave(&flags);
+		grq_wlock_irqsave(&flags);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
 		bind_zero(cpu);
 		grq.noc = num_online_cpus();
-		grq_unlock_irqrestore(&flags);
+		grq_wunlock_irqrestore(&flags);
 		break;
 #endif
 	}
@@ -5590,7 +5724,7 @@ static void rq_attach_root(struct rq *rq
 	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
-	grq_lock_irqsave(&flags);
+	grq_wlock_irqsave(&flags);
 
 	if (rq->rd) {
 		old_rd = rq->rd;
@@ -5616,7 +5750,7 @@ static void rq_attach_root(struct rq *rq
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
-	grq_unlock_irqrestore(&flags);
+	grq_wunlock_irqrestore(&flags);
 
 	if (old_rd)
 		call_rcu_sched(&old_rd->rcu, free_rootdomain);
@@ -6673,7 +6807,7 @@ void __init sched_init_smp(void)
 		BUG();
 	free_cpumask_var(non_isolated_cpus);
 
-	grq_lock_irq();
+	grq_wlock_irq();
 	/*
 	 * Set up the relative cache distance of each online cpu from each
 	 * other in a simple array for quick lookup. Locality is determined
@@ -6716,7 +6850,7 @@ void __init sched_init_smp(void)
 			rq->siblings_idle = siblings_cpu_idle;
 #endif
 	}
-	grq_unlock_irq();
+	grq_wunlock_irq();
 
 	for_each_online_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
@@ -6751,7 +6885,7 @@ void __init sched_init(void)
 	for (i = 1 ; i < NICE_WIDTH ; i++)
 		prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
 
-	raw_spin_lock_init(&grq.lock);
+	urwlock_init(&grq.urw);
 	grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0;
 	grq.niffies = 0;
 	grq.last_jiffy = jiffies;
@@ -6900,7 +7034,7 @@ void normalize_rt_tasks(void)
 			continue;
 
 		raw_spin_lock(&p->pi_lock);
-		rq = __task_grq_lock(p);
+		rq = __task_grq_wlock(p);
 
 		queued = task_queued(p);
 		if (queued)
@@ -6911,7 +7045,7 @@ void normalize_rt_tasks(void)
 			try_preempt(p, rq);
 		}
 
-		__task_grq_unlock();
+		__task_grq_wunlock();
 		raw_spin_unlock(&p->pi_lock);
 	} while_each_thread(g, p);
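
The urwlock primitives used throughout the patch come from the separate urwlocks patch in the -ck series rather than mainline, so the sketch below is only an illustrative userspace analogue of the locking pattern, built from standard POSIX primitives. The struct and function names (urw_rlock(), urw_ulock(), urw_upgrade(), urw_wudowngrade(), and so on) mirror the API used above, but the implementation here, a pthread mutex standing in for the single "intermediate/upgradeable" holder plus an ordinary pthread rwlock, is an assumption for demonstration and is not the kernel code.

/*
 * Illustrative userspace analogue of the upgradeable rwlock pattern.
 * NOT the kernel urwlock implementation: a pthread mutex stands in for
 * the single "intermediate/upgradeable" slot and a pthread rwlock
 * provides the read/write side.  Build with: cc urw_sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct urwlock {
	pthread_mutex_t ulock;		/* held by the one writer/upgrader */
	pthread_rwlock_t rwlock;	/* shared by plain readers */
};

static void urw_init(struct urwlock *l)
{
	pthread_mutex_init(&l->ulock, NULL);
	pthread_rwlock_init(&l->rwlock, NULL);
}

/* Plain read lock: runs concurrently with other readers and with the
 * holder of the intermediate lock. */
static void urw_rlock(struct urwlock *l)   { pthread_rwlock_rdlock(&l->rwlock); }
static void urw_runlock(struct urwlock *l) { pthread_rwlock_unlock(&l->rwlock); }

/* Intermediate ("upgradeable") lock: excludes writers and other
 * upgraders but still admits plain readers. */
static void urw_ulock(struct urwlock *l)
{
	pthread_mutex_lock(&l->ulock);
	pthread_rwlock_rdlock(&l->rwlock);
}

static void urw_uunlock(struct urwlock *l)
{
	pthread_rwlock_unlock(&l->rwlock);
	pthread_mutex_unlock(&l->ulock);
}

/* Upgrade the intermediate lock to a full write lock.  Safe because we
 * already hold ulock, so no other writer or upgrader can race us; we
 * only wait for the remaining readers to drain. */
static void urw_upgrade(struct urwlock *l)
{
	pthread_rwlock_unlock(&l->rwlock);
	pthread_rwlock_wrlock(&l->rwlock);
}

/* Downgrade a write lock back to the intermediate state, letting
 * readers in again while still excluding other writers. */
static void urw_wudowngrade(struct urwlock *l)
{
	pthread_rwlock_unlock(&l->rwlock);
	pthread_rwlock_rdlock(&l->rwlock);
}

/* Full write lock, for paths that always modify shared state. */
static void urw_wlock(struct urwlock *l)
{
	pthread_mutex_lock(&l->ulock);
	pthread_rwlock_wrlock(&l->rwlock);
}

static void urw_wunlock(struct urwlock *l)
{
	pthread_rwlock_unlock(&l->rwlock);
	pthread_mutex_unlock(&l->ulock);
}

/* Usage mirroring the try_to_wake_up() flow above: take the
 * intermediate lock, inspect state, and only pay for full exclusion if
 * a modification turns out to be needed.  The names grq_urw and
 * task_queued_flag are hypothetical stand-ins for the shared state. */
static struct urwlock grq_urw;
static int task_queued_flag;

static void wake_up_sketch(void)
{
	int success = 0;

	urw_ulock(&grq_urw);
	if (!task_queued_flag) {
		urw_upgrade(&grq_urw);		/* now exclusive */
		task_queued_flag = 1;		/* "activate" the task */
		success = 1;
		urw_wudowngrade(&grq_urw);	/* let readers back in */
	}
	urw_uunlock(&grq_urw);
	if (success)
		printf("activated\n");
}

int main(void)
{
	urw_init(&grq_urw);
	wake_up_sketch();
	return 0;
}

As in try_to_wake_up() and do_task_delta_exec() above, the point of the pattern is that an indeterminate path takes the cheap intermediate lock, shares the read side with concurrent readers, and only excludes everyone for the short window in which it actually modifies the global runqueue, upgrading just before the modification and downgrading straight after.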