---
 kernel/sched/bfs.c |   70 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 57 insertions(+), 13 deletions(-)

Index: linux-3.3-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.3-ck1.orig/kernel/sched/bfs.c	2012-04-01 14:41:01.000000000 +1000
+++ linux-3.3-ck1/kernel/sched/bfs.c	2012-04-01 16:40:07.751918252 +1000
@@ -388,6 +388,7 @@ static struct global_rq grq;
  * This data should only be modified by the local cpu.
  */
 struct rq {
+	urw_lock_t urw;
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
 	u64 nohz_stamp;
@@ -722,14 +723,26 @@ static inline void grq_unlock_irqrestore
 	local_irq_restore(*flags);
 }
 
+/* Read lock rq urw lock and return task rq */
 static inline struct rq
-*task_grq_rlock(struct task_struct *p, unsigned long *flags)
-	__acquires(grq.lock)
-	__acquires(grq.rwlock)
-	__releases(grq.lock)
+*task_rq_rlock(struct task_struct *p, unsigned long *flags)
 {
+	struct rq *rq;
+
 	grq_rlock_irqsave(flags);
-	return task_rq(p);
+	while (42) {
+		rq = task_rq(p);
+		urw_read_lock(&rq->urw);
+		if (likely(rq == task_rq(p)))
+			return rq;
+		urw_read_unlock(&rq->urw);
+	}
+}
+
+static inline void task_rq_runlock(struct rq *rq, unsigned long *flags)
+{
+	urw_read_unlock(&rq->urw);
+	grq_runlock_irqrestore(flags);
 }
 
 static inline struct rq
@@ -779,12 +792,6 @@ static inline void task_grq_unlock_irq(v
 	grq_unlock_irq();
 }
 
-static inline void task_grq_runlock(unsigned long *flags)
-	__releases(grq.rwlock)
-{
-	grq_runlock_irqrestore(flags);
-}
-
 static inline void task_grq_wunlock(unsigned long *flags)
 	__releases(grq.rwlock)
 	__releases(grq.lock)
@@ -798,6 +805,42 @@ static inline void task_grq_unlock(unsig
 	grq_unlock_irqrestore(flags);
 }
 
+static inline void rq_rlock(struct rq *rq)
+{
+	grq_read_lock();
+	urw_read_lock(&rq->urw);
+}
+
+static inline void rq_ulock(struct rq *rq)
+{
+	grq_read_lock();
+	urw_upgrade_lock(&rq->urw);
+}
+
+static inline void rq_wlock(struct rq *rq)
+{
+	grq_read_lock();
+	urw_write_lock(&rq->urw);
+}
+
+static inline void rq_uulock(struct rq *rq)
+{
+	grq_lock();
+	urw_upgrade_lock(&rq->urw);
+}
+
+static inline void rq_wulock(struct rq *rq)
+{
+	grq_lock();
+	urw_write_lock(&rq->urw);
+}
+
+static inline void rq_wwlock(struct rq *rq)
+{
+	grq_wlock();
+	urw_write_lock(&rq->urw);
+}
+
 /**
  * grunqueue_is_locked
  *
@@ -1548,14 +1591,14 @@ retry_rq:
 	 * lock now, to be *sure*. If we're wrong, we'll
 	 * just go back and repeat.
 	 */
-	rq = task_grq_rlock(p, &flags);
+	rq = task_rq_rlock(p, &flags);
 	trace_sched_wait_task(p);
 	running = task_running(p);
 	on_rq = task_queued(p);
 	ncsw = 0;
 	if (!match_state || p->state == match_state)
 		ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-	task_grq_runlock(&flags);
+	task_rq_runlock(rq, &flags);
 
 	/*
 	 * If it changed from the expected state, bail out now.
@@ -7233,6 +7276,7 @@ void __init sched_init(void)
 #endif
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
+		urw_lock_init(&rq->urw);
 		rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
 			      rq->iowait_pc = rq->idle_pc = 0;
 		rq->dither = false;
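
The key idiom in this patch is the lock-and-revalidate loop in task_rq_rlock(): the task can migrate between reading task_rq(p) and acquiring that rq's urw lock, so the lock is taken speculatively and the lookup repeated until the result is stable under the lock. A minimal, self-contained userspace sketch of the same pattern follows, substituting POSIX rwlocks for the patchset's urw_lock_t; every name in it (toy_rq, toy_task, task_rq_rlock_sketch) is hypothetical, not taken from the kernel source.

#include <pthread.h>
#include <stdatomic.h>

struct toy_rq {
	pthread_rwlock_t lock;		/* stand-in for urw_lock_t urw */
};

struct toy_task {
	_Atomic(struct toy_rq *) rq;	/* may change under migration */
};

/* Return the task's rq with that rq's lock held for reading. */
static struct toy_rq *task_rq_rlock_sketch(struct toy_task *p)
{
	struct toy_rq *rq;

	for (;;) {
		rq = atomic_load(&p->rq);
		pthread_rwlock_rdlock(&rq->lock);
		/*
		 * The task may have moved to another rq between the load
		 * above and taking the lock; if so, drop the lock and
		 * retry, as the while (42) loop in the patch does.
		 */
		if (rq == atomic_load(&p->rq))
			return rq;
		pthread_rwlock_unlock(&rq->lock);
	}
}

Once the recheck succeeds the result is pinned: in this design, migrating the task away would require write access to the very lock the reader now holds. That is also why task_rq_runlock() now takes the rq explicitly instead of recomputing task_rq(p), which could already point elsewhere by the time the caller unlocks.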