---
 kernel/sched/bfs.c |   86 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 36 deletions(-)

Index: linux-3.3-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.3-ck1.orig/kernel/sched/bfs.c	2012-04-03 18:07:25.000000000 +1000
+++ linux-3.3-ck1/kernel/sched/bfs.c	2012-04-03 18:20:52.989420069 +1000
@@ -462,57 +462,60 @@ static inline bool task_running(struct t
 	return p->on_cpu;
 }
 
-static inline void grq_write_lock(void)
+/* Low level write and read lock/unlock */
+static inline void __grq_write_lock(void)
 	__acquires(grq.rwlock)
 {
 	rwlock_acquire(&grq.rwlock.dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(&grq.rwlock, do_raw_write_trylock, do_raw_write_lock);
 }
 
-static inline void grq_write_unlock(void)
+static inline void __grq_write_unlock(void)
 	__releases(grq.rwlock)
 {
 	rwlock_release(&grq.rwlock.dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(&grq.rwlock);
 }
 
-static inline void grq_read_lock(void)
+static inline void __grq_read_lock(void)
 	__acquires(grq.rwlock)
 {
 	rwlock_acquire_read(&grq.rwlock.dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(&grq.rwlock, do_raw_read_trylock, do_raw_read_lock);
 }
 
-static inline void grq_read_unlock(void)
+static inline void __grq_read_unlock(void)
 	__releases(grq.rwlock)
 {
 	rwlock_release(&grq.rwlock.dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(&grq.rwlock);
 }
 
+/* Acquire grq write lock */
 static inline void grq_wlock(void)
 	__acquires(grq.lock)
 	__acquires(grq.rwlock)
 {
 	raw_spin_lock(&grq.lock);
-	grq_write_lock();
-}
-
-static inline void grq_lock(void)
-	__acquires(grq.lock)
-{
-	raw_spin_lock(&grq.lock);
+	__grq_write_lock();
 }
 
 static inline void grq_wunlock(void)
 	__releases(grq.rwlock)
 	__releases(grq.lock)
 {
-	grq_write_unlock();
+	__grq_write_unlock();
 	raw_spin_unlock(&grq.lock);
 }
 
-static inline void grq_unlock(void)
+/* Acquire grq upgradeable read/write lock */
+static inline void grq_ulock(void)
+	__acquires(grq.lock)
+{
+	raw_spin_lock(&grq.lock);
+}
+
+static inline void grq_uunlock(void)
 	__releases(grq.lock)
 {
 	raw_spin_unlock(&grq.lock);
@@ -523,10 +526,10 @@ static inline void grq_wlock_irq(void)
 	__acquires(grq.rwlock)
 {
 	raw_spin_lock_irq(&grq.lock);
-	grq_write_lock();
+	__grq_write_lock();
 }
 
-static inline void grq_lock_irq(void)
+static inline void grq_ulock_irq(void)
 	__acquires(grq.lock)
 {
 	raw_spin_lock_irq(&grq.lock);
@@ -544,11 +547,11 @@ static inline void grq_wunlock_irq(void)
 	__releases(grq.rwlock)
 	__releases(grq.lock)
 {
-	grq_write_unlock();
+	__grq_write_unlock();
 	raw_spin_unlock_irq(&grq.lock);
 }
 
-static inline void grq_unlock_irq(void)
+static inline void grq_uunlock_irq(void)
 	__releases(grq.lock)
 {
 	raw_spin_unlock_irq(&grq.lock);
@@ -560,7 +563,7 @@ static inline void grq_rlock_irqsave(uns
 	__releases(grq.lock)
 {
 	raw_spin_lock_irqsave(&grq.lock, *flags);
-	grq_read_lock();
+	__grq_read_lock();
 	spin_release(&grq.lock.dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(&grq.lock);
 }
@@ -570,19 +573,30 @@ static inline void grq_wlock_irqsave(uns
 	__acquires(grq.rwlock)
 {
 	raw_spin_lock_irqsave(&grq.lock, *flags);
-	grq_write_lock();
+	__grq_write_lock();
 }
 
-static inline void grq_lock_irqsave(unsigned long *flags)
+static inline void grq_ulock_irqsave(unsigned long *flags)
 	__acquires(grq.lock)
 {
 	raw_spin_lock_irqsave(&grq.lock, *flags);
 }
 
+/* Upgrade upgradeable lock to write lock */
 static inline void grq_upgrade_rwlock(void)
 	__acquires(grq.rwlock)
 {
-	grq_write_lock();
+	__grq_write_lock();
+}
+
+/* Downgrade upgradeable lock to read lock */
+static inline void grq_downgrade_rwlock(void)
+	__acquires(grq.rwlock)
+	__releases(grq.lock)
+{
+	__grq_read_lock();
+	spin_release(&grq.lock.dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(&grq.lock);
 }
 
 static inline void grq_runlock_irqrestore(unsigned long *flags)
@@ -595,11 +609,11 @@ static inline void grq_wunlock_irqrestor
 	__releases(grq.rwlock)
 	__releases(grq.lock)
 {
-	grq_write_unlock();
+	__grq_write_unlock();
 	raw_spin_unlock_irqrestore(&grq.lock, *flags);
 }
 
-static inline void grq_unlock_irqrestore(unsigned long *flags)
+static inline void grq_uunlock_irqrestore(unsigned long *flags)
 	__releases(grq.lock)
 {
 	raw_spin_unlock_irqrestore(&grq.lock, *flags);
@@ -628,7 +642,7 @@ static inline struct rq
 *task_grq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(grq.lock)
 {
-	grq_lock_irqsave(flags);
+	grq_ulock_irqsave(flags);
 	return task_rq(p);
 }
 
@@ -642,10 +656,10 @@ static inline struct rq
 	return rq;
 }
 
-static inline struct rq *task_grq_lock_irq(struct task_struct *p)
+static inline struct rq *task_grq_ulock_irq(struct task_struct *p)
 	__acquires(grq.lock)
 {
-	grq_lock_irq();
+	grq_ulock_irq();
 	return task_rq(p);
 }
 
@@ -656,10 +670,10 @@ static inline void task_grq_wunlock_irq(
 	grq_wunlock_irq();
 }
 
-static inline void task_grq_unlock_irq(void)
+static inline void task_grq_uunlock_irq(void)
 	__releases(grq.lock)
 {
-	grq_unlock_irq();
+	grq_uunlock_irq();
 }
 
 static inline void task_grq_runlock(unsigned long *flags)
@@ -678,7 +692,7 @@ static inline void task_grq_wunlock(unsi
 static inline void task_grq_unlock(unsigned long *flags)
 	__releases(grq.lock)
 {
-	grq_unlock_irqrestore(flags);
+	grq_uunlock_irqrestore(flags);
 }
 
 /**
@@ -721,7 +735,7 @@ static inline struct rq *__task_grq_wloc
 static inline struct rq *__task_grq_lock(struct task_struct *p)
 	__acquires(grq.lock)
 {
-	grq_lock();
+	grq_ulock();
 	return task_rq(p);
 }
 
@@ -735,7 +749,7 @@ static inline void __task_grq_wunlock(vo
 static inline void __task_grq_unlock(void)
 	__releases(grq.lock)
 {
-	grq_unlock();
+	grq_uunlock();
 }
 
 /*
@@ -1892,7 +1906,7 @@ void sched_fork(struct task_struct *p)
 	 * value. rq->rq_deadline is only modified within schedule() so it
 	 * is always equal to current->deadline.
 	 */
-	rq = task_grq_lock_irq(curr);
+	rq = task_grq_ulock_irq(curr);
 	if (likely(rq->rq_time_slice >= RESCHED_US * 2)) {
 		rq->rq_time_slice /= 2;
 		p->time_slice = rq->rq_time_slice;
@@ -1913,7 +1927,7 @@ void sched_fork(struct task_struct *p)
 	if (unlikely(rwupgrade))
 		task_grq_wunlock_irq();
 	else
-		task_grq_unlock_irq();
+		task_grq_uunlock_irq();
 out:
 	put_cpu();
 }
@@ -4736,7 +4750,7 @@ SYSCALL_DEFINE0(sched_yield)
 	 * Since we are going to call schedule() anyway, there's
 	 * no need to preempt or enable interrupts:
 	 */
-	grq_write_unlock();
+	__grq_write_unlock();
 	__release(grq.lock);
 	spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
 	do_raw_spin_unlock(&grq.lock);
@@ -4847,7 +4861,7 @@ bool __sched yield_to(struct task_struct
 	struct rq *rq;
 
 	rq = this_rq();
-	grq_lock_irqsave(&flags);
+	grq_ulock_irqsave(&flags);
 	if (task_running(p) || p->state)
 		goto out_unlock;
 
@@ -4865,7 +4879,7 @@ out_unlock:
 		grq_wunlock_irqrestore(&flags);
 		schedule();
 	} else
-		grq_unlock_irqrestore(&flags);
+		grq_uunlock_irqrestore(&flags);
 
 	return yielded;
 }
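
The hunks above leave the intended usage implicit: grq.lock becomes the "upgradeable" lock behind the new grq_ulock()/grq_uunlock() family, grq.rwlock stays an ordinary rwlock behind the double-underscore helpers, and grq_upgrade_rwlock()/grq_downgrade_rwlock() move between the two, as the added comments say. The sched_fork() hunks suggest the calling pattern: take the upgradeable lock, upgrade to the write side only if shared state actually has to be modified (the rwupgrade flag), and otherwise drop the upgradeable lock without ever having excluded readers. What follows is not part of the patch; it is a user-space sketch of that pattern, with a pthread mutex standing in for grq.lock and a pthread rwlock standing in for grq.rwlock, and every name in it is illustrative only.

/*
 * Illustrative sketch only -- not kernel code and not from the patch.
 * Build with: gcc -pthread urwlock-sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t grq_lock = PTHREAD_MUTEX_INITIALIZER;     /* stand-in for grq.lock */
static pthread_rwlock_t grq_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* stand-in for grq.rwlock */
static int shared_state;

/* "ulock": take only the upgradeable lock; readers of grq_rwlock still run. */
static void grq_ulock(void)	{ pthread_mutex_lock(&grq_lock); }
static void grq_uunlock(void)	{ pthread_mutex_unlock(&grq_lock); }

/* "upgrade": while holding the ulock, additionally exclude readers. */
static void grq_upgrade_rwlock(void) { pthread_rwlock_wrlock(&grq_rwlock); }

/* "wunlock": drop the write lock first, then the upgradeable lock, as in the patch. */
static void grq_wunlock(void)
{
	pthread_rwlock_unlock(&grq_rwlock);
	pthread_mutex_unlock(&grq_lock);
}

/* Mirrors the sched_fork() shape: upgrade only when a write is really needed. */
static void timeslice_update(int need_write)
{
	int rwupgrade = 0;

	grq_ulock();
	if (need_write) {
		grq_upgrade_rwlock();
		rwupgrade = 1;
		shared_state++;		/* the modification readers must not race with */
	}
	if (rwupgrade)
		grq_wunlock();
	else
		grq_uunlock();
}

int main(void)
{
	timeslice_update(0);	/* read-only path: readers are never blocked */
	timeslice_update(1);	/* write path: upgraded only for the update */
	printf("shared_state = %d\n", shared_state);
	return 0;
}

If that reading of the patch is right, the payoff is the else branch: a path that turns out not to need write access backs out with the plain uunlock and never stops concurrent readers of the grq.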