--- kernel/sched/bfs.c | 159 +++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 90 deletions(-) Index: linux-3.3-ck1/kernel/sched/bfs.c =================================================================== --- linux-3.3-ck1.orig/kernel/sched/bfs.c 2012-04-01 14:02:04.000000000 +1000 +++ linux-3.3-ck1/kernel/sched/bfs.c 2012-04-01 14:41:01.294921045 +1000 @@ -169,40 +169,42 @@ struct urw_lock { rwlock_t rwlock; }; -static void urw_lock_init(struct urw_lock *urw) { +typedef struct urw_lock urw_lock_t; + +static void urw_lock_init(urw_lock_t *urw) { raw_spin_lock_init(&urw->lock); rwlock_init(&urw->rwlock); } -static inline void __urw_write_lock(struct urw_lock *urw) +static inline void __urw_write_lock(urw_lock_t *urw) __acquires(&urw->rwlock) { rwlock_acquire(&urw->rwlock.dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(&urw->rwlock, do_raw_write_trylock, do_raw_write_lock); } -static inline void __urw_write_unlock(struct urw_lock *urw) +static inline void __urw_write_unlock(urw_lock_t *urw) __releases(&urw->rwlock) { rwlock_release(&urw->rwlock.dep_map, 1, _RET_IP_); do_raw_write_unlock(&urw->rwlock); } -static inline void __urw_read_lock(struct urw_lock *urw) +static inline void __urw_read_lock(urw_lock_t *urw) __acquires(&urw->rwlock) { rwlock_acquire_read(&urw->rwlock.dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(&urw->rwlock, do_raw_read_trylock, do_raw_read_lock); } -static inline void __urw_read_unlock(struct urw_lock *urw) +static inline void __urw_read_unlock(urw_lock_t *urw) __releases(&urw->rwlock) { rwlock_release(&urw->rwlock.dep_map, 1, _RET_IP_); do_raw_read_unlock(&urw->rwlock); } -static inline void urw_write_lock(struct urw_lock *urw) +static inline void urw_write_lock(urw_lock_t *urw) __acquires(&urw->lock) __acquires(&urw->rwlock) { @@ -210,7 +212,7 @@ static inline void urw_write_lock(struct __urw_write_lock(urw); } -static inline void urw_write_unlock(struct urw_lock *urw) +static inline void urw_write_unlock(urw_lock_t *urw) 
__releases(&urw->lock) __releases(&urw->rwlock) { @@ -218,7 +220,7 @@ static inline void urw_write_unlock(stru raw_spin_unlock(&urw->lock); } -static inline void urw_read_lock(struct urw_lock *urw) +static inline void urw_read_lock(urw_lock_t *urw) __acquires(&urw->lock) __acquires(&urw->rwlock) __releases(&urw->lock) @@ -229,13 +231,31 @@ static inline void urw_read_lock(struct do_raw_spin_unlock(&urw->lock); } -static inline void urw_read_unlock(struct urw_lock *urw) +static inline void urw_upgrade_lock(urw_lock_t *urw) + __acquires(&urw->lock) +{ + raw_spin_lock(&urw->lock); +} + +static inline void urw_upgrade_unlock(urw_lock_t *urw) + __releases(&urw->lock) +{ + raw_spin_unlock(&urw->lock); +} + +static inline void upgrade_urw_lock(urw_lock_t *urw) + __acquires(&urw->rwlock) +{ + __urw_write_lock(urw); +} + +static inline void urw_read_unlock(urw_lock_t *urw) __releases(&urw->rwlock) { read_unlock(&urw->rwlock); } -static inline void urw_write_lock_irq(struct urw_lock *urw) +static inline void urw_write_lock_irq(urw_lock_t *urw) __acquires(&urw->lock) __acquires(&urw->rwlock) { @@ -243,7 +263,7 @@ static inline void urw_write_lock_irq(st __urw_write_lock(urw); } -static inline void urw_write_unlock_irq(struct urw_lock *urw) +static inline void urw_write_unlock_irq(urw_lock_t *urw) __releases(&urw->lock) __releases(&urw->rwlock) { @@ -251,7 +271,7 @@ static inline void urw_write_unlock_irq( raw_spin_unlock_irq(&urw->lock); } -static inline void urw_read_lock_irq(struct urw_lock *urw) +static inline void urw_read_lock_irq(urw_lock_t *urw) __acquires(&urw->lock) __acquires(&urw->rwlock) __releases(&urw->lock) @@ -262,13 +282,13 @@ static inline void urw_read_lock_irq(str do_raw_spin_unlock(&urw->lock); } -static inline void urw_read_unlock_irq(struct urw_lock *urw) +static inline void urw_read_unlock_irq(urw_lock_t *urw) __releases(&urw->rwlock) { read_unlock_irq(&urw->rwlock); } -static inline void urw_write_lock_irqsave(struct urw_lock *urw, unsigned long 
*flags) +static inline void urw_write_lock_irqsave(urw_lock_t *urw, unsigned long *flags) __acquires(&urw->lock) __acquires(&urw->rwlock) { @@ -276,7 +296,7 @@ static inline void urw_write_lock_irqsav __urw_write_lock(urw); } -static inline void urw_write_unlock_irqsave(struct urw_lock *urw, unsigned long *flags) +static inline void urw_write_unlock_irqrestore(urw_lock_t *urw, unsigned long *flags) __releases(&urw->lock) __releases(&urw->rwlock) { @@ -284,7 +304,7 @@ static inline void urw_write_unlock_irqs raw_spin_unlock_irqrestore(&urw->lock, *flags); } -static inline void urw_read_lock_irqsave(struct urw_lock *urw, unsigned long *flags) +static inline void urw_read_lock_irqsave(urw_lock_t *urw, unsigned long *flags) __acquires(&urw->lock) __acquires(&urw->rwlock) __releases(&urw->lock) @@ -295,7 +315,7 @@ static inline void urw_read_lock_irqsave do_raw_spin_unlock(&urw->lock); } -static inline void urw_read_unlock_irqrestore(struct urw_lock *urw, unsigned long *flags) +static inline void urw_read_unlock_irqrestore(urw_lock_t *urw, unsigned long *flags) __releases(&urw->rwlock) { read_unlock_irqrestore(&urw->rwlock, *flags); @@ -307,8 +327,7 @@ static inline void urw_read_unlock_irqre * struct. 
*/ struct global_rq { - raw_spinlock_t lock; - rwlock_t rwlock; + urw_lock_t urw; unsigned long nr_running; unsigned long nr_uninterruptible; unsigned long long nr_switches; @@ -600,146 +619,107 @@ static inline bool task_running(struct t } static inline void grq_write_lock(void) - __acquires(grq.rwlock) { - rwlock_acquire(&grq.rwlock.dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(&grq.rwlock, do_raw_write_trylock, do_raw_write_lock); + urw_write_lock(&grq.urw); } -static inline void grq_write_unlock(void) - __releases(grq.rwlock) -{ - rwlock_release(&grq.rwlock.dep_map, 1, _RET_IP_); - do_raw_write_unlock(&grq.rwlock); +static inline void grq_write_unlock(void) { + urw_write_unlock(&grq.urw); } static inline void grq_read_lock(void) - __acquires(grq.rwlock) { - rwlock_acquire_read(&grq.rwlock.dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(&grq.rwlock, do_raw_read_trylock, do_raw_read_lock); + urw_read_lock(&grq.urw); } static inline void grq_read_unlock(void) - __releases(grq.rwlock) { - rwlock_release(&grq.rwlock.dep_map, 1, _RET_IP_); - do_raw_read_unlock(&grq.rwlock); + urw_read_unlock(&grq.urw); } static inline void grq_wlock(void) - __acquires(grq.lock) - __acquires(grq.rwlock) { - raw_spin_lock(&grq.lock); - grq_write_lock(); + urw_write_lock(&grq.urw); } static inline void grq_lock(void) - __acquires(grq.lock) { - raw_spin_lock(&grq.lock); + urw_upgrade_lock(&grq.urw); } static inline void grq_wunlock(void) - __releases(grq.rwlock) - __releases(grq.lock) { - grq_write_unlock(); - raw_spin_unlock(&grq.lock); + urw_write_unlock(&grq.urw); } static inline void grq_unlock(void) - __releases(grq.lock) { - raw_spin_unlock(&grq.lock); + urw_upgrade_unlock(&grq.urw); } static inline void grq_wlock_irq(void) - __acquires(grq.lock) - __acquires(grq.rwlock) { - raw_spin_lock_irq(&grq.lock); - grq_write_lock(); + urw_write_lock_irq(&grq.urw); } static inline void grq_lock_irq(void) - __acquires(grq.lock) { - raw_spin_lock_irq(&grq.lock); + local_irq_disable(); + 
urw_upgrade_lock(&grq.urw); } static inline void time_wlock_grq(struct rq *rq) - __acquires(grq.lock) - __acquires(grq.rwlock) { - grq_wlock(); + urw_write_lock(&grq.urw); update_clocks(rq); } static inline void grq_wunlock_irq(void) - __releases(grq.rwlock) - __releases(grq.lock) { - grq_write_unlock(); - raw_spin_unlock_irq(&grq.lock); + urw_write_unlock_irq(&grq.urw); } static inline void grq_unlock_irq(void) - __releases(grq.lock) { - raw_spin_unlock_irq(&grq.lock); + urw_upgrade_unlock(&grq.urw); + local_irq_enable(); } static inline void grq_rlock_irqsave(unsigned long *flags) - __acquires(grq.lock) - __acquires(grq.rwlock) - __releases(grq.lock) { - raw_spin_lock_irqsave(&grq.lock, *flags); - grq_read_lock(); - spin_release(&grq.lock.dep_map, 1, _RET_IP_); - do_raw_spin_unlock(&grq.lock); + urw_read_lock_irqsave(&grq.urw, flags); } static inline void grq_wlock_irqsave(unsigned long *flags) - __acquires(grq.lock) - __acquires(grq.rwlock) { - raw_spin_lock_irqsave(&grq.lock, *flags); - grq_write_lock(); + urw_write_lock_irqsave(&grq.urw, flags); } static inline void grq_lock_irqsave(unsigned long *flags) - __acquires(grq.lock) { - raw_spin_lock_irqsave(&grq.lock, *flags); + local_irq_save(*flags); + urw_upgrade_lock(&grq.urw); } static inline void grq_upgrade_rwlock(void) - __acquires(grq.rwlock) { - grq_write_lock(); + upgrade_urw_lock(&grq.urw); } static inline void grq_runlock_irqrestore(unsigned long *flags) - __releases(grq.rwlock) { - read_unlock_irqrestore(&grq.rwlock, *flags); + urw_read_unlock_irqrestore(&grq.urw, flags); } static inline void grq_wunlock_irqrestore(unsigned long *flags) - __releases(grq.rwlock) - __releases(grq.lock) { - grq_write_unlock(); - raw_spin_unlock_irqrestore(&grq.lock, *flags); + urw_write_unlock_irqrestore(&grq.urw, flags); } static inline void grq_unlock_irqrestore(unsigned long *flags) - __releases(grq.lock) { - raw_spin_unlock_irqrestore(&grq.lock, *flags); + urw_upgrade_unlock(&grq.urw); + local_irq_restore(*flags); } 
static inline struct rq @@ -827,7 +807,7 @@ static inline void task_grq_unlock(unsig */ bool grunqueue_is_locked(void) { - return raw_spin_is_locked(&grq.lock); + return raw_spin_is_locked(&grq.urw.lock); } #if 0 @@ -1467,7 +1447,7 @@ static void resched_task(struct task_str { int cpu; - assert_raw_spin_locked(&grq.lock); + assert_raw_spin_locked(&grq.urw.lock); if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) return; @@ -1487,7 +1467,7 @@ static void resched_task(struct task_str #else static inline void resched_task(struct task_struct *p) { - assert_raw_spin_locked(&grq.lock); + assert_raw_spin_locked(&grq.urw.lock); set_tsk_need_resched(p); } #endif @@ -4875,8 +4855,8 @@ SYSCALL_DEFINE0(sched_yield) */ grq_write_unlock(); __release(grq.lock); - spin_release(&grq.lock.dep_map, 1, _THIS_IP_); - do_raw_spin_unlock(&grq.lock); + spin_release(&grq.urw.lock.dep_map, 1, _THIS_IP_); + do_raw_spin_unlock(&grq.urw.lock); preempt_enable_no_resched(); schedule(); @@ -7236,8 +7216,7 @@ void __init sched_init(void) for (i = 1 ; i < PRIO_RANGE ; i++) prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; - raw_spin_lock_init(&grq.lock); - rwlock_init(&grq.rwlock); + urw_lock_init(&grq.urw); grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0; grq.niffies = 0; grq.last_jiffy = jiffies;