---
 kernel/sched/bfs.c |  137 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

Index: linux-3.3-ck1/kernel/sched/bfs.c
===================================================================
--- linux-3.3-ck1.orig/kernel/sched/bfs.c	2012-03-28 00:30:33.000000000 +1100
+++ linux-3.3-ck1/kernel/sched/bfs.c	2012-04-01 14:02:04.220213670 +1000
@@ -164,6 +164,143 @@ static inline int timeslice(void)
 	return MS_TO_US(rr_interval);
 }
 
+/*
+ * "Upgradeable" read-write lock: a raw spinlock guarding entry to an
+ * rwlock.  Writers hold both; readers take the spinlock only long enough
+ * to acquire the read side, then drop it.
+ */
+struct urw_lock {
+	raw_spinlock_t lock;
+	rwlock_t rwlock;
+};
+
+static void urw_lock_init(struct urw_lock *urw) {
+	raw_spin_lock_init(&urw->lock);
+	rwlock_init(&urw->rwlock);
+}
+
+static inline void __urw_write_lock(struct urw_lock *urw)
+	__acquires(&urw->rwlock)
+{
+	rwlock_acquire(&urw->rwlock.dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(&urw->rwlock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __urw_write_unlock(struct urw_lock *urw)
+	__releases(&urw->rwlock)
+{
+	rwlock_release(&urw->rwlock.dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(&urw->rwlock);
+}
+
+static inline void __urw_read_lock(struct urw_lock *urw)
+	__acquires(&urw->rwlock)
+{
+	rwlock_acquire_read(&urw->rwlock.dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(&urw->rwlock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __urw_read_unlock(struct urw_lock *urw)
+	__releases(&urw->rwlock)
+{
+	rwlock_release(&urw->rwlock.dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(&urw->rwlock);
+}
+
+static inline void urw_write_lock(struct urw_lock *urw)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+{
+	raw_spin_lock(&urw->lock);
+	__urw_write_lock(urw);
+}
+
+static inline void urw_write_unlock(struct urw_lock *urw)
+	__releases(&urw->lock)
+	__releases(&urw->rwlock)
+{
+	__urw_write_unlock(urw);
+	raw_spin_unlock(&urw->lock);
+}
+
+static inline void urw_read_lock(struct urw_lock *urw)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+	__releases(&urw->lock)
+{
+	raw_spin_lock(&urw->lock);
+	__urw_read_lock(urw);
+	spin_release(&urw->lock.dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(&urw->lock);
+}
+
+static inline void urw_read_unlock(struct urw_lock *urw)
+	__releases(&urw->rwlock)
+{
+	read_unlock(&urw->rwlock);
+}
+
+static inline void urw_write_lock_irq(struct urw_lock *urw)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+{
+	raw_spin_lock_irq(&urw->lock);
+	__urw_write_lock(urw);
+}
+
+static inline void urw_write_unlock_irq(struct urw_lock *urw)
+	__releases(&urw->lock)
+	__releases(&urw->rwlock)
+{
+	__urw_write_unlock(urw);
+	raw_spin_unlock_irq(&urw->lock);
+}
+
+static inline void urw_read_lock_irq(struct urw_lock *urw)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+	__releases(&urw->lock)
+{
+	raw_spin_lock_irq(&urw->lock);
+	__urw_read_lock(urw);
+	spin_release(&urw->lock.dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(&urw->lock);
+}
+
+static inline void urw_read_unlock_irq(struct urw_lock *urw)
+	__releases(&urw->rwlock)
+{
+	read_unlock_irq(&urw->rwlock);
+}
+
+static inline void urw_write_lock_irqsave(struct urw_lock *urw, unsigned long *flags)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+{
+	raw_spin_lock_irqsave(&urw->lock, *flags);
+	__urw_write_lock(urw);
+}
+
+static inline void urw_write_unlock_irqsave(struct urw_lock *urw, unsigned long *flags)
+	__releases(&urw->lock)
+	__releases(&urw->rwlock)
+{
+	__urw_write_unlock(urw);
+	raw_spin_unlock_irqrestore(&urw->lock, *flags);
+}
+
+static inline void urw_read_lock_irqsave(struct urw_lock *urw, unsigned long *flags)
+	__acquires(&urw->lock)
+	__acquires(&urw->rwlock)
+	__releases(&urw->lock)
+{
+	raw_spin_lock_irqsave(&urw->lock, *flags);
+	__urw_read_lock(urw);
+	spin_release(&urw->lock.dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(&urw->lock);
+}
+
+static inline void urw_read_unlock_irqrestore(struct urw_lock *urw, unsigned long *flags)
+	__releases(&urw->rwlock)
+{
+	read_unlock_irqrestore(&urw->rwlock, *flags);
+}
 /*
  * The global runqueue data that all CPUs work off. Data is protected either
  * by the global grq lock, or the discrete lock that precedes the data in this