Add an "above background load" function which can be used for background
tasks elsewhere (e.g. VM).

-ck

---
---
 include/linux/sched.h |    7 +++++++
 kernel/sched_bfs.c    |   20 ++++++++++++++++++++
 2 files changed, 27 insertions(+)

Index: linux-3.1-ck2/include/linux/sched.h
===================================================================
--- linux-3.1-ck2.orig/include/linux/sched.h	2011-11-11 13:28:20.631507728 +1100
+++ linux-3.1-ck2/include/linux/sched.h	2011-11-11 13:28:21.025507698 +1100
@@ -1600,6 +1600,7 @@ bool grunqueue_is_locked(void);
 void grq_unlock_wait(void);
 void cpu_scaling(int cpu);
 void cpu_nonscaling(int cpu);
+int above_background_load(void);
 
 #define tsk_seruntime(t)	((t)->sched_time)
 #define tsk_rttimeout(t)	((t)->rt_timeout)
@@ -1647,6 +1648,12 @@ static inline bool iso_task(struct task_
 {
 	return false;
 }
+
+/* Anyone feel like implementing this? */
+static inline int above_background_load(void)
+{
+	return 1;
+}
 #endif	/* CONFIG_SCHED_BFS */
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
Index: linux-3.1-ck2/kernel/sched_bfs.c
===================================================================
--- linux-3.1-ck2.orig/kernel/sched_bfs.c	2011-11-11 13:28:20.637507727 +1100
+++ linux-3.1-ck2/kernel/sched_bfs.c	2011-11-11 13:28:21.026507698 +1100
@@ -578,6 +578,26 @@ static inline void __task_grq_unlock(voi
 	grq_unlock();
 }
 
+/*
+ * Look for any tasks *anywhere* that are running nice 0 or better. We do
+ * this lockless for overhead reasons since the occasional wrong result
+ * is harmless.
+ */
+int above_background_load(void)
+{
+	struct task_struct *cpu_curr;
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		cpu_curr = cpu_rq(cpu)->curr;
+		if (unlikely(!cpu_curr))
+			continue;
+		if (PRIO_TO_NICE(cpu_curr->static_prio) < 1)
+			return 1;
+	}
+	return 0;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {