Update version number.

Remove dodgy fix for voluntary preempt which is no longer required since it
is fixed in mainline, and which now leads to a hang in BFS. Thanks to
Matthias Kohler.

Fix to accounting on 32 bit, thanks to Jan Vermeulen.

---
 kernel/sched/bfs.c |   18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

Index: linux-3.7-bfs/kernel/sched/bfs.c
===================================================================
--- linux-3.7-bfs.orig/kernel/sched/bfs.c	2012-12-12 21:31:24.000000000 +1100
+++ linux-3.7-bfs/kernel/sched/bfs.c	2012-12-12 21:53:52.485773384 +1100
@@ -136,7 +136,7 @@
 
 void print_scheduler_version(void)
 {
-	printk(KERN_INFO "BFS CPU scheduler v0.425 by Con Kolivas.\n");
+	printk(KERN_INFO "BFS CPU scheduler v0.426 by Con Kolivas.\n");
 }
 
 /*
@@ -4700,10 +4700,6 @@ static inline bool should_resched(void)
 
 static void __cond_resched(void)
 {
-	/* NOT a real fix but will make voluntary preempt work. 馬鹿な事 */
-	if (unlikely(system_state != SYSTEM_RUNNING))
-		return;
-
 	add_preempt_count(PREEMPT_ACTIVE);
 	schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
@@ -5072,6 +5068,10 @@ void init_idle(struct task_struct *idle,
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
+void nohz_balance_enter_idle(int cpu)
+{
+}
+
 void select_nohz_load_balancer(int stop_tick)
 {
 }
@@ -7482,9 +7482,9 @@ void task_times(struct task_struct *p, c
 	rtime = nsecs_to_cputime(p->sched_time);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * utime);
+		temp *= utime;
 		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
@@ -7515,9 +7515,9 @@ void thread_group_times(struct task_stru
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * cputime.utime);
+		temp *= cputime.utime;
		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
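
For context, the two accounting hunks fix a 32 bit multiply overflow:
cputime_t is an unsigned long (so only 32 bits) on typical 32 bit
configurations, which means the old (u64)(rtime * utime) did the multiply
in 32 bits and only then widened the already-truncated result. Widening
into the u64 temp first makes the multiply happen in 64 bits. A minimal
userspace sketch of the difference follows; the values are made up purely
to trigger the wrap, and plain division stands in for the kernel's
do_div():

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t rtime = 100000;	/* runtime in cputime units */
		uint32_t utime = 90000;		/* sampled user time */
		uint32_t total = 100000;	/* sampled utime + stime */

		/* Old code: rtime * utime wraps modulo 2^32 before the
		 * cast to u64, so the cast comes too late. */
		uint64_t bad = (uint64_t)(rtime * utime);

		/* New code: widen first, then multiply in 64 bits. */
		uint64_t good = rtime;
		good *= utime;

		printf("bad:  %llu\n", (unsigned long long)(bad / total));  /* 4100 */
		printf("good: %llu\n", (unsigned long long)(good / total)); /* 90000 */
		return 0;
	}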