Division is done more carefully when scaling utime in mainline; match it.

-ck

---
 kernel/sched/bfs.c | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

Index: linux-3.7-bfs/kernel/sched/bfs.c
===================================================================
--- linux-3.7-bfs.orig/kernel/sched/bfs.c	2013-01-29 01:22:35.994785437 +1100
+++ linux-3.7-bfs/kernel/sched/bfs.c	2013-01-29 01:22:36.408780261 +1100
@@ -7477,6 +7477,19 @@ void thread_group_times(struct task_stru
 	*st = cputime.stime;
 }
 #else
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+	u64 temp = (__force u64) rtime;
+
+	temp *= (__force u64) utime;
+
+	if (sizeof(cputime_t) == 4)
+		temp = div_u64(temp, (__force u32) total);
+	else
+		temp = div64_u64(temp, (__force u64) total);
+
+	return (__force cputime_t) temp;
+}
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
@@ -7484,13 +7497,9 @@ void task_times(struct task_struct *p, c
 
 	rtime = nsecs_to_cputime(p->sched_time);
 
-	if (total) {
-		u64 temp = rtime;
-
-		temp *= utime;
-		do_div(temp, total);
-		utime = (cputime_t)temp;
-	} else
+	if (total)
+		utime = scale_utime(utime, rtime, total);
+	else
 		utime = rtime;
 
 	/*
@@ -7517,13 +7526,9 @@ void thread_group_times(struct task_stru
 	total = cputime.utime + cputime.stime;
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-	if (total) {
-		u64 temp = rtime;
-
-		temp *= cputime.utime;
-		do_div(temp, total);
-		utime = (cputime_t)temp;
-	} else
+	if (total)
+		utime = scale_utime(cputime.utime, rtime, total);
+	else
 		utime = rtime;
 
 	sig->prev_utime = max(sig->prev_utime, utime);
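
What "more carefully" buys: do_div() divides a u64 by a 32-bit divisor, so the old path silently truncated total whenever cputime_t is 64 bits wide. Below is a minimal userspace sketch of the two divisions, assuming a 64-bit cputime_t; the helper names are illustrative, not kernel API.

/*
 * Minimal userspace sketch, assuming a 64-bit cputime_t.
 * scale64() stands in for the new div64_u64() path; scale32()
 * mimics the old do_div() behaviour, where the divisor is
 * truncated to 32 bits before dividing.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t cputime_t;

/* full 64-by-64 division, as scale_utime() now does for 64-bit cputime_t */
static cputime_t scale64(cputime_t utime, cputime_t rtime, cputime_t total)
{
	return rtime * utime / total;
}

/* old do_div()-style path: high bits of total are silently dropped */
static cputime_t scale32(cputime_t utime, cputime_t rtime, cputime_t total)
{
	return rtime * utime / (uint32_t)total;
}

int main(void)
{
	/* totals past 2^32 are routine once cputime is nanosecond-based */
	cputime_t utime = 3000000000ULL;	/* 3s of user time, in ns */
	cputime_t stime = 2000000000ULL;	/* 2s of system time, in ns */
	cputime_t total = utime + stime;
	cputime_t rtime = 5000000000ULL;	/* scheduler's own runtime count */

	printf("64-bit divisor: %llu\n",
	       (unsigned long long)scale64(utime, rtime, total));
	printf("32-bit divisor: %llu\n",
	       (unsigned long long)scale32(utime, rtime, total));
	return 0;
}

With these numbers the truncated divisor (5000000000 wraps to 705032704) inflates utime roughly sevenfold. scale_utime() avoids this by picking div_u64() or div64_u64() based on sizeof(cputime_t), a comparison the compiler resolves at build time, so the 32-bit case keeps the cheap division and the 64-bit case gets a correct one.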