Silly microoptimisation just because we can, along with explanation.

-ck

---
 kernel/sched/bfs.c |    9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

Index: linux-4.7-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck4.orig/kernel/sched/bfs.c	2016-09-13 17:21:53.538220126 +1000
+++ linux-4.7-ck4/kernel/sched/bfs.c	2016-09-13 17:21:53.536220171 +1000
@@ -1009,8 +1009,9 @@ static int effective_prio(struct task_st
 }
 
 /*
- * Update the load average for feeding into cpu frequency governors. Use a rolling
- * average with ~ time constant of 32ms
+ * Update the load average for feeding into cpu frequency governors. Use a
+ * rough estimate of a rolling average with ~ time constant of 32ms.
+ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144
  */
 static void update_load_avg(struct rq *rq)
 {
@@ -1019,10 +1020,10 @@ static void update_load_avg(struct rq *r
 		unsigned long us_interval = (rq->clock - rq->load_update) >> 10;
 		long load;
 
-		load = rq->load_avg - (rq->load_avg * us_interval * 80 / 32768 / 128);
+		load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
 		if (unlikely(load < 0))
 			load = 0;
-		load += rq->soft_affined * rq_load_avg(rq) * us_interval * 80 / 32768 / 128;
+		load += rq->soft_affined * rq_load_avg(rq) * us_interval * 5 / 262144;
 		rq->load_avg = load;
 	}
 	rq->load_update = rq->clock;
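
For reference (not part of the patch): the constant folding is exact in integer
arithmetic, since 80 / (32768 * 128) == 80 / 4194304 == 5 / 262144, and two
successive truncating divisions by 32768 and 128 give the same result as a
single truncating division by their product. The "80/128 ~ 0.63" figure in the
new comment is presumably what falls out when us_interval equals one full time
constant (32768 us): the average then sheds roughly 63% of its old value,
which is about 1 - 1/e, matching the ~32ms time constant. A minimal standalone
sketch checking that the two scalings agree (file and function names are made
up for illustration):

/*
 * scale_check.c: standalone sketch, not part of the patch above.
 * Verifies that the old scaling (x * 80 / 32768 / 128) and the new
 * scaling (x * 5 / 262144) give identical results, since
 * 80 / (32768 * 128) == 80 / 4194304 == 5 / 262144.
 */
#include <stdio.h>

/* Old form: multiply by 80, then two successive truncating divisions. */
static unsigned long scale_old(unsigned long x)
{
	return x * 80 / 32768 / 128;
}

/* New form: multiply by 5, single division by the folded constant. */
static unsigned long scale_new(unsigned long x)
{
	return x * 5 / 262144;
}

int main(void)
{
	unsigned long x;

	/* Exhaustive check over a modest range; x * 80 stays below 2^31
	 * for x < 2^24, so this is safe even with 32-bit unsigned long. */
	for (x = 0; x < (1UL << 24); x++) {
		if (scale_old(x) != scale_new(x)) {
			printf("mismatch at %lu\n", x);
			return 1;
		}
	}
	printf("scalings agree; fraction shed per 32ms: %.3f\n", 80.0 / 128.0);
	return 0;
}

The practical gain is only one fewer integer division per update, hence the
"silly microoptimisation" label.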