From da6ce7c98302d336c7b7a0624b1efac00eb50780 Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 5 May 2020 16:55:52 +1000
Subject: [PATCH 17/19] Fix inappropriate double counting of rolling load
 average.

---
 kernel/sched/MuQSS.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 9c225b179ddb..48e64c76b476 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -742,17 +742,15 @@ static inline int rq_load(struct rq *rq)
 static void update_load_avg(struct rq *rq, unsigned int flags)
 {
 	long us_interval, load;
-	unsigned long curload;
 
 	us_interval = NS_TO_US(rq->niffies - rq->load_update);
 	if (unlikely(us_interval <= 0))
 		return;
 
-	curload = rq_load(rq);
 	load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
 	if (unlikely(load < 0))
 		load = 0;
-	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
+	load += rq_load(rq) * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
 	rq->load_avg = load;
 
 	rq->load_update = rq->niffies;
@@ -769,17 +767,15 @@ static void update_load_avg(struct rq *rq, unsigned int flags)
 static void update_irq_load_avg(struct rq *rq, long delta)
 {
 	long us_interval, load;
-	unsigned long curload;
 
 	us_interval = NS_TO_US(rq->niffies - rq->irq_load_update);
 	if (unlikely(us_interval <= 0))
 		return;
 
-	curload = NS_TO_US(delta) / us_interval;
 	load = rq->irq_load_avg - (rq->irq_load_avg * us_interval * 5 / 262144);
 	if (unlikely(load < 0))
 		load = 0;
-	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
+	load += NS_TO_US(delta) * SCHED_CAPACITY_SCALE * 5 / 262144;
 	rq->irq_load_avg = load;
 
 	rq->irq_load_update = rq->niffies;
-- 
2.25.1
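
Note on the arithmetic: the rolling average both hunks maintain decays the
old value by us_interval * 5 / 262144 per update and folds the current load
back in at the same rate, so at steady state it converges to
curload * SCHED_CAPACITY_SCALE. Multiplying by curload twice, as the removed
lines did, therefore weights the instantaneous load quadratically. Below is
a minimal standalone C sketch of that arithmetic, not MuQSS code: roll_avg,
the constant load of 2 and the 1000us tick in main() are illustrative
assumptions; only SCHED_CAPACITY_SCALE and the 5/262144 decay factor are
taken from the patch.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024L

/*
 * Decay the running average over a microsecond interval, then
 * accumulate the current load once, as in the fixed form above.
 */
static long roll_avg(long avg, long curload, long us_interval)
{
	long load = avg - (avg * us_interval * 5 / 262144);

	if (load < 0)
		load = 0;
	return load + curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
}

int main(void)
{
	long avg = 0;
	int i;

	/* A constant load of 2 runnable tasks, sampled every 1000us. */
	for (i = 0; i < 2000; i++)
		avg = roll_avg(avg, 2, 1000);

	/* Settles near 2 * SCHED_CAPACITY_SCALE = 2048. */
	printf("steady-state avg: %ld\n", avg);
	return 0;
}

With the removed curload * curload term, the same loop settles near
curload squared times SCHED_CAPACITY_SCALE (about 4096 here), i.e. the
instantaneous load counted twice, which is the double counting the subject
line refers to.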