From 8d5ded85ef9200ec67bc1f8f5017815a73a9afd8 Mon Sep 17 00:00:00 2001
From: ckolivas
Date: Mon, 10 Oct 2016 14:06:53 +1100
Subject: [PATCH 16/80] sched_info_de/queued only on de/activate.

---
 kernel/sched/MuQSS.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 2bb868be..95063b5 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -795,7 +795,6 @@ static void update_load_avg(struct rq *rq)
 static void dequeue_task(struct task_struct *p, struct rq *rq)
 {
 	skiplist_delete(rq->sl, &p->node);
-	sched_info_dequeued(task_rq(p), p);
 	update_load_avg(rq);
 }
 
@@ -883,15 +882,9 @@ static void enqueue_task(struct task_struct *p, struct rq *rq)
 	 */
 	randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
 	skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
-	sched_info_queued(rq, p);
 	update_load_avg(rq);
 }
 
-static inline void requeue_task(struct task_struct *p)
-{
-	sched_info_queued(task_rq(p), p);
-}
-
 /*
  * Returns the relative length of deadline all compared to the shortest
  * deadline which is that of nice -20.
@@ -1242,6 +1235,7 @@ static void activate_task(struct task_struct *p, struct rq *rq)
 		atomic_dec(&grq.nr_uninterruptible);
 
 	enqueue_task(p, rq);
+	sched_info_queued(rq, p);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	atomic_inc(&grq.nr_running);
 	inc_qnr();
@@ -1256,6 +1250,7 @@ static inline void deactivate_task(struct task_struct *p, struct rq *rq)
 	if (task_contributes_to_load(p))
 		atomic_inc(&grq.nr_uninterruptible);
 
+	sched_info_dequeued(rq, p);
 	p->on_rq = 0;
 	atomic_dec(&grq.nr_running);
 	update_load_avg(rq);
@@ -3321,7 +3316,6 @@ static void task_running_tick(struct rq *rq)
 
 	p = rq->curr;
 	rq_lock(rq);
-	requeue_task(p);
 	__set_tsk_resched(p);
 	rq_unlock(rq);
 }
@@ -5023,7 +5017,6 @@ SYSCALL_DEFINE0(sched_yield)
 	p = current;
 	rq = this_rq_lock();
 	schedstat_inc(task_rq(p), yld_count);
-	requeue_task(p);
 
 	/*
	 * Since we are going to call schedule() anyway, there's
-- 
2.7.4