Try reverting the softirq handling to see if it's better or worse without it.

-ck

diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 2fa9683..987f288 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -752,13 +752,6 @@ static inline bool task_queued(struct task_struct *p)
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 static inline void resched_if_idle(struct rq *rq);
 
-/* Dodgy workaround till we figure out where the softirqs are going */
-static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
-{
-	if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
-		do_softirq_own_stack();
-}
-
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
@@ -813,11 +806,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 		raw_spin_unlock(&prev->pi_lock);
 	}
 #endif
-	rq_unlock(rq);
-
-	do_pending_softirq(rq, current);
-
-	local_irq_enable();
+	rq_unlock_irq(rq);
 }
 
 static inline bool deadline_before(u64 deadline, u64 time)
@@ -3857,9 +3846,7 @@ static void __sched notrace __schedule(bool preempt)
 		context_switch(rq, prev, next); /* unlocks the rq */
 	} else {
 		check_siblings(rq);
-		rq_unlock(rq);
-		do_pending_softirq(rq, next);
-		local_irq_enable();
+		rq_unlock_irq(rq);
 	}
 }
 
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index 3565a7d..f9510d7 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -1,6 +1,5 @@
 #include
 #include
-#include
 #include
 #include
 #include "cpuacct.h"
@@ -326,18 +325,4 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
 #define arch_scale_freq_invariant()	(false)
 #endif
 
-/*
- * This should only be called when current == rq->idle. Dodgy workaround for
- * when softirqs are pending and we are in the idle loop. Setting current to
- * resched will kick us out of the idle loop and the softirqs will be serviced
- * on our next pass through schedule().
- */
-static inline bool softirq_pending(int cpu)
-{
-	if (likely(!local_softirq_pending()))
-		return false;
-	set_tsk_need_resched(current);
-	return true;
-}
-
 #endif /* MUQSS_SCHED_H */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 51264e6..060b76d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -208,8 +208,6 @@ static void cpu_idle_loop(void)
 	int cpu = smp_processor_id();
 
 	while (1) {
-		bool pending = false;
-
 		/*
 		 * If the arch has a polling bit, we maintain an invariant:
 		 *
@@ -221,10 +219,7 @@ static void cpu_idle_loop(void)
 		__current_set_polling();
 		quiet_vmstat();
 
-		if (unlikely(softirq_pending(cpu)))
-			pending = true;
-		else
-			tick_nohz_idle_enter();
+		tick_nohz_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -264,8 +259,7 @@ static void cpu_idle_loop(void)
 		 * not have had an IPI to fold the state for us.
 		 */
 		preempt_set_need_resched();
-		if (!pending)
-			tick_nohz_idle_exit();
+		tick_nohz_idle_exit();
 		__current_clr_polling();
 
 		/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cdefab6..c64fc51 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1813,8 +1813,3 @@ static inline void cpufreq_trigger_update(u64 time) {}
 #else	/* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant()	(false)
 #endif
-
-static inline bool softirq_pending(int cpu)
-{
-	return false;
-}
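
For anyone comparing the two paths: the net effect of the MuQSS.c hunks is to collapse the open-coded "unlock, run pending softirqs, re-enable interrupts" sequence back into the plain unlock-and-enable helper. A minimal sketch of the before/after, assuming rq_unlock() and rq_unlock_irq() are the usual MuQSS wrappers around the raw rq->lock spinlock:

	/* Workaround path (removed by the patch above): drop the rq lock,
	 * service softirqs ourselves if we are switching to the idle task,
	 * then re-enable local interrupts. */
	rq_unlock(rq);
	do_pending_softirq(rq, next);
	local_irq_enable();

	/* Reverted path: unlock and re-enable interrupts in one step
	 * (assumed to be raw_spin_unlock_irq(&rq->lock) underneath); any
	 * pending softirqs are left to the normal irq_exit()/ksoftirqd
	 * handling instead of being run from the scheduler. */
	rq_unlock_irq(rq);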