sched, idle: Fix the idle polling state logic
author Peter Zijlstra <peterz@infradead.org>
Wed, 11 Sep 2013 10:43:13 +0000 (12:43 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 29 Nov 2013 19:27:55 +0000 (11:27 -0800)
commit ea8117478918a4734586d35ff530721b682425be upstream.

Mike reported that commit 7d1a9417 ("x86: Use generic idle loop")
regressed several workloads and caused excessive reschedule
interrupts.

The patch in question failed to notice that the x86 code had an
inverted sense of the polling state versus the new generic code (x86:
default polling, generic: default !polling).
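
To see why that matters, the scheduler wakeup path of this era looks
roughly like the sketch below (a simplified resched_task(), not part
of this patch): if the woken CPU is not seen as polling, a reschedule
IPI is sent, even though a polling (mwait) idle loop would have woken
up from the NEED_RESCHED store alone.

	set_tsk_need_resched(p);		/* store NEED_RESCHED */
	smp_mb();				/* pairs with the idle-side barrier */
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);	/* the "excessive" IPIs */

With the polling sense inverted, the idle CPU never appeared to be
polling, so every wakeup took the IPI path.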

Fix the two prominent x86 mwait-based idle drivers (intel_idle and
the ACPI processor idle driver) and introduce a few new generic
polling helpers (fixing the wrong smp_mb__after_clear_bit usage).
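
With the helpers, an mwait-style idle entry becomes a sketch like the
following (mirroring the intel_idle hunk below; eax/ecx are the usual
mwait hint arguments):

	if (!current_set_polling_and_test()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

current_set_polling_and_test() sets the polling state, issues a full
barrier and then tests TIF_NEED_RESCHED, so a concurrent wakeup either
sees polling (and skips the IPI) or the idle side sees NEED_RESCHED
(and skips the mwait).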

Also switch the idle routines to using tif_need_resched(), which is
an immediate TIF_NEED_RESCHED test, as opposed to need_resched(),
which will end up being slightly different.
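
At the time of this patch the two are equivalent; need_resched() is
roughly:

	static inline int need_resched(void)
	{
		return unlikely(test_thread_flag(TIF_NEED_RESCHED));
	}

Using tif_need_resched() pins the idle paths to the raw TIF bit, so
they keep their behaviour when need_resched() is later redefined (for
example, in terms of a folded preempt count).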

Reported-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: lenb@kernel.org
Cc: tglx@linutronix.de
Link: http://lkml.kernel.org/n/tip-nc03imb0etuefmzybzj7sprf@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kernel/process.c
drivers/acpi/processor_idle.c
drivers/idle/intel_idle.c
include/linux/sched.h
include/linux/thread_info.h
kernel/cpu/idle.c

index c83516be1052552adefc6324ee8b0ecd4a1b1ac2..3fb8d95ab8b5ea3635ddb1f0d9f9c12e3a348285 100644 (file)
@@ -391,9 +391,9 @@ static void amd_e400_idle(void)
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
-                local_irq_disable();
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
-                local_irq_enable();
+               local_irq_disable();
+               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+               local_irq_enable();
        } else
                default_idle();
 }
index f98dd00b51a94b2d7e2d7dc9cd15f3e941e81bff..c7414a545a4f402506468c0e1d73aa160e55e836 100644 (file)
@@ -119,17 +119,10 @@ static struct dmi_system_id processor_power_dmi_table[] = {
  */
 static void acpi_safe_halt(void)
 {
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched()) {
+       if (!tif_need_resched()) {
                safe_halt();
                local_irq_disable();
        }
-       current_thread_info()->status |= TS_POLLING;
 }
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
@@ -737,6 +730,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        if (unlikely(!pr))
                return -EINVAL;
 
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
+                       return -EINVAL;
+       }
+
        lapic_timer_state_broadcast(pr, cx, 1);
        acpi_idle_do_entry(cx);
 
@@ -790,18 +788,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        if (unlikely(!pr))
                return -EINVAL;
 
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-
-               if (unlikely(need_resched())) {
-                       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
                        return -EINVAL;
-               }
        }
 
        /*
@@ -819,9 +808,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
        sched_clock_idle_wakeup_event(0);
 
-       if (cx->entry_method != ACPI_CSTATE_FFH)
-               current_thread_info()->status |= TS_POLLING;
-
        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
 }
@@ -858,18 +844,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                }
        }
 
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-
-               if (unlikely(need_resched())) {
-                       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
                        return -EINVAL;
-               }
        }
 
        acpi_unlazy_tlb(smp_processor_id());
@@ -915,9 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
        sched_clock_idle_wakeup_event(0);
 
-       if (cx->entry_method != ACPI_CSTATE_FFH)
-               current_thread_info()->status |= TS_POLLING;
-
        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
 }
index fa6964d8681a0d126fcf7c4845896b3f06298f8f..f116d664b4737edcabb621a9fdb7076cbcee1334 100644 (file)
@@ -359,7 +359,7 @@ static int intel_idle(struct cpuidle_device *dev,
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-       if (!need_resched()) {
+       if (!current_set_polling_and_test()) {
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
index e27baeeda3f470ed99ae899d8de22da062856bf8..97ace7a665b323ca3751161d6a5d90d5db12ddf3 100644 (file)
@@ -2474,34 +2474,98 @@ static inline int tsk_is_polling(struct task_struct *p)
 {
        return task_thread_info(p)->status & TS_POLLING;
 }
-static inline void current_set_polling(void)
+static inline void __current_set_polling(void)
 {
        current_thread_info()->status |= TS_POLLING;
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+       __current_set_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_task()
+        */
+       smp_mb();
+
+       return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
        current_thread_info()->status &= ~TS_POLLING;
-       smp_mb__after_clear_bit();
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+       __current_clr_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_task()
+        */
+       smp_mb();
+
+       return unlikely(tif_need_resched());
 }
 #elif defined(TIF_POLLING_NRFLAG)
 static inline int tsk_is_polling(struct task_struct *p)
 {
        return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 }
-static inline void current_set_polling(void)
+
+static inline void __current_set_polling(void)
 {
        set_thread_flag(TIF_POLLING_NRFLAG);
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+       __current_set_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_task()
+        *
+        * XXX: assumes set/clear bit are identical barrier wise.
+        */
+       smp_mb__after_clear_bit();
+
+       return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
        clear_thread_flag(TIF_POLLING_NRFLAG);
 }
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+       __current_clr_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_task()
+        */
+       smp_mb__after_clear_bit();
+
+       return unlikely(tif_need_resched());
+}
+
 #else
 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void current_set_polling(void) { }
-static inline void current_clr_polling(void) { }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+       return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+       return unlikely(tif_need_resched());
+}
 #endif
 
 /*
index e7e04736802f60eca7847b5241b8c217c4b7a732..4ae6f32c8033de9ae577ef2e8418a2ba36a65b7f 100644 (file)
@@ -107,6 +107,8 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 #define set_need_resched()     set_thread_flag(TIF_NEED_RESCHED)
 #define clear_need_resched()   clear_thread_flag(TIF_NEED_RESCHED)
 
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+
 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
 /*
  * An arch can define its own version of set_restore_sigmask() to get the
index e695c0a0bcb5c84e79ee93e80de57e4fbab69b33..c261409500e441e6880cf712707c4b11db88caef 100644 (file)
@@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void)
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
-       while (!need_resched())
+       while (!tif_need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
@@ -92,8 +92,7 @@ static void cpu_idle_loop(void)
                        if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
                                cpu_idle_poll();
                        } else {
-                               current_clr_polling();
-                               if (!need_resched()) {
+                               if (!current_clr_polling_and_test()) {
                                        stop_critical_timings();
                                        rcu_idle_enter();
                                        arch_cpu_idle();
@@ -103,7 +102,7 @@ static void cpu_idle_loop(void)
                                } else {
                                        local_irq_enable();
                                }
-                               current_set_polling();
+                               __current_set_polling();
                        }
                        arch_cpu_idle_exit();
                }
@@ -129,7 +128,7 @@ void cpu_startup_entry(enum cpuhp_state state)
         */
        boot_init_stack_canary();
 #endif
-       current_set_polling();
+       __current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
 }