Merge tag 'sound-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e..c256bd7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
  *
  * Callers must disable interrupts before the call and re-enable them on return.
  */
-static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-       /* Don't trace irqs off for idle */
-       stop_critical_timings();
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
@@ -703,37 +701,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                   gets asserted in time to freeze execution properly. */
                inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
-       start_critical_timings();
 }
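An aside on the pair deleted above: stop_critical_timings()/start_critical_timings() told the irqsoff latency tracer to ignore the idle period (per the removed comment, "Don't trace irqs off for idle"). A minimal sketch of that bracket pattern; the idle body here is illustrative, not this driver's code:

	stop_critical_timings();	/* pause irqsoff latency tracing */
	acpi_safe_halt();		/* illustrative idle entry (STI; HLT) */
	start_critical_timings();	/* resume tracing on wakeup */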
 
-/**
- * acpi_idle_enter_c1 - enters an ACPI C1 state-type
- * @dev: the target CPU
- * @drv: cpuidle driver containing cpuidle state info
- * @index: index of target state
- *
- * This is equivalent to the HALT instruction.
- */
-static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
-{
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-       lapic_timer_state_broadcast(pr, cx, 1);
-       acpi_idle_do_entry(cx);
-
-       lapic_timer_state_broadcast(pr, cx, 0);
-
-       return index;
-}
-
-
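The acpi_idle_enter_c1() deleted above was one of three near-identical cpuidle ->enter callbacks in this file; the patch folds them into a single acpi_idle_enter() (added in a later hunk). For reference, a minimal sketch of the ->enter contract the unified callback keeps, with a hypothetical name and an illustrative body:

static int sketch_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;		/* no ACPI data for this CPU */

	/* ... put the CPU into the state behind drv->states[index] ... */

	return index;	/* report the state that was actually entered */
}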
 /**
  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
  * @dev: the target CPU
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
        return 0;
 }
 
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
+static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-           !pr->flags.has_cst &&
-           !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-               return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-       /*
-        * Must be done before busmaster disable as we might need to
-        * access HPET !
-        */
-       lapic_timer_state_broadcast(pr, cx, 1);
-
-       if (cx->type == ACPI_STATE_C3)
-               ACPI_FLUSH_CPU_CACHE();
-
-       /* Tell the scheduler that we are going deep-idle: */
-       sched_clock_idle_sleep_event();
-       acpi_idle_do_entry(cx);
-
-       sched_clock_idle_wakeup_event(0);
-
-       lapic_timer_state_broadcast(pr, cx, 0);
-       return index;
+       return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
+               !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
+               !pr->flags.has_cst;
 }
 
 static int c3_cpu_count;
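The new acpi_idle_fallback_to_c1() helper above also retires the duplicated #ifdef CONFIG_HOTPLUG_CPU blocks: IS_ENABLED() expands to a compile-time constant, so the check is always compiled and type-checked, then folded away when CPU hotplug is disabled. A hedged before/after sketch (old_shape/new_shape are hypothetical names; the conditions come from this diff):

static bool old_shape(struct acpi_processor *pr)
{
#ifdef CONFIG_HOTPLUG_CPU	/* whole check vanishes when hotplug is off */
	if (num_online_cpus() > 1 && !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return true;
#endif
	return false;
}

static bool new_shape(struct acpi_processor *pr)
{
	/* Always compiled; constant-folds to false when CONFIG_HOTPLUG_CPU=n. */
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
	       !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
	       !pr->flags.has_cst;
}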
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
  */
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+                              struct acpi_processor_cx *cx)
 {
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-           !pr->flags.has_cst &&
-           !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-               return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-               if (drv->safe_state_index >= 0) {
-                       return drv->states[drv->safe_state_index].enter(dev,
-                                               drv, drv->safe_state_index);
-               } else {
-                       acpi_safe_halt();
-                       return -EBUSY;
-               }
-       }
-
        acpi_unlazy_tlb(smp_processor_id());
 
-       /* Tell the scheduler that we are going deep-idle: */
-       sched_clock_idle_sleep_event();
        /*
         * Must be done before busmaster disable, as we might need to
         * access the HPET!
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
-        * !bm_check implies we need cache flush
         * bm_control tells us whether we can do ARB_DIS
         *
         * That leaves the case where bm_check is set and bm_control is
         * not. In that case we cannot do much; we enter C3 without
         * doing anything.
         */
-       if (pr->flags.bm_check && pr->flags.bm_control) {
+       if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                raw_spin_unlock(&c3_lock);
-       } else if (!pr->flags.bm_check) {
-               ACPI_FLUSH_CPU_CACHE();
        }
 
        acpi_idle_do_entry(cx);
 
        /* Re-enable bus master arbitration */
-       if (pr->flags.bm_check && pr->flags.bm_control) {
+       if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }
 
-       sched_clock_idle_wakeup_event(0);
+       lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+                          struct cpuidle_driver *drv, int index)
+{
+       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+       struct acpi_processor *pr;
+
+       pr = __this_cpu_read(processors);
+       if (unlikely(!pr))
+               return -EINVAL;
+
+       if (cx->type != ACPI_STATE_C1) {
+               if (acpi_idle_fallback_to_c1(pr)) {
+                       index = CPUIDLE_DRIVER_STATE_START;
+                       cx = per_cpu(acpi_cstate[index], dev->cpu);
+               } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+                       if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+                               acpi_idle_enter_bm(pr, cx);
+                               return index;
+                       } else if (drv->safe_state_index >= 0) {
+                               index = drv->safe_state_index;
+                               cx = per_cpu(acpi_cstate[index], dev->cpu);
+                       } else {
+                               acpi_safe_halt();
+                               return -EBUSY;
+                       }
+               }
+       }
+
+       lapic_timer_state_broadcast(pr, cx, 1);
+
+       if (cx->type == ACPI_STATE_C3)
+               ACPI_FLUSH_CPU_CACHE();
+
+       acpi_idle_do_entry(cx);
 
        lapic_timer_state_broadcast(pr, cx, 0);
+
        return index;
 }
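The c3_cpu_count/c3_lock accounting in acpi_idle_enter_bm() above disables bus-master arbitration only once every online CPU has reached C3, and the first CPU to wake re-enables it for everyone. A condensed sketch of that pattern, using the names from this diff:

	raw_spin_lock(&c3_lock);
	if (++c3_cpu_count == num_online_cpus())
		/* last CPU in: no one can use the bus, so ARB_DIS is safe */
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
	raw_spin_unlock(&c3_lock);

	acpi_idle_do_entry(cx);		/* idle with arbitration disabled */

	raw_spin_lock(&c3_lock);
	/* any CPU leaving C3 re-enables arbitration immediately */
	acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
	c3_cpu_count--;
	raw_spin_unlock(&c3_lock);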
 
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;
+               state->enter = acpi_idle_enter;
 
                state->flags = 0;
-               switch (cx->type) {
-                       case ACPI_STATE_C1:
-
-                       state->enter = acpi_idle_enter_c1;
-                       state->enter_dead = acpi_idle_play_dead;
-                       drv->safe_state_index = count;
-                       break;
-
-                       case ACPI_STATE_C2:
-                       state->enter = acpi_idle_enter_simple;
+               if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
-                       break;
-
-                       case ACPI_STATE_C3:
-                       state->enter = pr->flags.bm_check ?
-                                       acpi_idle_enter_bm :
-                                       acpi_idle_enter_simple;
-                       break;
                }
 
                count++;
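Each state registered above is resolved back to its ACPI data in acpi_idle_enter() through per_cpu(acpi_cstate[index], dev->cpu). A sketch of that mapping, assuming the per-CPU pointer array this file defines during setup:

/* One C-state pointer per cpuidle state, per CPU; filled at setup time
 * so the hot ->enter path is a plain array lookup. */
static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
		      acpi_cstate);

/* In ->enter: map the governor's chosen index to this CPU's C-state. */
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);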