cpufreq: stats: Make the stats code non-modular
[cascardo/linux.git] drivers/cpufreq/cpufreq.c
index c4acfc5..c6a14ba 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,9 +74,12 @@ static inline bool has_target(void)
 }
 
 /* internal prototypes */
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_init_governor(struct cpufreq_policy *policy);
+static void cpufreq_exit_governor(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
+static void cpufreq_stop_governor(struct cpufreq_policy *policy);
+static void cpufreq_governor_limits(struct cpufreq_policy *policy);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -344,6 +347,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
+               cpufreq_stats_record_transition(policy, freqs->new);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -429,6 +433,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
+/*
+ * Fast frequency switching status count.  Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+       struct notifier_block *nb;
+
+       pr_info("Registered transition notifiers:\n");
+
+       mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+       for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+               pr_info("%pF\n", nb->notifier_call);
+
+       mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers.  Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+       lockdep_assert_held(&policy->rwsem);
+
+       if (!policy->fast_switch_possible)
+               return;
+
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (cpufreq_fast_switch_count >= 0) {
+               cpufreq_fast_switch_count++;
+               policy->fast_switch_enabled = true;
+       } else {
+               pr_warn("CPU%u: Fast frequency switching not enabled\n",
+                       policy->cpu);
+               cpufreq_list_transition_notifiers();
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (policy->fast_switch_enabled) {
+               policy->fast_switch_enabled = false;
+               if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+                       cpufreq_fast_switch_count--;
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
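[Editor's note] The two helpers added above, together with cpufreq_fast_switch_count, implement a simple mutual-exclusion protocol: the count is positive while at least one policy has fast switching enabled, negative while at least one transition notifier is registered, and zero while neither has happened yet (the notifier hunks further down make registration fail with -EBUSY in the positive case). A minimal sketch of how a driver/governor pair is expected to opt in, using only the fields and helpers introduced by this patch; the function names are purely illustrative:

	/* Illustrative driver ->init() callback: the driver only declares that
	 * its hardware can change frequency without sleeping. */
	static int fast_drv_init(struct cpufreq_policy *policy)
	{
		policy->fast_switch_possible = true;
		return 0;
	}

	/* Illustrative governor start path: ask the core to enable fast
	 * switching; refused if any transition notifier is registered. */
	static int fast_gov_start(struct cpufreq_policy *policy)
	{
		cpufreq_enable_fast_switch(policy);	/* policy->rwsem is held here */
		return 0;
	}

Whether the request succeeded is visible afterwards in policy->fast_switch_enabled, which the governor checks before taking the fast path.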
@@ -954,13 +1025,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
                return 0;
 
        down_write(&policy->rwsem);
-       if (has_target()) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               if (ret) {
-                       pr_err("%s: Failed to stop governor\n", __func__);
-                       goto unlock;
-               }
-       }
+       if (has_target())
+               cpufreq_stop_governor(policy);
 
        cpumask_set_cpu(cpu, policy->cpus);
 
@@ -969,8 +1035,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }
-
-unlock:
        up_write(&policy->rwsem);
        return ret;
 }
@@ -1045,6 +1109,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
                                             CPUFREQ_REMOVE_POLICY, policy);
 
        down_write(&policy->rwsem);
+       cpufreq_stats_free_table(policy);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
@@ -1195,13 +1260,12 @@ static int cpufreq_online(unsigned int cpu)
                }
        }
 
-       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                                    CPUFREQ_START, policy);
-
        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;
+
+               cpufreq_stats_create_table(policy);
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
 
@@ -1210,6 +1274,9 @@ static int cpufreq_online(unsigned int cpu)
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }
 
+       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+                                    CPUFREQ_START, policy);
+
        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1248,26 +1315,24 @@ out_free_policy:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
+       struct cpufreq_policy *policy;
        unsigned cpu = dev->id;
-       int ret;
 
        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
 
-       if (cpu_online(cpu)) {
-               ret = cpufreq_online(cpu);
-       } else {
-               /*
-                * A hotplug notifier will follow and we will handle it as CPU
-                * online then.  For now, just create the sysfs link, unless
-                * there is no policy or the link is already present.
-                */
-               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (cpu_online(cpu))
+               return cpufreq_online(cpu);
 
-               ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-                       ? add_cpu_dev_symlink(policy, cpu) : 0;
-       }
+       /*
+        * A hotplug notifier will follow and we will handle it as CPU online
+        * then.  For now, just create the sysfs link, unless there is no policy
+        * or the link is already present.
+        */
+       policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+               return 0;
 
-       return ret;
+       return add_cpu_dev_symlink(policy, cpu);
 }
 
 static void cpufreq_offline(unsigned int cpu)
@@ -1284,11 +1349,8 @@ static void cpufreq_offline(unsigned int cpu)
        }
 
        down_write(&policy->rwsem);
-       if (has_target()) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               if (ret)
-                       pr_err("%s: Failed to stop governor\n", __func__);
-       }
+       if (has_target())
+               cpufreq_stop_governor(policy);
 
        cpumask_clear_cpu(cpu, policy->cpus);
 
@@ -1317,12 +1379,8 @@ static void cpufreq_offline(unsigned int cpu)
        if (cpufreq_driver->stop_cpu)
                cpufreq_driver->stop_cpu(policy);
 
-       /* If cpu is last user of policy, free policy */
-       if (has_target()) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-               if (ret)
-                       pr_err("%s: Failed to exit governor\n", __func__);
-       }
+       if (has_target())
+               cpufreq_exit_governor(policy);
 
        /*
         * Perform the ->exit() even during light-weight tear-down,
@@ -1447,8 +1505,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 
        ret_freq = cpufreq_driver->get(policy->cpu);
 
-       /* Updating inactive policies is invalid, so avoid doing that. */
-       if (unlikely(policy_is_inactive(policy)))
+       /*
+        * Updating inactive policies is invalid, so avoid doing that.  Also
+        * if fast frequency switching is used with the given policy, the check
+        * against policy->cur is pointless, so skip it in that case too.
+        */
+       if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
                return ret_freq;
 
        if (ret_freq && policy->cur &&
@@ -1552,7 +1614,6 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
 void cpufreq_suspend(void)
 {
        struct cpufreq_policy *policy;
-       int ret;
 
        if (!cpufreq_driver)
                return;
@@ -1565,14 +1626,8 @@ void cpufreq_suspend(void)
        for_each_active_policy(policy) {
                if (has_target()) {
                        down_write(&policy->rwsem);
-                       ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                       cpufreq_stop_governor(policy);
                        up_write(&policy->rwsem);
-
-                       if (ret) {
-                               pr_err("%s: Failed to stop governor for policy: %p\n",
-                                       __func__, policy);
-                               continue;
-                       }
                }
 
                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -1679,8 +1734,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
+
+               if (cpufreq_fast_switch_count > 0) {
+                       mutex_unlock(&cpufreq_fast_switch_lock);
+                       return -EBUSY;
+               }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret)
+                       cpufreq_fast_switch_count--;
+
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
@@ -1713,8 +1778,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
+
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+                       cpufreq_fast_switch_count++;
+
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
@@ -1733,6 +1804,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+                                       unsigned int target_freq)
+{
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+       return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
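[Editor's note] cpufreq_driver_fast_switch(), added above, is the entry point governors use from scheduler context instead of __cpufreq_driver_target(). A hedged sketch of the resulting fast-path/slow-path split in a governor's frequency-update routine; the helper name is illustrative, and the structure mirrors what a scheduler-driven governor is expected to do:

	/* Illustrative only: choose between the non-sleeping fast path and the
	 * conventional (possibly sleeping) target path. */
	static void fast_gov_update(struct cpufreq_policy *policy,
				    unsigned int next_freq)
	{
		if (policy->fast_switch_enabled) {
			/* Safe in RCU-sched read-side critical sections. */
			next_freq = cpufreq_driver_fast_switch(policy, next_freq);
			if (next_freq == CPUFREQ_ENTRY_INVALID)
				return;	/* error: hardware state left untouched */
			policy->cur = next_freq;
		} else {
			/* Slow path: may sleep, so run it from process context,
			 * e.g. from a work item. */
			__cpufreq_driver_target(policy, next_freq,
						CPUFREQ_RELATION_L);
		}
	}

Note that the caller, not the core, is responsible for the serialization guarantees spelled out in the kernel-doc above: no parallel invocations for the same policy and no overlap with ->target() or ->target_index().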
@@ -1825,10 +1927,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                return -ENODEV;
 
        /* Make sure that target_freq is within supported range */
-       if (target_freq > policy->max)
-               target_freq = policy->max;
-       if (target_freq < policy->min)
-               target_freq = policy->min;
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
 
        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);
@@ -1892,7 +1991,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
        return NULL;
 }
 
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int cpufreq_init_governor(struct cpufreq_policy *policy)
 {
        int ret;
 
@@ -1920,37 +2019,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
                }
        }
 
-       if (event == CPUFREQ_GOV_POLICY_INIT)
-               if (!try_module_get(policy->governor->owner))
-                       return -EINVAL;
-
-       pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
+       if (!try_module_get(policy->governor->owner))
+               return -EINVAL;
 
-       ret = policy->governor->governor(policy, event);
+       pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
 
-       if (!ret) {
-               if (event == CPUFREQ_GOV_POLICY_INIT)
-                       policy->governor->initialized++;
-               else if (event == CPUFREQ_GOV_POLICY_EXIT)
-                       policy->governor->initialized--;
+       if (policy->governor->init) {
+               ret = policy->governor->init(policy);
+               if (ret) {
+                       module_put(policy->governor->owner);
+                       return ret;
+               }
        }
 
-       if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
-                       ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
-               module_put(policy->governor->owner);
+       return 0;
+}
+
+static void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+       if (cpufreq_suspended || !policy->governor)
+               return;
 
-       return ret;
+       pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+       if (policy->governor->exit)
+               policy->governor->exit(policy);
+
+       module_put(policy->governor->owner);
 }
 
 static int cpufreq_start_governor(struct cpufreq_policy *policy)
 {
        int ret;
 
+       if (cpufreq_suspended)
+               return 0;
+
+       if (!policy->governor)
+               return -EINVAL;
+
+       pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
                cpufreq_update_current_freq(policy);
 
-       ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-       return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+       if (policy->governor->start) {
+               ret = policy->governor->start(policy);
+               if (ret)
+                       return ret;
+       }
+
+       if (policy->governor->limits)
+               policy->governor->limits(policy);
+
+       return 0;
+}
+
+static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+       if (cpufreq_suspended || !policy->governor)
+               return;
+
+       pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+       if (policy->governor->stop)
+               policy->governor->stop(policy);
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy)
+{
+       if (cpufreq_suspended || !policy->governor)
+               return;
+
+       pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+       if (policy->governor->limits)
+               policy->governor->limits(policy);
 }
 
 int cpufreq_register_governor(struct cpufreq_governor *governor)
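[Editor's note] The five helpers above replace the single event-multiplexed cpufreq_governor(policy, event) call. The prototypes implied by those calls (init() and start() return int; exit(), stop() and limits() are invoked as plain statements) mean a governor now looks roughly like the sketch below; all names are illustrative and the stub bodies are placeholders:

	static int  sample_gov_init(struct cpufreq_policy *policy)   { return 0; }
	static void sample_gov_exit(struct cpufreq_policy *policy)   { }
	static int  sample_gov_start(struct cpufreq_policy *policy)  { return 0; }
	static void sample_gov_stop(struct cpufreq_policy *policy)   { }
	static void sample_gov_limits(struct cpufreq_policy *policy) { }

	static struct cpufreq_governor sample_governor = {
		.name	= "sample_gov",
		.owner	= THIS_MODULE,
		.init	= sample_gov_init,
		.exit	= sample_gov_exit,
		.start	= sample_gov_start,
		.stop	= sample_gov_stop,
		.limits	= sample_gov_limits,
	};

As the helpers above show, the core only invokes a hook if it is non-NULL, so a governor may leave any of them unset.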
@@ -1965,7 +2109,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 
        mutex_lock(&cpufreq_governor_mutex);
 
-       governor->initialized = 0;
        err = -EBUSY;
        if (!find_governor(governor->name)) {
                err = 0;
@@ -2091,7 +2234,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
        if (new_policy->governor == policy->governor) {
                pr_debug("cpufreq: governor limits update\n");
-               return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+               cpufreq_governor_limits(policy);
+               return 0;
        }
 
        pr_debug("governor switch\n");
@@ -2100,39 +2244,27 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               if (ret) {
-                       /* This can happen due to race with other operations */
-                       pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
-                                __func__, old_gov->name, ret);
-                       return ret;
-               }
-
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-               if (ret) {
-                       pr_err("%s: Failed to Exit Governor: %s (%d)\n",
-                              __func__, old_gov->name, ret);
-                       return ret;
-               }
+               cpufreq_stop_governor(policy);
+               cpufreq_exit_governor(policy);
        }
 
        /* start new governor */
        policy->governor = new_policy->governor;
-       ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+       ret = cpufreq_init_governor(policy);
        if (!ret) {
                ret = cpufreq_start_governor(policy);
                if (!ret) {
                        pr_debug("cpufreq: governor change\n");
                        return 0;
                }
-               cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               cpufreq_exit_governor(policy);
        }
 
        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
-               if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+               if (cpufreq_init_governor(policy))
                        policy->governor = NULL;
                else
                        cpufreq_start_governor(policy);
@@ -2193,16 +2325,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
                cpufreq_online(cpu);
                break;
 
        case CPU_DOWN_PREPARE:
                cpufreq_offline(cpu);
                break;
-
-       case CPU_DOWN_FAILED:
-               cpufreq_online(cpu);
-               break;
        }
        return NOTIFY_OK;
 }
@@ -2233,7 +2362,7 @@ static int cpufreq_boost_set_sw(int state)
 
                        down_write(&policy->rwsem);
                        policy->user_policy.max = policy->max;
-                       cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                       cpufreq_governor_limits(policy);
                        up_write(&policy->rwsem);
                }
        }
@@ -2377,10 +2506,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        register_hotcpu_notifier(&cpufreq_cpu_notifier);
        pr_debug("driver %s up and running\n", driver_data->name);
-
-out:
-       put_online_cpus();
-       return ret;
+       goto out;
 
 err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
@@ -2390,7 +2516,9 @@ err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-       goto out;
+out:
+       put_online_cpus();
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);