/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *             Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *             Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)                     \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
                if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its read-write lock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;

        return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
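
/*
 * Illustrative sketch (not part of this file): sampling governors such as
 * ondemand estimate CPU load by calling the helper above twice and
 * comparing the deltas, roughly as follows:
 *
 *      u64 wall0, idle0, wall1, idle1, load;
 *
 *      idle0 = get_cpu_idle_time(cpu, &wall0, 0);
 *      // ... one sampling period later ...
 *      idle1 = get_cpu_idle_time(cpu, &wall1, 0);
 *      load = 100 * (wall1 - wall0 - (idle1 - idle0)) / (wall1 - wall0);
 */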
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
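
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * clk-based driver's ->init() callback can be a thin wrapper around the
 * helper above; "foo_clk" and "foo_freq_table" are made-up names, and the
 * 300000 ns transition latency is an arbitrary example value:
 *
 *      static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              policy->clk = foo_clk;
 *              return cpufreq_generic_init(policy, foo_freq_table, 300000);
 *      }
 */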
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy, so a
 * corresponding call to cpufreq_cpu_put() is required to decrement it back.
 * If that cpufreq_cpu_put() call isn't made, the policy won't be freed, as
 * freeing depends on the kobject count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
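
/*
 * Illustrative sketch (not part of this file): every successful
 * cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(), typically:
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              // ... read policy fields ...
 *              cpufreq_cpu_put(policy);
 *      }
 */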
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                cpufreq_stats_record_transition(policy, freqs->new);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
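
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * CPUFREQ_ASYNC_NOTIFICATION driver, which does its own notification,
 * would bracket the actual hardware switch with the pair above;
 * "foo_set_frequency" is a made-up hardware call:
 *
 *      struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_freq };
 *      int ret;
 *
 *      cpufreq_freq_transition_begin(policy, &freqs);
 *      ret = foo_set_frequency(new_freq);
 *      cpufreq_freq_transition_end(policy, &freqs, ret);
 */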
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
        struct notifier_block *nb;

        pr_info("Registered transition notifiers:\n");

        mutex_lock(&cpufreq_transition_notifier_list.mutex);

        for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
                pr_info("%pF\n", nb->notifier_call);

        mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
        lockdep_assert_held(&policy->rwsem);

        if (!policy->fast_switch_possible)
                return;

        mutex_lock(&cpufreq_fast_switch_lock);
        if (cpufreq_fast_switch_count >= 0) {
                cpufreq_fast_switch_count++;
                policy->fast_switch_enabled = true;
        } else {
                pr_warn("CPU%u: Fast frequency switching not enabled\n",
                        policy->cpu);
                cpufreq_list_transition_notifiers();
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
        mutex_lock(&cpufreq_fast_switch_lock);
        if (policy->fast_switch_enabled) {
                policy->fast_switch_enabled = false;
                if (!WARN_ON(cpufreq_fast_switch_count <= 0))
                        cpufreq_fast_switch_count--;
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);
        policy->cached_target_freq = target_freq;

        if (cpufreq_driver->target_index) {
                int idx;

                idx = cpufreq_frequency_table_target(policy, target_freq,
                                                     CPUFREQ_RELATION_L);
                policy->cached_resolved_idx = idx;
                return policy->freq_table[idx].frequency;
        }

        if (cpufreq_driver->resolve_freq)
                return cpufreq_driver->resolve_freq(policy, target_freq);

        return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
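
/*
 * Illustrative sketch (not part of this file): a governor that derives a
 * raw frequency from CPU utilization can map it to a frequency the driver
 * actually supports before requesting the switch:
 *
 *      next_freq = cpufreq_driver_resolve_freq(policy, util_freq);
 */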
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                          struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);
                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
        return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        memcpy(&new_policy, policy, sizeof(*policy));                   \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
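
/*
 * For reference: store_one(scaling_max_freq, max) expands to a
 * store_scaling_max_freq() helper that copies the current policy,
 * overwrites new_policy.max with the value parsed from the user buffer,
 * and applies it through cpufreq_set_policy(); scaling_min_freq is
 * generated the same way for the min field.
 */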
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                     char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (!cur_freq)
                return sprintf(buf, "<unknown>");

        return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                 policy->governor->name);
        return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                   &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);
        return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
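
/*
 * Descriptive note (not in the original): these macros, defined in
 * <linux/cpufreq.h>, generate "struct freq_attr" objects that bind the
 * show_*()/store_*() helpers above to sysfs files of the same name, e.g.
 * cpufreq_freq_attr_rw(scaling_max_freq) wires show_scaling_max_freq()
 * and store_scaling_max_freq() to the "scaling_max_freq" attribute.
 */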
static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        down_read(&policy->rwsem);
        ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);

        return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (cpu_online(policy->cpu)) {
                down_write(&policy->rwsem);
                ret = fattr->store(policy, buf, count);
                up_write(&policy->rwsem);
        }

        put_online_cpus();

        return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

        if (!policy)
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return 0;

        return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}
/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus) {
                ret = add_cpu_dev_symlink(policy, j);
                if (ret)
                        break;
        }

        return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, j);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
        return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
        if (gov) {
                pr_debug("Restoring governor %s for cpu %d\n",
                         policy->governor->name, policy->cpu);
        } else {
                gov = cpufreq_default_governor();
                if (!gov)
                        return -ENODATA;
        }

        new_policy.governor = gov;

        /* Use the default policy if there is no last_policy. */
        if (cpufreq_driver->setpolicy) {
                if (policy->last_policy)
                        new_policy.policy = policy->last_policy;
                else
                        cpufreq_parse_governor(gov->name, &new_policy.policy,
                                               NULL);
        }
        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = cpufreq_start_governor(policy);
                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }
        up_write(&policy->rwsem);
        return ret;
}
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;

        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;
        int ret;

        if (!dev)
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;
        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
                goto err_free_real_cpus;
        }

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_free_real_cpus:
        free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
        struct kobject *kobj;
        struct completion *cmp;

        if (notify)
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);

        down_write(&policy->rwsem);
        cpufreq_stats_free_table(policy);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_policy_put_kobj(policy, notify);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy. Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /*
         * Call driver. From then on the cpufreq core must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU.
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto out_free_policy;
        }

        down_write(&policy->rwsem);

        if (new_policy) {
                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
                /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
        }

        /*
         * affected cpus must always be the ones which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_exit_policy;
                }
        }

        /*
         * Sometimes boot loaders set CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run on that frequency for
         * a long duration, so it's better to set it to a frequency which is
         * specified in the freq-table. This also makes cpufreq stats
         * inconsistent, as cpufreq-stats would fail to register because the
         * current frequency of the CPU isn't found in the freq-table.
         *
         * Because we don't want this change to affect the boot process
         * badly, we go for the next freq which is >= policy->cur ('cur'
         * must be set by now, otherwise we will end up setting freq to the
         * lowest of the table as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will
         * be equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;

                cpufreq_stats_create_table(policy);
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                /* cpufreq_policy_free() will notify based on this */
                new_policy = false;
                goto out_exit_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

out_exit_policy:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
out_free_policy:
        cpufreq_policy_free(policy, !new_policy);
        return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        struct cpufreq_policy *policy;
        unsigned cpu = dev->id;

        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

        if (cpu_online(cpu))
                return cpufreq_online(cpu);

        /*
         * A hotplug notifier will follow and we will handle it as CPU online
         * then. For now, just create the sysfs link, unless there is no policy
         * or the link is already present.
         */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
                return 0;

        return add_cpu_dev_symlink(policy, cpu);
}
static void cpufreq_offline(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        int ret;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return;
        }

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
                else
                        policy->last_policy = policy->policy;
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = cpufreq_start_governor(policy);
                        if (ret)
                                pr_err("%s: Failed to start governor\n",
                                       __func__);
                }
                goto unlock;
        }

        if (cpufreq_driver->stop_cpu)
                cpufreq_driver->stop_cpu(policy);

        if (has_target())
                cpufreq_exit_governor(policy);

        /*
         * Perform the ->exit() even during light-weight tear-down,
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
        if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }

unlock:
        up_write(&policy->rwsem);
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                return;

        if (cpu_online(cpu))
                cpufreq_offline(cpu);

        cpumask_clear_cpu(cpu, policy->real_cpus);
        remove_cpu_dev_symlink(policy, cpu);

        if (cpumask_empty(policy->real_cpus))
                cpufreq_policy_free(policy, true);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                 policy->cur, new_freq);

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
                ret_freq = cpufreq_driver->get(cpu);
                read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                return ret_freq;
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(policy->cpu);

        /*
         * Updating inactive policies is invalid, so avoid doing that. Also
         * if fast frequency switching is used with the given policy, the check
         * against policy->cur is pointless, so skip it in that case too.
         */
        if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
                return ret_freq;

        if (ret_freq && policy->cur &&
            !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU's current frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
        unsigned int new_freq;

        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;

        if (!policy->cur) {
                pr_debug("cpufreq: Driver did not initialize current freq\n");
                policy->cur = new_freq;
        } else if (policy->cur != new_freq && has_target()) {
                cpufreq_out_of_sync(policy, new_freq);
        }

        return new_freq;
}
static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
/**
 * cpufreq_generic_suspend() - Generic suspend routine for cpufreq drivers
 * @policy: policy to suspend
 *
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_debug("%s: suspend_freq not defined\n", __func__);
                return 0;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                 policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                                      CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                       __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
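
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver opts in by picking a safe frequency and pointing its ->suspend
 * callback at the helper above; the "foo" names are made up:
 *
 *      policy->suspend_freq = foo_safe_freq;
 *
 *      static struct cpufreq_driver foo_driver = {
 *              ...
 *              .suspend = cpufreq_generic_suspend,
 *      };
 */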
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle. This is because some of the devices (e.g. i2c, regulators) they use
 * for changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                if (has_target()) {
                        down_write(&policy->rwsem);
                        cpufreq_stop_governor(policy);
                        up_write(&policy->rwsem);
                }

                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                               policy);
        }

suspend:
        cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;
        int ret;

        if (!cpufreq_driver)
                return;

        cpufreq_suspended = false;

        if (!has_target() && !cpufreq_driver->resume)
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                               policy);
                } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);

                        if (ret)
                                pr_err("%s: Failed to start governor for policy: %p\n",
                                       __func__, policy);
                }
        }
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->driver_data;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                mutex_lock(&cpufreq_fast_switch_lock);

                if (cpufreq_fast_switch_count > 0) {
                        mutex_unlock(&cpufreq_fast_switch_lock);
                        return -EBUSY;
                }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                if (!ret)
                        cpufreq_fast_switch_count--;

                mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
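
/*
 * Illustrative sketch (hypothetical client, not part of this file): code
 * that must react to frequency changes can hook the transition chain;
 * "foo_rescale" is a made-up callback:
 *
 *      static int foo_cpufreq_notify(struct notifier_block *nb,
 *                                    unsigned long state, void *data)
 *      {
 *              struct cpufreq_freqs *freqs = data;
 *
 *              if (state == CPUFREQ_POSTCHANGE)
 *                      foo_rescale(freqs->cpu, freqs->new);
 *
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_nb = {
 *              .notifier_call = foo_cpufreq_notify,
 *      };
 *
 *      cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */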
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                mutex_lock(&cpufreq_fast_switch_lock);

                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
                        cpufreq_fast_switch_count++;

                mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
 * callback to indicate an error condition, the hardware configuration must be
 * preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);

        return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
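
/*
 * Illustrative sketch (not part of this file): a governor running in
 * scheduler context can use the fast path when available, and otherwise
 * defer to the regular target interface from process context, since the
 * slow path may sleep:
 *
 *      if (policy->fast_switch_enabled)
 *              new_freq = cpufreq_driver_fast_switch(policy, target);
 *      else
 *              // queue work that calls cpufreq_driver_target(policy,
 *              // target, CPUFREQ_RELATION_L) from process context
 */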
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        unsigned int newfreq = policy->freq_table[index].frequency;
        int retval = -EINVAL;
        bool notify;

        if (newfreq == policy->cur)
                return 0;

        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = newfreq;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? Driver should have
                 * reverted back to initial frequency and so should we. Check
                 * here for intermediate_freq instead of get_intermediate, in
                 * case we haven't switched to intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int index;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        target_freq = clamp_val(target_freq, policy->min, policy->max);

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding index. But it is left intentionally for cases where
         * exactly the same freq is requested again, so that we can save a few
         * function calls.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                return cpufreq_driver->target(policy, target_freq, relation);

        if (!cpufreq_driver->target_index)
                return -EINVAL;

        index = cpufreq_frequency_table_target(policy, target_freq, relation);

        return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret;

        down_write(&policy->rwsem);

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        up_write(&policy->rwsem);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
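
/*
 * Illustrative sketch (not part of this file): governors request
 * frequencies through this wrapper; e.g. a performance-style governor
 * pins the policy to its maximum:
 *
 *      cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 */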
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
        return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
        int ret;

        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;

        /*
         * Governor might not be initiated here if ACPI _PPC changed
         * notification happened, so check it.
         */
        if (!policy->governor)
                return -EINVAL;

        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                struct cpufreq_governor *gov = cpufreq_fallback_governor();

                if (gov) {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
                } else {
                        return -EINVAL;
                }
        }

        if (!try_module_get(policy->governor->owner))
                return -EINVAL;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->init) {
                ret = policy->governor->init(policy);
                if (ret) {
                        module_put(policy->governor->owner);
                        return ret;
                }
        }

        return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->exit)
                policy->governor->exit(policy);

        module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
        int ret;

        if (cpufreq_suspended)
                return 0;

        if (!policy->governor)
                return -EINVAL;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
                cpufreq_update_current_freq(policy);

        if (policy->governor->start) {
                ret = policy->governor->start(policy);
                if (ret)
                        return ret;
        }

        if (policy->governor->limits)
                policy->governor->limits(policy);

        return 0;
}
static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->stop)
                policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->limits)
                policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
        int err;

        if (!governor)
                return -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        mutex_lock(&cpufreq_governor_mutex);

        err = -EBUSY;
        if (!find_governor(governor->name)) {
                err = 0;
                list_add(&governor->governor_list, &cpufreq_governor_list);
        }

        mutex_unlock(&cpufreq_governor_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
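
/*
 * Illustrative sketch (hypothetical governor, not part of this file): a
 * governor module typically registers itself at init time; the "foo"
 * names are made up:
 *
 *      static struct cpufreq_governor cpufreq_gov_foo = {
 *              .name   = "foo",
 *              .owner  = THIS_MODULE,
 *              .start  = foo_start,
 *              .stop   = foo_stop,
 *              .limits = foo_limits,
 *      };
 *
 *      static int __init foo_governor_init(void)
 *      {
 *              return cpufreq_register_governor(&cpufreq_gov_foo);
 *      }
 *      module_init(foo_governor_init);
 */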
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (!governor)
                return;

        if (cpufreq_disabled())
                return;

        /* clear last_governor for all inactive policies */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_inactive_policy(policy) {
                if (!strcmp(policy->last_governor, governor->name)) {
                        policy->governor = NULL;
                        strcpy(policy->last_governor, "\0");
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *      is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct cpufreq_policy *cpu_policy;

        if (!policy)
                return -EINVAL;

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        memcpy(policy, cpu_policy, sizeof(*policy));

        cpufreq_cpu_put(cpu_policy);
        return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_policy *new_policy)
{
        struct cpufreq_governor *old_gov;
        int ret;

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
                 new_policy->cpu, new_policy->min, new_policy->max);

        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

        /*
         * This check works well when we store new min/max freq attributes,
         * because new_policy is a copy of policy with one field updated.
         */
        if (new_policy->min > new_policy->max)
                return -EINVAL;

        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);

        /*
         * verify the cpu speed can be set within this limit, which might be
         * different from the first one
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, new_policy);

        policy->min = new_policy->min;
        policy->max = new_policy->max;

        policy->cached_target_freq = UINT_MAX;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);

        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
                return cpufreq_driver->setpolicy(new_policy);
        }

        if (new_policy->governor == policy->governor) {
                pr_debug("cpufreq: governor limits update\n");
                cpufreq_governor_limits(policy);
                return 0;
        }

        pr_debug("governor switch\n");

        /* save old, working values */
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
                cpufreq_stop_governor(policy);
                cpufreq_exit_governor(policy);
        }

        /* start new governor */
        policy->governor = new_policy->governor;
        ret = cpufreq_init_governor(policy);
        if (!ret) {
                ret = cpufreq_start_governor(policy);
                if (!ret) {
                        pr_debug("cpufreq: governor change\n");
                        return 0;
                }
                cpufreq_exit_governor(policy);
        }

        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
                if (cpufreq_init_governor(policy))
                        policy->governor = NULL;
                else
                        cpufreq_start_governor(policy);
        }

        return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended) {
			ret = -EAGAIN;
			goto unlock;
		}
		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
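
/*
 * Example (illustrative): a firmware/ACPI notification handler can force
 * re-evaluation after the platform changed the limits behind the
 * kernel's back; example_limits_changed() is hypothetical.
 */
#if 0
static void example_limits_changed(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
}
#endif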
static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		cpufreq_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
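
/*
 * Example (illustrative): a driver whose frequency table contains
 * CPUFREQ_BOOST_FREQ entries can opt in to the software boost path from
 * its ->init() callback.  example_cpu_init() and example_freq_table are
 * hypothetical.
 */
#if 0
static int example_cpu_init(struct cpufreq_policy *policy)
{
	int ret = cpufreq_table_validate_and_show(policy, example_freq_table);

	if (ret)
		return ret;

	/* installs cpufreq_boost_set_sw() if the driver has no ->set_boost */
	return cpufreq_enable_boost_support();
}
#endif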
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
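
/*
 * Example (illustrative sketch): a minimal frequency-table driver as a
 * platform author might write it.  All example_* names and the
 * two-entry table are hypothetical; the cpufreq_driver fields and
 * generic helpers used here are the real ones.
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the clock for example_freq_table[index].frequency here */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* 300 us transition latency, made up for the example */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpu_init,
	.attr		= cpufreq_generic_attr,
};

static int __init example_cpufreq_init(void)
{
	return cpufreq_register_driver(&example_driver);
}
module_init(example_cpufreq_init);
#endif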
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
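
/*
 * Example (illustrative): the matching module exit path for the
 * registration sketch above; example_driver is hypothetical.
 */
#if 0
static void __exit example_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&example_driver);
}
module_exit(example_cpufreq_exit);
#endif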
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);