cpufreq: suspend governors on system suspend/hibernate
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held, we must
 * never try to acquire a device lock while holding dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
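
/*
 * During a system transition, devices migrate between these lists as each
 * phase completes: dpm_prepare() moves them from dpm_list to
 * dpm_prepared_list, dpm_suspend() to dpm_suspended_list, dpm_suspend_late()
 * to dpm_late_early_list, and dpm_suspend_noirq() to dpm_noirq_list.  The
 * resume path walks the lists back in the opposite direction.
 */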

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }

        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
                                    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

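/*
 * Together, dpm_wait() and dpm_wait_for_children() enforce ordering when
 * devices are handled asynchronously: __device_suspend() waits for all
 * children to finish suspending before the parent runs, and device_resume()
 * waits for the parent to finish resuming before its children run.  Each
 * device's power.completion is the synchronization point.
 */
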
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

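/*
 * Illustrative example (not part of this file): a driver usually provides
 * the callbacks selected here through a struct dev_pm_ops, for instance:
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * SIMPLE_DEV_PM_OPS also wires the same pair up as the freeze/thaw/
 * poweroff/restore hibernation callbacks.  The "foo" names are
 * hypothetical.
 */
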
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The "late" and "early" callbacks returned here run while runtime PM is
 * disabled for the device they are invoked on.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The "noirq" callbacks returned here run while the driver of the device
 * they are invoked on is not receiving interrupts.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

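/*
 * Asynchronous handling is a policy decision made here, not in drivers:
 * pm_async_enabled reflects the /sys/power/pm_async switch, and a device
 * opts in with device_enable_async_suspend(), which sets
 * power.async_suspend.  PM tracing forces everything synchronous so that
 * the resume trace stays meaningful.
 */
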
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

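        /*
         * Devices are functional again, so it is safe to restart the
         * cpufreq governors that dpm_suspend() stopped with
         * cpufreq_suspend().
         */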
        cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

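/*
 * Taken together, a full transition runs the device phases in this order:
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late()
 *		-> dpm_suspend_noirq()
 *
 * with dpm_resume_noirq(), dpm_resume_early(), dpm_resume() and
 * dpm_complete() undoing them in reverse on the way back up.
 */
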
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Callback description for diagnostics.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

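        /*
         * Stop the cpufreq governors before any device suspend callbacks
         * run, so that no governor keeps poking frequency-scaling hardware
         * while the system goes down; dpm_resume() restarts them with
         * cpufreq_resume().
         */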
        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        if (error)
                pm_runtime_put(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

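/*
 * Illustrative example (not part of this file): a driver whose resume
 * ordering depends on another device can call device_pm_wait_for_dev()
 * from its own callback.  The "foo" names below are hypothetical:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->supplier_dev);
 *		if (error)
 *			return error;
 *		return foo_hw_init(foo);
 *	}
 */
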
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
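
/*
 * Illustrative example (not part of this file): counting the devices the
 * PM core currently tracks.  The names are hypothetical:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dpm_for_each_dev(&n, count_dev);
 */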