/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);
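
/*
 * Devices migrate across these lists as a sleep transition progresses:
 * dpm_prepare() moves them from dpm_list to dpm_prepared_list, the
 * "suspend", "late suspend" and "noirq suspend" phases advance them to
 * dpm_suspended_list, dpm_late_early_list and dpm_noirq_list in turn,
 * and the resume phases walk them back until dpm_complete() splices
 * everything onto dpm_list again.
 */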

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
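
/*
 * These waits enforce the dependency ordering of the two transitions:
 * device_resume() waits on the parent's completion before resuming a
 * child, while __device_suspend() calls dpm_wait_for_children() so a
 * parent is only suspended once all of its children have finished.
 */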

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
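
/*
 * Subsystems and drivers feed these tables through struct dev_pm_ops.
 * A minimal sketch (hypothetical names, using the standard
 * SET_SYSTEM_SLEEP_PM_OPS() helper from <linux/pm.h>):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With that, pm_op() maps PM_EVENT_SUSPEND to foo_suspend() and
 * PM_EVENT_RESUME to foo_resume(); the helper fills the
 * freeze/thaw/poweroff/restore slots with the same pair.
 */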

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
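
/*
 * Note the NULL fast path above: a missing callback is treated as
 * success, so devices whose subsystem or driver implements no handler
 * for a given phase are simply skipped rather than failing the
 * transition.
 */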

/**
 * dpm_wd_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog structure address, passed via the timer's data field.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for
 * a crash dump.
 */
static void dpm_wd_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;
	struct device *dev = wd->dev;
	struct task_struct *tsk = wd->tsk;

	dev_emerg(dev, "**** DPM device timeout ****\n");
	show_stack(tsk, NULL);
	BUG();
}

/**
 * dpm_wd_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = get_current();

	init_timer_on_stack(timer);
	timer->expires = jiffies + HZ * 12;
	timer->function = dpm_wd_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}
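
/*
 * The watchdog gives each device twelve seconds (HZ * 12 jiffies) to
 * finish a suspend or resume callback; dpm_wd_clear() disarms it once
 * the callback returns, so only a genuinely stuck device trips
 * dpm_wd_handler().
 */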

/**
 * dpm_wd_clear - Disable pm watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_wd_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
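
/*
 * The lookup above fixes the callback precedence used throughout this
 * file: a PM domain overrides the device type, which overrides the
 * class, which overrides the bus, and the driver's own dev_pm_ops is
 * consulted only when none of those layers supplies a callback table.
 */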

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	struct dpm_watchdog wd;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;
	dpm_wd_set(&wd, dev);

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_wd_clear(&wd);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
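
/*
 * A driver opts a device in to asynchronous handling with
 * device_enable_async_suspend(), which sets power.async_suspend.
 * Async handling is skipped while PM tracing is active, since pm_trace
 * identifies a hung device by recording the last device handled and
 * therefore depends on a strictly serialized order (a plausible
 * reading of the pm_trace_is_enabled() check above).
 */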

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
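
/*
 * dpm_resume() makes two passes: the first schedules every async-capable
 * device so those resumes can run in parallel, the second walks the list
 * in order, resuming synchronous devices directly and relying on the
 * dpm_wait() call inside device_resume() to honor parent/child
 * dependencies against the in-flight async work.
 */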

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put_sync(dev);
}
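
/*
 * The pm_runtime_put_sync() above drops the reference taken by
 * pm_runtime_get_noresume() in device_prepare(), re-enabling runtime
 * suspend for the device now that the system transition is over.
 */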

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
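
/*
 * Note the direction of traversal: the suspend loops take devices from
 * the tail of their list (.prev), so children are suspended before
 * their parents, while the resume loops take from the head (.next) and
 * restore parents first. Any error or pending wakeup unwinds the phase
 * by resuming whatever was already suspended.
 */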

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);

	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	struct dpm_watchdog wd;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	dpm_wd_set(&wd, dev);

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

	dpm_wd_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;
	else if (dev->power.is_suspended)
		__pm_runtime_disable(dev, false);

	return error;
}
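
/*
 * Any failure is recorded in async_error, which is checked both on
 * entry to __device_suspend() and in the dpm_suspend() loop; one
 * failing device therefore makes every suspend still in flight (sync or
 * async) bail out early instead of continuing the transition.
 */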

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
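
/*
 * -EAGAIN from a ->prepare() callback is treated as non-fatal above:
 * the device stays at the head of dpm_list and the loop retries it, on
 * the expectation that the condition is transient (e.g. the device is
 * concurrently being registered or removed), while any other error
 * aborts dpm_prepare() entirely.
 */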

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);