/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices. Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.status = DPM_ON;
        init_completion(&dev->power.completion);
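        /*
         * Start with the completion set, so that the first dpm_wait() on
         * this device does not block before any PM transition has begun.
         */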
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent) {
                if (dev->parent->power.status >= DPM_SUSPENDING)
                        dev_warn(dev, "parent %s should not be sleeping\n",
                                 dev_name(dev->parent));
        } else if (transition_started) {
                /*
                 * We refuse to register parentless devices while a PM
                 * transition is in progress, in order to avoid leaving them
                 * unhandled down the road.
                 */
                dev_WARN(dev, "Parentless device registered during a PM transition\n");
        }
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus",
                 kobject_name(&deva->kobj),
                 devb->bus ? devb->bus->name : "No Bus",
                 kobject_name(&devb->kobj));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling %s+ @ %i\n",
                        dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;
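
        /*
         * A synchronous caller only needs to wait if the device may be
         * suspended or resumed asynchronously; otherwise ordering is already
         * guaranteed by the synchronous dpm_list walk.
         */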
        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);
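
        /*
         * Dispatch on the event type; each case invokes the corresponding
         * callback only if the subsystem actually provides one.
         */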
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                       const struct dev_pm_ops *ops,
                       pm_message_t state)
{
        int error = 0;
        ktime_t calltime, delta, rettime;
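
        /*
         * The timing is done by hand here, rather than through
         * initcall_debug_start(), so that the parent device can be included
         * in the trace message.
         */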
        if (initcall_debug) {
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                       dev_name(dev), error,
                       (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
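        /* Report at least one microsecond, so 0.000 msecs is never printed. */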
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        }

End:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        list_for_each_entry(dev, &dpm_list, power.entry)
                if (dev->power.status > DPM_OFF) {
                        int error;

                        dev->power.status = DPM_OFF;
                        error = device_resume_noirq(dev, state);
                        if (error)
                                pm_dev_err(dev, state, " early", error);
                }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
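        /*
         * Device interrupts were disabled by dpm_suspend_noirq(); re-enable
         * them now that all of the "noirq" callbacks have run.
         */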
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
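
        /*
         * A device must never be resumed before its parent, so wait for the
         * parent here in case it is being resumed asynchronously.
         */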
        dpm_wait(dev->parent, async);
        device_lock(dev);

        dev->power.status = DPM_RESUMING;

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                }
        }

End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
        struct list_head list;
        struct device *dev;
        ktime_t starttime = ktime_get();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
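
        /*
         * First pass: schedule an async resume for each device that may be
         * handled asynchronously, so those resumes run in parallel with the
         * synchronous pass below.
         */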
        list_for_each_entry(dev, &dpm_list, power.entry) {
                if (dev->power.status < DPM_OFF)
                        continue;

                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }
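
        /*
         * Second pass: resume the remaining devices synchronously, in
         * dpm_list order. Each handled device is moved onto a private list,
         * so the walk makes progress even though dpm_list_mtx is dropped
         * around each device_resume() call.
         */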
        while (!list_empty(&dpm_list)) {
                dev = to_device(dpm_list.next);
                get_device(dev);
                if (dev->power.status >= DPM_OFF && !is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);

                        mutex_lock(&dpm_list_mtx);
                        if (error)
                                pm_dev_err(dev, state, "", error);
                } else if (dev->power.status == DPM_SUSPENDING) {
                        /* Allow new children of the device to be registered */
                        dev->power.status = DPM_RESUMING;
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->class && dev->class->pm && dev->class->pm->complete) {
                pm_dev_dbg(dev, state, "completing class ");
                dev->class->pm->complete(dev);
        }

        if (dev->type && dev->type->pm && dev->type->pm->complete) {
                pm_dev_dbg(dev, state, "completing type ");
                dev->type->pm->complete(dev);
        }

        if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
                pm_dev_dbg(dev, state, "completing ");
                dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = false;
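        /*
         * dpm_list_mtx is dropped around each device_complete() call, so
         * handled devices are moved onto a private list and spliced back
         * afterwards, keeping the walk safe against concurrent list changes.
         */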
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                if (dev->power.status > DPM_ON) {
                        dev->power.status = DPM_ON;
                        mutex_unlock(&dpm_list_mtx);

                        device_complete(dev, state);
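                        /* Drop the runtime PM reference taken in dpm_prepare(). */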
                        pm_runtime_put_sync(dev);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        might_sleep();
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

End:
        return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
                error = device_suspend_noirq(dev, state);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        break;
                }
                dev->power.status = DPM_OFF_IRQ;
        }
        mutex_unlock(&dpm_list_mtx);
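        /*
         * On failure, roll back: resume any devices that were already
         * suspended and re-enable device interrupts.
         */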
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

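        /*
         * Children must be suspended before their parent, so wait for any
         * async child suspends that are still in flight.
         */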
        dpm_wait_for_children(dev, async);
        device_lock(dev);

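        /* If another (async) suspend has already failed, don't touch this device. */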
        if (async_error)
                goto End;

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

        if (!error)
                dev->power.status = DPM_OFF;

End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                pm_dev_err(dev, pm_transition, " async", error);
                async_error = error;
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

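        /*
         * Hand the device off to an async thread if it may be suspended
         * asynchronously; otherwise suspend it synchronously right here.
         */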
        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
        struct list_head list;
        ktime_t starttime = ktime_get();
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
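                /* Stop early if any async suspend has reported an error. */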
                if (async_error)
                        break;
        }
        list_splice(&list, dpm_list.prev);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing ");
                error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm && dev->type->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing type ");
                error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm && dev->class->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing class ");
                error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        }

End:
        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        struct list_head list;
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = true;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                dev->power.status = DPM_PREPARING;
                mutex_unlock(&dpm_list_mtx);

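                /*
                 * Pin the device's runtime PM state: take a reference without
                 * resuming it, then flush any pending runtime PM requests.
                 * pm_runtime_barrier() returns true if a resume request was
                 * pending; if the device may also wake up the system, treat
                 * that as a wakeup event and abort the transition.
                 */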
                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
                        /* Wake-up requested during system sleep transition. */
                        pm_runtime_put_sync(dev);
                        error = -EBUSY;
                } else {
                        error = device_prepare(dev, state);
                }

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        dev->power.status = DPM_ON;
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_ERR "PM: Failed to prepare device %s "
                               "for power transition: error %d\n",
                               kobject_name(&dev->kobj), error);
                        put_device(dev);
                        break;
                }
                dev->power.status = DPM_SUSPENDING;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        might_sleep();
        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);