/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;
	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
	    && dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
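
/*
 * Illustrative sketch, not part of this file: a bus type's ->runtime_idle()
 * callback usually just checks for remaining busy conditions and then asks
 * the core to suspend. Assuming a hypothetical foo_bus:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		// Nothing left to do, so request a suspend.
 *		return pm_runtime_suspend(dev);
 *	}
 *
 * pm_runtime_suspend() here is the synchronous wrapper around
 * __pm_runtime_suspend() provided by linux/pm_runtime.h.
 */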

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended. If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
 * pending idle notification. If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;

		if (retval == -EAGAIN || retval == -EBUSY) {
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		rpm_idle(dev, 0);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
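
/*
 * Illustrative sketch, not part of this file: a driver's
 * ->runtime_suspend() callback may return -EBUSY or -EAGAIN to signal a
 * temporary inability to suspend; as rpm_suspend() shows above, the device
 * then stays RPM_ACTIVE and no runtime error is recorded. Assuming a
 * hypothetical foo driver:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (foo_transfer_in_progress(foo))
 *			return -EBUSY;		// device stays active
 *
 *		foo_save_registers(foo);
 *		foo_power_down(foo);
 *		return 0;
 *	}
 */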

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed. Cancel
 * any scheduled or pending requests. If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly. Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent. This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock(&dev->parent->power.lock);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
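
/*
 * Illustrative sketch, not part of this file: the matching
 * ->runtime_resume() callback restores power and context. A nonzero return
 * value is stored in power.runtime_error and blocks further run-time PM
 * until the status is explicitly reset. Hypothetical foo driver again:
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		foo_power_up(foo);
 *		foo_restore_registers(foo);
 *		return 0;
 *	}
 */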

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* A timer_expires value of zero means "no timer", so avoid it. */
	dev->power.timer_expires += !dev->power.timer_expires;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
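
/*
 * Usage sketch (illustrative, not part of this file): a driver that has
 * just finished a burst of I/O can ask for a suspend attempt half a second
 * from now instead of suspending immediately:
 *
 *	static void foo_io_done(struct device *dev)
 *	{
 *		pm_schedule_suspend(dev, 500);	// delay in milliseconds
 *	}
 *
 * If the device becomes busy again before the timer fires, the scheduled
 * request is canceled by the next resume via pm_runtime_cancel_pending().
 */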

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero. Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
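
/*
 * Usage note (illustrative, not part of this file): drivers normally reach
 * this through the static inline wrappers in linux/pm_runtime.h for this
 * kernel series, e.g.:
 *
 *	pm_runtime_idle(dev);		// __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev);		// __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev);		// RPM_GET_PUT | RPM_ASYNC
 *	pm_runtime_put_sync(dev);	// RPM_GET_PUT
 */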

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Carry out a suspend, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
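
/*
 * Usage sketch (illustrative, not part of this file): the usual pattern in
 * a driver's I/O path pairs pm_runtime_get_sync() with pm_runtime_put(),
 * the RPM_GET_PUT wrappers from linux/pm_runtime.h. Assuming a
 * hypothetical foo driver:
 *
 *	static int foo_start_io(struct device *dev)
 *	{
 *		int error;
 *
 *		error = pm_runtime_get_sync(dev);	// resume if necessary
 *		if (error < 0) {
 *			pm_runtime_put_noidle(dev);	// balance the get
 *			return error;
 *		}
 *		error = foo_do_io(dev);
 *		pm_runtime_put(dev);	// drop the count, async idle check
 *		return error;
 *	}
 */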

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
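
/*
 * Usage sketch (illustrative, not part of this file): besides declaring the
 * initial state at probe time via pm_runtime_set_active() or
 * pm_runtime_set_suspended() from linux/pm_runtime.h, the set_status
 * wrappers can clear a stored runtime error once a driver has recovered
 * the hardware by other means:
 *
 *	static void foo_recover(struct device *dev)
 *	{
 *		foo_hard_reset(dev);		// hypothetical recovery step
 *		if (pm_runtime_set_active(dev))	// clears power.runtime_error
 *			dev_warn(dev, "parent not active\n");
 *	}
 */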

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
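
/*
 * Usage sketch (illustrative, not part of this file): code that is about to
 * act on a device regardless of in-flight run-time PM activity, such as an
 * unbind path, can use the barrier to settle things first:
 *
 *	static void foo_teardown(struct device *dev)
 *	{
 *		if (pm_runtime_barrier(dev))
 *			dev_dbg(dev, "resumed for a pending request\n");
 *		// all previously pending requests are flushed at this point
 *	}
 */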

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time
	 * PM shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
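
/*
 * Usage sketch (illustrative, not part of this file): pm_runtime_enable()
 * must balance a prior disable (devices start with disable_depth == 1, see
 * pm_runtime_init() below). The probe/remove pairing for a hypothetical
 * foo driver, using the linux/pm_runtime.h wrappers:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_active(dev);	// hardware is powered up
 *		pm_runtime_enable(dev);		// drops disable_depth to 0
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);	// __pm_runtime_disable(dev, true)
 *		return 0;
 *	}
 */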

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
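
/*
 * Usage note (illustrative, not part of this file): forbid/allow back the
 * user-visible power/control sysfs attribute ("on" forbids, "auto" allows).
 * A driver that wants run-time PM disallowed by default until user space
 * opts in can call, in its probe routine:
 *
 *	pm_runtime_forbid(dev);		// sysfs "auto" re-allows it later
 */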

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own. The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);

	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
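
/*
 * Usage sketch (illustrative, not part of this file): a child device that
 * is powered entirely through its parent can opt out of callbacks when it
 * is created:
 *
 *	pm_runtime_no_callbacks(&child->dev);	// hypothetical child device
 *
 * After this, rpm_suspend() and rpm_resume() take the no_callback shortcuts
 * above and only the parent's callbacks ever run.
 */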

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
	    (unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}