/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

/* Forward declarations: these helpers call one another recursively. */
static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);
  14. /**
  15. * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  16. * @dev: Device to handle.
  17. */
  18. static void pm_runtime_deactivate_timer(struct device *dev)
  19. {
  20. if (dev->power.timer_expires > 0) {
  21. del_timer(&dev->power.suspend_timer);
  22. dev->power.timer_expires = 0;
  23. }
  24. }
  25. /**
  26. * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
  27. * @dev: Device to handle.
  28. */
  29. static void pm_runtime_cancel_pending(struct device *dev)
  30. {
  31. pm_runtime_deactivate_timer(dev);
  32. /*
  33. * In case there's a request pending, make sure its work function will
  34. * return without doing anything.
  35. */
  36. dev->power.request = RPM_REQ_NONE;
  37. }
/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * Run the bus type's ->runtime_idle() callback for @dev unless the device is
 * in a state that rules out suspending it (in use, disabled, not active,
 * children not suspended) or another idle notification is already running.
 * A pending RPM_REQ_IDLE request is cancelled, since this call supersedes it;
 * any other pending request takes precedence.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is dropped while the bus callback runs.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_idle()!\n");

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;	/* another notification in progress */
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it.  Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	/* Blocks concurrent idle notifications while the lock is dropped. */
	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		/* The callback may sleep, so run it without the lock held. */
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	/* Wake up waiters in __pm_runtime_barrier() watching this flag. */
	wake_up_all(&dev->power.wait_queue);

 out:
	dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);

	return retval;
}
  85. /**
  86. * pm_runtime_idle - Notify device bus type if the device can be suspended.
  87. * @dev: Device to notify the bus type about.
  88. */
  89. int pm_runtime_idle(struct device *dev)
  90. {
  91. int retval;
  92. spin_lock_irq(&dev->power.lock);
  93. retval = __pm_runtime_idle(dev);
  94. spin_unlock_irq(&dev->power.lock);
  95. return retval;
  96. }
  97. EXPORT_SYMBOL_GPL(pm_runtime_idle);
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier, wait
 * for it to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * Returns 0 on success, 1 if the device was already suspended, or a negative
 * error code on failure.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is dropped around the bus callback and while waiting for a
 * concurrent suspend to finish.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* nothing to do: already suspended */
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		/*
		 * A pm_wq work item must not sleep waiting for another
		 * suspend; report the one in progress instead.
		 */
		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;	/* status may have changed while we slept */
	}

	dev->power.runtime_status = RPM_SUSPENDING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		/* The callback may sleep, so run it without the lock held. */
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		/* The callback failed, so the device remains active. */
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);
		dev->power.deferred_resume = false;

		if (retval == -EAGAIN || retval == -EBUSY) {
			/*
			 * Transient failure: renotify the bus so it may retry
			 * and don't record this as a hard error.
			 */
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			/* One fewer unsuspended child of the parent now. */
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	/* A resume was requested while the suspend callback was running. */
	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	/* The parent may be able to suspend now that this child is. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
  202. /**
  203. * pm_runtime_suspend - Carry out run-time suspend of given device.
  204. * @dev: Device to suspend.
  205. */
  206. int pm_runtime_suspend(struct device *dev)
  207. {
  208. int retval;
  209. spin_lock_irq(&dev->power.lock);
  210. retval = __pm_runtime_suspend(dev, false);
  211. spin_unlock_irq(&dev->power.lock);
  212. return retval;
  213. }
  214. EXPORT_SYMBOL_GPL(pm_runtime_suspend);
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier, wait
 * for it to finish. If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device. Cancel any scheduled
 * or pending requests.
 *
 * Returns 0 on success, 1 if the device was already active, or a negative
 * error code on failure.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is dropped around the bus callback, while waiting for concurrent
 * operations and while dealing with the parent.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* A resume supersedes any scheduled or pending request. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;	/* nothing to do: already active */
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			/*
			 * A pm_wq work item must not sleep here.  Ask the
			 * running suspend to resume the device when done.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		/* Never hold both locks at once; take the parent's alone. */
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock_irq(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock_irq(&parent->power.lock);

		spin_lock_irq(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;	/* recheck our status with our lock held */
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		/* The callback may sleep, so run it without the lock held. */
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		/* The callback failed, so the device stays suspended. */
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		/* The parent has one more unsuspended child now. */
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		/* Drop the reference taken with pm_runtime_get_noresume(). */
		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
  324. /**
  325. * pm_runtime_resume - Carry out run-time resume of given device.
  326. * @dev: Device to suspend.
  327. */
  328. int pm_runtime_resume(struct device *dev)
  329. {
  330. int retval;
  331. spin_lock_irq(&dev->power.lock);
  332. retval = __pm_runtime_resume(dev, false);
  333. spin_unlock_irq(&dev->power.lock);
  334. return retval;
  335. }
  336. EXPORT_SYMBOL_GPL(pm_runtime_resume);
  337. /**
  338. * pm_runtime_work - Universal run-time PM work function.
  339. * @work: Work structure used for scheduling the execution of this function.
  340. *
  341. * Use @work to get the device object the work is to be done for, determine what
  342. * is to be done and execute the appropriate run-time PM function.
  343. */
  344. static void pm_runtime_work(struct work_struct *work)
  345. {
  346. struct device *dev = container_of(work, struct device, power.work);
  347. enum rpm_request req;
  348. spin_lock_irq(&dev->power.lock);
  349. if (!dev->power.request_pending)
  350. goto out;
  351. req = dev->power.request;
  352. dev->power.request = RPM_REQ_NONE;
  353. dev->power.request_pending = false;
  354. switch (req) {
  355. case RPM_REQ_NONE:
  356. break;
  357. case RPM_REQ_IDLE:
  358. __pm_runtime_idle(dev);
  359. break;
  360. case RPM_REQ_SUSPEND:
  361. __pm_runtime_suspend(dev, true);
  362. break;
  363. case RPM_REQ_RESUME:
  364. __pm_runtime_resume(dev, true);
  365. break;
  366. }
  367. out:
  368. spin_unlock_irq(&dev->power.lock);
  369. }
/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * Returns 0 if a request was queued (or an equivalent one was already
 * pending), or a negative error code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
  406. /**
  407. * pm_request_idle - Submit an idle notification request for given device.
  408. * @dev: Device to handle.
  409. */
  410. int pm_request_idle(struct device *dev)
  411. {
  412. unsigned long flags;
  413. int retval;
  414. spin_lock_irqsave(&dev->power.lock, flags);
  415. retval = __pm_request_idle(dev);
  416. spin_unlock_irqrestore(&dev->power.lock, flags);
  417. return retval;
  418. }
  419. EXPORT_SYMBOL_GPL(pm_request_idle);
/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * Returns 0 if a request was queued (or an equivalent one was already
 * pending), 1 if the device is already suspended, or a negative error code.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* already suspended: nothing to queue */
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	/* An immediate suspend request supersedes a scheduled one. */
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			/* retval == 1 (already suspended): cancel instead. */
			dev->power.request = retval ?
			    RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}
  462. /**
  463. * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  464. * @data: Device pointer passed by pm_schedule_suspend().
  465. *
  466. * Check if the time is right and execute __pm_request_suspend() in that case.
  467. */
  468. static void pm_suspend_timer_fn(unsigned long data)
  469. {
  470. struct device *dev = (struct device *)data;
  471. unsigned long flags;
  472. unsigned long expires;
  473. spin_lock_irqsave(&dev->power.lock, flags);
  474. expires = dev->power.timer_expires;
  475. /* If 'expire' is after 'jiffies' we've been called too early. */
  476. if (expires > 0 && !time_after(expires, jiffies)) {
  477. dev->power.timer_expires = 0;
  478. __pm_request_suspend(dev);
  479. }
  480. spin_unlock_irqrestore(&dev->power.lock, flags);
  481. }
  482. /**
  483. * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
  484. * @dev: Device to suspend.
  485. * @delay: Time to wait before submitting a suspend request, in milliseconds.
  486. */
  487. int pm_schedule_suspend(struct device *dev, unsigned int delay)
  488. {
  489. unsigned long flags;
  490. int retval = 0;
  491. spin_lock_irqsave(&dev->power.lock, flags);
  492. if (dev->power.runtime_error) {
  493. retval = -EINVAL;
  494. goto out;
  495. }
  496. if (!delay) {
  497. retval = __pm_request_suspend(dev);
  498. goto out;
  499. }
  500. pm_runtime_deactivate_timer(dev);
  501. if (dev->power.request_pending) {
  502. /*
  503. * Pending resume requests take precedence over us, but any
  504. * other pending requests have to be canceled.
  505. */
  506. if (dev->power.request == RPM_REQ_RESUME) {
  507. retval = -EAGAIN;
  508. goto out;
  509. }
  510. dev->power.request = RPM_REQ_NONE;
  511. }
  512. if (dev->power.runtime_status == RPM_SUSPENDED)
  513. retval = 1;
  514. else if (dev->power.runtime_status == RPM_SUSPENDING)
  515. retval = -EINPROGRESS;
  516. else if (atomic_read(&dev->power.usage_count) > 0
  517. || dev->power.disable_depth > 0)
  518. retval = -EAGAIN;
  519. else if (!pm_children_suspended(dev))
  520. retval = -EBUSY;
  521. if (retval)
  522. goto out;
  523. dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
  524. mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
  525. out:
  526. spin_unlock_irqrestore(&dev->power.lock, flags);
  527. return retval;
  528. }
  529. EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * Returns 0 if a request was queued (or a resume request was already
 * pending), 1 if the device is already active, or a negative error code.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;	/* already active: nothing to queue */
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	/* A resume request supersedes a scheduled suspend. */
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* If non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
  562. /**
  563. * pm_request_resume - Submit a resume request for given device.
  564. * @dev: Device to resume.
  565. */
  566. int pm_request_resume(struct device *dev)
  567. {
  568. unsigned long flags;
  569. int retval;
  570. spin_lock_irqsave(&dev->power.lock, flags);
  571. retval = __pm_request_resume(dev);
  572. spin_unlock_irqrestore(&dev->power.lock, flags);
  573. return retval;
  574. }
  575. EXPORT_SYMBOL_GPL(pm_request_resume);
  576. /**
  577. * __pm_runtime_get - Reference count a device and wake it up, if necessary.
  578. * @dev: Device to handle.
  579. * @sync: If set and the device is suspended, resume it synchronously.
  580. *
  581. * Increment the usage count of the device and if it was zero previously,
  582. * resume it or submit a resume request for it, depending on the value of @sync.
  583. */
  584. int __pm_runtime_get(struct device *dev, bool sync)
  585. {
  586. int retval = 1;
  587. if (atomic_add_return(1, &dev->power.usage_count) == 1)
  588. retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
  589. return retval;
  590. }
  591. EXPORT_SYMBOL_GPL(__pm_runtime_get);
  592. /**
  593. * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
  594. * @dev: Device to handle.
  595. * @sync: If the device's bus type is to be notified, do that synchronously.
  596. *
  597. * Decrement the usage count of the device and if it reaches zero, carry out a
  598. * synchronous idle notification or submit an idle notification request for it,
  599. * depending on the value of @sync.
  600. */
  601. int __pm_runtime_put(struct device *dev, bool sync)
  602. {
  603. int retval = 0;
  604. if (atomic_dec_and_test(&dev->power.usage_count))
  605. retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
  606. return retval;
  607. }
  608. EXPORT_SYMBOL_GPL(__pm_runtime_put);
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Overriding the status is only valid after an error or when disabled. */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;	/* no transition; still clears the error */

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested under dev's lock, same order as __pm_runtime_resume(). */
		spin_lock_irq(&parent->power.lock);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			error = -EBUSY;
		} else {
			if (dev->power.runtime_status == RPM_SUSPENDED)
				atomic_inc(&parent->power.child_count);
		}

		spin_unlock_irq(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Notify outside the lock; the parent may be able to suspend now. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.  The lock
 * is dropped while flushing the work item and while sleeping.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* Neutralize the request so the work function does nothing. */
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Block suspends for the duration of the barrier. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	/* Carry out a pending resume directly instead of discarding it. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled: just nest the disable. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	/* First disable: flush requests and wait for operations in progress. */
	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
  788. /**
  789. * pm_runtime_enable - Enable run-time PM of a device.
  790. * @dev: Device to handle.
  791. */
  792. void pm_runtime_enable(struct device *dev)
  793. {
  794. unsigned long flags;
  795. spin_lock_irqsave(&dev->power.lock, flags);
  796. if (dev->power.disable_depth > 0)
  797. dev->power.disable_depth--;
  798. else
  799. dev_warn(dev, "Unbalanced %s!\n", __func__);
  800. spin_unlock_irqrestore(&dev->power.lock, flags);
  801. }
  802. EXPORT_SYMBOL_GPL(pm_runtime_enable);
  803. /**
  804. * pm_runtime_init - Initialize run-time PM fields in given device object.
  805. * @dev: Device object to initialize.
  806. */
  807. void pm_runtime_init(struct device *dev)
  808. {
  809. spin_lock_init(&dev->power.lock);
  810. dev->power.runtime_status = RPM_SUSPENDED;
  811. dev->power.idle_notification = false;
  812. dev->power.disable_depth = 1;
  813. atomic_set(&dev->power.usage_count, 0);
  814. dev->power.runtime_error = 0;
  815. atomic_set(&dev->power.child_count, 0);
  816. pm_suspend_ignore_children(dev, false);
  817. dev->power.request_pending = false;
  818. dev->power.request = RPM_REQ_NONE;
  819. dev->power.deferred_resume = false;
  820. INIT_WORK(&dev->power.work, pm_runtime_work);
  821. dev->power.timer_expires = 0;
  822. setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
  823. (unsigned long)dev);
  824. init_waitqueue_head(&dev->power.wait_queue);
  825. }
  826. /**
  827. * pm_runtime_remove - Prepare for removing a device from device hierarchy.
  828. * @dev: Device object being removed from device hierarchy.
  829. */
  830. void pm_runtime_remove(struct device *dev)
  831. {
  832. __pm_runtime_disable(dev, false);
  833. /* Change the status back to 'suspended' to match the initial status. */
  834. if (dev->power.runtime_status == RPM_ACTIVE)
  835. pm_runtime_set_suspended(dev);
  836. }