runtime.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100
  1. /*
  2. * drivers/base/power/runtime.c - Helper functions for device run-time PM
  3. *
  4. * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  5. *
  6. * This file is released under the GPLv2.
  7. */
  8. #include <linux/sched.h>
  9. #include <linux/pm_runtime.h>
  10. #include <linux/jiffies.h>
  11. static int __pm_runtime_resume(struct device *dev, bool from_wq);
  12. static int __pm_request_idle(struct device *dev);
  13. static int __pm_request_resume(struct device *dev);
  14. /**
  15. * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  16. * @dev: Device to handle.
  17. */
  18. static void pm_runtime_deactivate_timer(struct device *dev)
  19. {
  20. if (dev->power.timer_expires > 0) {
  21. del_timer(&dev->power.suspend_timer);
  22. dev->power.timer_expires = 0;
  23. }
  24. }
  25. /**
  26. * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
  27. * @dev: Device to handle.
  28. */
  29. static void pm_runtime_cancel_pending(struct device *dev)
  30. {
  31. pm_runtime_deactivate_timer(dev);
  32. /*
  33. * In case there's a request pending, make sure its work function will
  34. * return without doing anything.
  35. */
  36. dev->power.request = RPM_REQ_NONE;
  37. }
/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * Run the ->runtime_idle() callback provided by the device's bus type,
 * device type or class (checked in that order).  The callback is invoked
 * with dev->power.lock released; the power.idle_notification flag guards
 * against a second notification running concurrently in that window.
 *
 * Returns 0 on success, -EINVAL on a previous runtime PM error,
 * -EINPROGRESS if a notification is already running, -EAGAIN if the
 * device is in use, disabled, not active, or another request is pending,
 * and -EBUSY if the device has unsuspended children.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	/* Set before dropping the lock so concurrent callers back off. */
	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);
		dev->bus->pm->runtime_idle(dev);
		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);
		dev->type->pm->runtime_idle(dev);
		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);
		dev->class->pm->runtime_idle(dev);
		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	/* Wake up anyone waiting in __pm_runtime_barrier(). */
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
  92. /**
  93. * pm_runtime_idle - Notify device bus type if the device can be suspended.
  94. * @dev: Device to notify the bus type about.
  95. */
  96. int pm_runtime_idle(struct device *dev)
  97. {
  98. int retval;
  99. spin_lock_irq(&dev->power.lock);
  100. retval = __pm_runtime_idle(dev);
  101. spin_unlock_irq(&dev->power.lock);
  102. return retval;
  103. }
  104. EXPORT_SYMBOL_GPL(pm_runtime_idle);
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier, wait
 * for it to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * Returns 1 if the device was already suspended, 0 on success, and a
 * negative error code otherwise; a callback failure is recorded in
 * power.runtime_error (except -EAGAIN/-EBUSY, which are treated as
 * transient).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		/* A work item must not block on another suspend in flight. */
		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		/* The status changed meanwhile, so re-run all the checks. */
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	/* The callback runs with dev->power.lock released. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		if (retval == -EAGAIN || retval == -EBUSY) {
			/*
			 * Transient failure: clear the error and send an idle
			 * notification unless a new suspend timer was armed by
			 * the callback.
			 */
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	/* A resume requested while the callback ran takes over now. */
	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
  223. /**
  224. * pm_runtime_suspend - Carry out run-time suspend of given device.
  225. * @dev: Device to suspend.
  226. */
  227. int pm_runtime_suspend(struct device *dev)
  228. {
  229. int retval;
  230. spin_lock_irq(&dev->power.lock);
  231. retval = __pm_runtime_suspend(dev, false);
  232. spin_unlock_irq(&dev->power.lock);
  233. return retval;
  234. }
  235. EXPORT_SYMBOL_GPL(pm_runtime_suspend);
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier, wait
 * for it to finish. If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device. Cancel any scheduled
 * or pending requests.
 *
 * Returns 1 if the device was already active, 0 on success, and a negative
 * error code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			/*
			 * The work item must not block; ask the suspend in
			 * progress to resume the device when it is done.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.  The get/put pair keeps the parent active for
		 * the whole duration of this resume.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		/* The lock was dropped, so re-check everything from scratch. */
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	/* The callback runs with dev->power.lock released. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		/* Drop the reference taken on the parent above. */
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
  357. /**
  358. * pm_runtime_resume - Carry out run-time resume of given device.
  359. * @dev: Device to suspend.
  360. */
  361. int pm_runtime_resume(struct device *dev)
  362. {
  363. int retval;
  364. spin_lock_irq(&dev->power.lock);
  365. retval = __pm_runtime_resume(dev, false);
  366. spin_unlock_irq(&dev->power.lock);
  367. return retval;
  368. }
  369. EXPORT_SYMBOL_GPL(pm_runtime_resume);
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 *
 * The request type is consumed under dev->power.lock; a request that was
 * canceled meanwhile is seen as RPM_REQ_NONE and does nothing.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	/* Latch and clear the request before acting on it. */
	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * Returns 0 if the request was queued (or an equivalent one was already
 * pending) and a negative error code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other then RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		/* The work item is already queued; no need to queue again. */
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
  439. /**
  440. * pm_request_idle - Submit an idle notification request for given device.
  441. * @dev: Device to handle.
  442. */
  443. int pm_request_idle(struct device *dev)
  444. {
  445. unsigned long flags;
  446. int retval;
  447. spin_lock_irqsave(&dev->power.lock, flags);
  448. retval = __pm_request_idle(dev);
  449. spin_unlock_irqrestore(&dev->power.lock, flags);
  450. return retval;
  451. }
  452. EXPORT_SYMBOL_GPL(pm_request_idle);
/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * Returns 1 if the device is already suspended, 0 if a request was queued
 * (or overtook a pending non-resume request) and a negative error code
 * otherwise.  Note that a positive retval cancels a pending request rather
 * than queuing a new one.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	/* retval == 1 ("already suspended") still falls through below. */
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}
  495. /**
  496. * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  497. * @data: Device pointer passed by pm_schedule_suspend().
  498. *
  499. * Check if the time is right and execute __pm_request_suspend() in that case.
  500. */
  501. static void pm_suspend_timer_fn(unsigned long data)
  502. {
  503. struct device *dev = (struct device *)data;
  504. unsigned long flags;
  505. unsigned long expires;
  506. spin_lock_irqsave(&dev->power.lock, flags);
  507. expires = dev->power.timer_expires;
  508. /* If 'expire' is after 'jiffies' we've been called too early. */
  509. if (expires > 0 && !time_after(expires, jiffies)) {
  510. dev->power.timer_expires = 0;
  511. __pm_request_suspend(dev);
  512. }
  513. spin_unlock_irqrestore(&dev->power.lock, flags);
  514. }
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 *
 * Returns 1 if the device is already suspended, 0 if the timer was armed
 * (or, for @delay == 0, the result of __pm_request_suspend()) and a
 * negative error code otherwise.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* A zero delay means "suspend as soon as possible". */
	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* timer_expires == 0 means "inactive", so avoid that value. */
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * Returns 1 if the device is already active, 0 if a request was queued (or
 * a pending request was turned into a resume request) and a negative error
 * code otherwise.  If a suspend is running, only set power.deferred_resume
 * so the suspend path resumes the device when it finishes.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	/* retval == 1 ("already active") still falls through below. */
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
  599. /**
  600. * pm_request_resume - Submit a resume request for given device.
  601. * @dev: Device to resume.
  602. */
  603. int pm_request_resume(struct device *dev)
  604. {
  605. unsigned long flags;
  606. int retval;
  607. spin_lock_irqsave(&dev->power.lock, flags);
  608. retval = __pm_request_resume(dev);
  609. spin_unlock_irqrestore(&dev->power.lock, flags);
  610. return retval;
  611. }
  612. EXPORT_SYMBOL_GPL(pm_request_resume);
  613. /**
  614. * __pm_runtime_get - Reference count a device and wake it up, if necessary.
  615. * @dev: Device to handle.
  616. * @sync: If set and the device is suspended, resume it synchronously.
  617. *
  618. * Increment the usage count of the device and resume it or submit a resume
  619. * request for it, depending on the value of @sync.
  620. */
  621. int __pm_runtime_get(struct device *dev, bool sync)
  622. {
  623. int retval;
  624. atomic_inc(&dev->power.usage_count);
  625. retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
  626. return retval;
  627. }
  628. EXPORT_SYMBOL_GPL(__pm_runtime_get);
  629. /**
  630. * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
  631. * @dev: Device to handle.
  632. * @sync: If the device's bus type is to be notified, do that synchronously.
  633. *
  634. * Decrement the usage count of the device and if it reaches zero, carry out a
  635. * synchronous idle notification or submit an idle notification request for it,
  636. * depending on the value of @sync.
  637. */
  638. int __pm_runtime_put(struct device *dev, bool sync)
  639. {
  640. int retval = 0;
  641. if (atomic_dec_and_test(&dev->power.usage_count))
  642. retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
  643. return retval;
  644. }
  645. EXPORT_SYMBOL_GPL(__pm_runtime_put);
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device (RPM_ACTIVE or RPM_SUSPENDED).
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Refuse to override the status while run-time PM is enabled and
	 * there is no error to clear.
	 */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested class avoids a lockdep false positive: both locks
		 * belong to the same lock class. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.  The
 * lock is dropped around cancel_work_sync() and the schedule() calls, so
 * the device's state may change while this function runs.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* Neutralize the request before flushing the work item. */
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Keep the usage count elevated so nothing suspends the device
	 * while the barrier is in progress. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled: just deepen the nesting count. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	/* Only the first disable needs to flush in-flight operations. */
	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
  823. /**
  824. * pm_runtime_enable - Enable run-time PM of a device.
  825. * @dev: Device to handle.
  826. */
  827. void pm_runtime_enable(struct device *dev)
  828. {
  829. unsigned long flags;
  830. spin_lock_irqsave(&dev->power.lock, flags);
  831. if (dev->power.disable_depth > 0)
  832. dev->power.disable_depth--;
  833. else
  834. dev_warn(dev, "Unbalanced %s!\n", __func__);
  835. spin_unlock_irqrestore(&dev->power.lock, flags);
  836. }
  837. EXPORT_SYMBOL_GPL(pm_runtime_enable);
  838. /**
  839. * pm_runtime_forbid - Block run-time PM of a device.
  840. * @dev: Device to handle.
  841. *
  842. * Increase the device's usage count and clear its power.runtime_auto flag,
  843. * so that it cannot be suspended at run time until pm_runtime_allow() is called
  844. * for it.
  845. */
  846. void pm_runtime_forbid(struct device *dev)
  847. {
  848. spin_lock_irq(&dev->power.lock);
  849. if (!dev->power.runtime_auto)
  850. goto out;
  851. dev->power.runtime_auto = false;
  852. atomic_inc(&dev->power.usage_count);
  853. __pm_runtime_resume(dev, false);
  854. out:
  855. spin_unlock_irq(&dev->power.lock);
  856. }
  857. EXPORT_SYMBOL_GPL(pm_runtime_forbid);
  858. /**
  859. * pm_runtime_allow - Unblock run-time PM of a device.
  860. * @dev: Device to handle.
  861. *
  862. * Decrease the device's usage count and set its power.runtime_auto flag.
  863. */
  864. void pm_runtime_allow(struct device *dev)
  865. {
  866. spin_lock_irq(&dev->power.lock);
  867. if (dev->power.runtime_auto)
  868. goto out;
  869. dev->power.runtime_auto = true;
  870. if (atomic_dec_and_test(&dev->power.usage_count))
  871. __pm_runtime_idle(dev);
  872. out:
  873. spin_unlock_irq(&dev->power.lock);
  874. }
  875. EXPORT_SYMBOL_GPL(pm_runtime_allow);
  876. /**
  877. * pm_runtime_init - Initialize run-time PM fields in given device object.
  878. * @dev: Device object to initialize.
  879. */
  880. void pm_runtime_init(struct device *dev)
  881. {
  882. spin_lock_init(&dev->power.lock);
  883. dev->power.runtime_status = RPM_SUSPENDED;
  884. dev->power.idle_notification = false;
  885. dev->power.disable_depth = 1;
  886. atomic_set(&dev->power.usage_count, 0);
  887. dev->power.runtime_error = 0;
  888. atomic_set(&dev->power.child_count, 0);
  889. pm_suspend_ignore_children(dev, false);
  890. dev->power.runtime_auto = true;
  891. dev->power.request_pending = false;
  892. dev->power.request = RPM_REQ_NONE;
  893. dev->power.deferred_resume = false;
  894. INIT_WORK(&dev->power.work, pm_runtime_work);
  895. dev->power.timer_expires = 0;
  896. setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
  897. (unsigned long)dev);
  898. init_waitqueue_head(&dev->power.wait_queue);
  899. }
  900. /**
  901. * pm_runtime_remove - Prepare for removing a device from device hierarchy.
  902. * @dev: Device object being removed from device hierarchy.
  903. */
  904. void pm_runtime_remove(struct device *dev)
  905. {
  906. __pm_runtime_disable(dev, false);
  907. /* Change the status back to 'suspended' to match the initial status. */
  908. if (dev->power.runtime_status == RPM_ACTIVE)
  909. pm_runtime_set_suspended(dev);
  910. }