runtime.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055
  1. /*
  2. * drivers/base/power/runtime.c - Helper functions for device run-time PM
  3. *
  4. * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  5. *
  6. * This file is released under the GPLv2.
  7. */
  8. #include <linux/sched.h>
  9. #include <linux/pm_runtime.h>
  10. #include <linux/jiffies.h>
  11. static int __pm_runtime_resume(struct device *dev, bool from_wq);
  12. static int __pm_request_idle(struct device *dev);
  13. static int __pm_request_resume(struct device *dev);
  14. /**
  15. * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  16. * @dev: Device to handle.
  17. */
  18. static void pm_runtime_deactivate_timer(struct device *dev)
  19. {
  20. if (dev->power.timer_expires > 0) {
  21. del_timer(&dev->power.suspend_timer);
  22. dev->power.timer_expires = 0;
  23. }
  24. }
  25. /**
  26. * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
  27. * @dev: Device to handle.
  28. */
  29. static void pm_runtime_cancel_pending(struct device *dev)
  30. {
  31. pm_runtime_deactivate_timer(dev);
  32. /*
  33. * In case there's a request pending, make sure its work function will
  34. * return without doing anything.
  35. */
  36. dev->power.request = RPM_REQ_NONE;
  37. }
/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * Check whether an idle notification may be carried out and, if so, invoke
 * the ->runtime_idle() callback of the device's bus type, device type or
 * class — whichever is found first, in that order.
 *
 * Returns 0 on success, -EINVAL on a stored run-time PM error, -EINPROGRESS
 * if another idle notification is already running, -EAGAIN if the device is
 * in use/disabled/not active or a conflicting request is pending, or -EBUSY
 * if the device has unsuspended children.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is released around the callback invocation and reacquired before
 * returning.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;	/* another notification is in flight */
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;	/* active children block idling */
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	/* Make concurrent callers back off with -EINPROGRESS (see above). */
	dev->power.idle_notification = true;

	/* At most one of the bus/type/class callbacks is invoked. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
  92. /**
  93. * pm_runtime_idle - Notify device bus type if the device can be suspended.
  94. * @dev: Device to notify the bus type about.
  95. */
  96. int pm_runtime_idle(struct device *dev)
  97. {
  98. int retval;
  99. spin_lock_irq(&dev->power.lock);
  100. retval = __pm_runtime_idle(dev);
  101. spin_unlock_irq(&dev->power.lock);
  102. return retval;
  103. }
  104. EXPORT_SYMBOL_GPL(pm_runtime_idle);
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend()
 * callback provided by its bus type, device type or class (first one found,
 * in that order). If another suspend has been started earlier, wait for it
 * to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * Returns 1 if the device is already suspended, 0 on success, or a negative
 * error code if suspending is not possible or the callback failed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is released around the callback invocation and while waiting for a
 * concurrent suspend, and reacquired before returning.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* nothing to do: report "already suspended" */
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		/* The pm_wq work item must not block waiting on itself. */
		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	/* At most one of the bus/type/class callbacks is invoked. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);

		if (retval == -EAGAIN || retval == -EBUSY) {
			/* Transient failure: clear the error and notify. */
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	/* A resume was requested while we were suspending; honor it now. */
	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
  220. /**
  221. * pm_runtime_suspend - Carry out run-time suspend of given device.
  222. * @dev: Device to suspend.
  223. */
  224. int pm_runtime_suspend(struct device *dev)
  225. {
  226. int retval;
  227. spin_lock_irq(&dev->power.lock);
  228. retval = __pm_runtime_suspend(dev, false);
  229. spin_unlock_irq(&dev->power.lock);
  230. return retval;
  231. }
  232. EXPORT_SYMBOL_GPL(pm_runtime_suspend);
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type, device type or class (first one found, in that
 * order). If another resume has been started earlier, wait for it to finish.
 * If there's a suspend running in parallel with this function, wait for it to
 * finish and resume the device. Cancel any scheduled or pending requests.
 * If the device has a parent, the parent is resumed first unless its run-time
 * PM is disabled or it ignores its children.
 *
 * Returns 1 if the device was already active, 0 on success, or a negative
 * error code on failure.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 * The lock is dropped and reacquired at several points: while waiting, while
 * handling the parent and around the callback invocation.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;	/* nothing to do: report "already active" */
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			/*
			 * The pm_wq work item must not block on itself. If a
			 * suspend is in progress, ask it to resume the device
			 * once it completes.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		/* Re-run the checks now that the parent is active. */
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	/* At most one of the bus/type/class callbacks is invoked. */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		/* Drop the reference taken with pm_runtime_get_noresume(). */
		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
  354. /**
  355. * pm_runtime_resume - Carry out run-time resume of given device.
  356. * @dev: Device to suspend.
  357. */
  358. int pm_runtime_resume(struct device *dev)
  359. {
  360. int retval;
  361. spin_lock_irq(&dev->power.lock);
  362. retval = __pm_runtime_resume(dev, false);
  363. spin_unlock_irq(&dev->power.lock);
  364. return retval;
  365. }
  366. EXPORT_SYMBOL_GPL(pm_runtime_resume);
  367. /**
  368. * pm_runtime_work - Universal run-time PM work function.
  369. * @work: Work structure used for scheduling the execution of this function.
  370. *
  371. * Use @work to get the device object the work is to be done for, determine what
  372. * is to be done and execute the appropriate run-time PM function.
  373. */
  374. static void pm_runtime_work(struct work_struct *work)
  375. {
  376. struct device *dev = container_of(work, struct device, power.work);
  377. enum rpm_request req;
  378. spin_lock_irq(&dev->power.lock);
  379. if (!dev->power.request_pending)
  380. goto out;
  381. req = dev->power.request;
  382. dev->power.request = RPM_REQ_NONE;
  383. dev->power.request_pending = false;
  384. switch (req) {
  385. case RPM_REQ_NONE:
  386. break;
  387. case RPM_REQ_IDLE:
  388. __pm_runtime_idle(dev);
  389. break;
  390. case RPM_REQ_SUSPEND:
  391. __pm_runtime_suspend(dev, true);
  392. break;
  393. case RPM_REQ_RESUME:
  394. __pm_runtime_resume(dev, true);
  395. break;
  396. }
  397. out:
  398. spin_unlock_irq(&dev->power.lock);
  399. }
  400. /**
  401. * __pm_request_idle - Submit an idle notification request for given device.
  402. * @dev: Device to handle.
  403. *
  404. * Check if the device's run-time PM status is correct for suspending the device
  405. * and queue up a request to run __pm_runtime_idle() for it.
  406. *
  407. * This function must be called under dev->power.lock with interrupts disabled.
  408. */
  409. static int __pm_request_idle(struct device *dev)
  410. {
  411. int retval = 0;
  412. if (dev->power.runtime_error)
  413. retval = -EINVAL;
  414. else if (atomic_read(&dev->power.usage_count) > 0
  415. || dev->power.disable_depth > 0
  416. || dev->power.runtime_status == RPM_SUSPENDED
  417. || dev->power.runtime_status == RPM_SUSPENDING)
  418. retval = -EAGAIN;
  419. else if (!pm_children_suspended(dev))
  420. retval = -EBUSY;
  421. if (retval)
  422. return retval;
  423. if (dev->power.request_pending) {
  424. /* Any requests other then RPM_REQ_IDLE take precedence. */
  425. if (dev->power.request == RPM_REQ_NONE)
  426. dev->power.request = RPM_REQ_IDLE;
  427. else if (dev->power.request != RPM_REQ_IDLE)
  428. retval = -EAGAIN;
  429. return retval;
  430. }
  431. dev->power.request = RPM_REQ_IDLE;
  432. dev->power.request_pending = true;
  433. queue_work(pm_wq, &dev->power.work);
  434. return retval;
  435. }
  436. /**
  437. * pm_request_idle - Submit an idle notification request for given device.
  438. * @dev: Device to handle.
  439. */
  440. int pm_request_idle(struct device *dev)
  441. {
  442. unsigned long flags;
  443. int retval;
  444. spin_lock_irqsave(&dev->power.lock, flags);
  445. retval = __pm_request_idle(dev);
  446. spin_unlock_irqrestore(&dev->power.lock, flags);
  447. return retval;
  448. }
  449. EXPORT_SYMBOL_GPL(pm_request_idle);
/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * Returns 1 if the device is already suspended, 0 if a request has been
 * queued up (or an equivalent one was already pending), or a negative error
 * code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* already suspended; no request needed */
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	/* retval may be 1 here; only negative values abort. */
	if (retval < 0)
		return retval;

	/* An armed timer would just submit the same request later. */
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
			    RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		/* Already suspended (retval == 1): nothing to queue. */
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}
  492. /**
  493. * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  494. * @data: Device pointer passed by pm_schedule_suspend().
  495. *
  496. * Check if the time is right and execute __pm_request_suspend() in that case.
  497. */
  498. static void pm_suspend_timer_fn(unsigned long data)
  499. {
  500. struct device *dev = (struct device *)data;
  501. unsigned long flags;
  502. unsigned long expires;
  503. spin_lock_irqsave(&dev->power.lock, flags);
  504. expires = dev->power.timer_expires;
  505. /* If 'expire' is after 'jiffies' we've been called too early. */
  506. if (expires > 0 && !time_after(expires, jiffies)) {
  507. dev->power.timer_expires = 0;
  508. __pm_request_suspend(dev);
  509. }
  510. spin_unlock_irqrestore(&dev->power.lock, flags);
  511. }
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 *
 * If @delay is zero, a suspend request is submitted right away instead of
 * arming the timer.
 *
 * Returns 1 if the device is already suspended, 0 if the timer has been
 * armed (or the immediate request queued), or a negative error code.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		/* Zero delay: submit the request immediately. */
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* already suspended */
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;	/* 0 means "timer inactive" */
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
  562. /**
  563. * pm_request_resume - Submit a resume request for given device.
  564. * @dev: Device to resume.
  565. *
  566. * This function must be called under dev->power.lock with interrupts disabled.
  567. */
  568. static int __pm_request_resume(struct device *dev)
  569. {
  570. int retval = 0;
  571. if (dev->power.runtime_error)
  572. return -EINVAL;
  573. if (dev->power.runtime_status == RPM_ACTIVE)
  574. retval = 1;
  575. else if (dev->power.runtime_status == RPM_RESUMING)
  576. retval = -EINPROGRESS;
  577. else if (dev->power.disable_depth > 0)
  578. retval = -EAGAIN;
  579. if (retval < 0)
  580. return retval;
  581. pm_runtime_deactivate_timer(dev);
  582. if (dev->power.runtime_status == RPM_SUSPENDING) {
  583. dev->power.deferred_resume = true;
  584. return retval;
  585. }
  586. if (dev->power.request_pending) {
  587. /* If non-resume request is pending, we can overtake it. */
  588. dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
  589. return retval;
  590. }
  591. if (retval)
  592. return retval;
  593. dev->power.request = RPM_REQ_RESUME;
  594. dev->power.request_pending = true;
  595. queue_work(pm_wq, &dev->power.work);
  596. return retval;
  597. }
  598. /**
  599. * pm_request_resume - Submit a resume request for given device.
  600. * @dev: Device to resume.
  601. */
  602. int pm_request_resume(struct device *dev)
  603. {
  604. unsigned long flags;
  605. int retval;
  606. spin_lock_irqsave(&dev->power.lock, flags);
  607. retval = __pm_request_resume(dev);
  608. spin_unlock_irqrestore(&dev->power.lock, flags);
  609. return retval;
  610. }
  611. EXPORT_SYMBOL_GPL(pm_request_resume);
  612. /**
  613. * __pm_runtime_get - Reference count a device and wake it up, if necessary.
  614. * @dev: Device to handle.
  615. * @sync: If set and the device is suspended, resume it synchronously.
  616. *
  617. * Increment the usage count of the device and resume it or submit a resume
  618. * request for it, depending on the value of @sync.
  619. */
  620. int __pm_runtime_get(struct device *dev, bool sync)
  621. {
  622. int retval;
  623. atomic_inc(&dev->power.usage_count);
  624. retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
  625. return retval;
  626. }
  627. EXPORT_SYMBOL_GPL(__pm_runtime_get);
  628. /**
  629. * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
  630. * @dev: Device to handle.
  631. * @sync: If the device's bus type is to be notified, do that synchronously.
  632. *
  633. * Decrement the usage count of the device and if it reaches zero, carry out a
  634. * synchronous idle notification or submit an idle notification request for it,
  635. * depending on the value of @sync.
  636. */
  637. int __pm_runtime_put(struct device *dev, bool sync)
  638. {
  639. int retval = 0;
  640. if (atomic_dec_and_test(&dev->power.usage_count))
  641. retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
  642. return retval;
  643. }
  644. EXPORT_SYMBOL_GPL(__pm_runtime_put);
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device (RPM_ACTIVE or RPM_SUSPENDED).
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Refuse to override a status maintained by active run-time PM. */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested so lockdep accepts taking two power.lock instances. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* After dropping the lock: pm_request_idle() takes the parent's lock. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled. The lock
 * is dropped and reacquired while canceling the work item and while waiting
 * for in-progress operations.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* Neutralize the request before waiting for the work item. */
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Taken before the lock; the matching put is after the unlock. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		/* Already disabled; just deepen the nesting count. */
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
  822. /**
  823. * pm_runtime_enable - Enable run-time PM of a device.
  824. * @dev: Device to handle.
  825. */
  826. void pm_runtime_enable(struct device *dev)
  827. {
  828. unsigned long flags;
  829. spin_lock_irqsave(&dev->power.lock, flags);
  830. if (dev->power.disable_depth > 0)
  831. dev->power.disable_depth--;
  832. else
  833. dev_warn(dev, "Unbalanced %s!\n", __func__);
  834. spin_unlock_irqrestore(&dev->power.lock, flags);
  835. }
  836. EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 *
 * The device starts out in status RPM_SUSPENDED with run-time PM disabled
 * (disable_depth == 1); pm_runtime_enable() is needed to enable it.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;	/* run-time PM initially disabled */
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;	/* 0 means "timer inactive" */
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
  860. /**
  861. * pm_runtime_remove - Prepare for removing a device from device hierarchy.
  862. * @dev: Device object being removed from device hierarchy.
  863. */
  864. void pm_runtime_remove(struct device *dev)
  865. {
  866. __pm_runtime_disable(dev, false);
  867. /* Change the status back to 'suspended' to match the initial status. */
  868. if (dev->power.runtime_status == RPM_ACTIVE)
  869. pm_runtime_set_suspended(dev);
  870. }