/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>

#ifdef CONFIG_PM
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        if (!WARN_ON(genpd->sd_count == 0))
                genpd->sd_count--;
}
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret = 0;

 start:
        if (genpd->parent)
                mutex_lock(&genpd->parent->lock);
        mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

        if (!genpd->power_is_off
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                goto out;

        if (genpd->parent && genpd->parent->power_is_off) {
                mutex_unlock(&genpd->lock);
                mutex_unlock(&genpd->parent->lock);

                ret = pm_genpd_poweron(genpd->parent);
                if (ret)
                        return ret;

                goto start;
        }

        if (genpd->power_on) {
                ret = genpd->power_on(genpd);
                if (ret)
                        goto out;
        }

        genpd->power_is_off = false;
        if (genpd->parent)
                genpd->parent->sd_count++;

 out:
        mutex_unlock(&genpd->lock);
        if (genpd->parent)
                mutex_unlock(&genpd->parent->lock);

        return ret;
}
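
/*
 * An illustrative, hypothetical example of a platform ->power_on() callback
 * as pm_genpd_poweron() would invoke it.  FOO_PD_CTRL, FOO_PD_ON and
 * foo_pd_wait_powered() are made-up placeholders standing in for whatever
 * register interface the platform actually has:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *genpd)
 *	{
 *		writel(FOO_PD_ON, FOO_PD_CTRL);
 *		return foo_pd_wait_powered();
 *	}
 *
 * pm_genpd_poweron() calls ->power_on() with genpd->lock (and the parent's
 * lock, if there is a parent) held, so the callback must not attempt to take
 * those locks itself.
 */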
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
                                  struct generic_pm_domain *genpd)
{
        struct device *dev = dle->dev;
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (dle->need_restore)
                return 0;

        if (drv && drv->pm && drv->pm->runtime_suspend) {
                if (genpd->start_device)
                        genpd->start_device(dev);

                ret = drv->pm->runtime_suspend(dev);

                if (genpd->stop_device)
                        genpd->stop_device(dev);
        }

        if (!ret)
                dle->need_restore = true;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
                                      struct generic_pm_domain *genpd)
{
        struct device *dev = dle->dev;
        struct device_driver *drv = dev->driver;

        if (!dle->need_restore)
                return;

        if (drv && drv->pm && drv->pm->runtime_resume) {
                if (genpd->start_device)
                        genpd->start_device(dev);

                drv->pm->runtime_resume(dev);

                if (genpd->stop_device)
                        genpd->stop_device(dev);
        }

        dle->need_restore = false;
}
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *parent;
        struct dev_list_entry *dle;
        unsigned int not_suspended;
        int ret;

        if (genpd->power_is_off || genpd->prepared_count > 0)
                return 0;

        if (genpd->sd_count > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(dle, &genpd->dev_list, node)
                if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
                        not_suspended++;

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
                ret = __pm_genpd_save_device(dle, genpd);
                if (ret)
                        goto err_dev;
        }

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->power_is_off = true;

        parent = genpd->parent;
        if (parent) {
                genpd_sd_counter_dec(parent);
                if (parent->sd_count == 0)
                        queue_work(pm_wq, &parent->power_off_work);
        }

        return 0;

 err_dev:
        list_for_each_entry_continue(dle, &genpd->dev_list, node)
                __pm_genpd_restore_device(dle, genpd);

        return ret;
}
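
/*
 * Power-off policy implemented by pm_genpd_poweroff(): the domain is left
 * alone if it is already off or a system-wide transition has started
 * (prepared_count > 0); -EBUSY is returned while any subdomain is still
 * powered or a device in the domain is not runtime-suspended (beyond the
 * in_progress suspends currently being carried out); -EAGAIN is returned if
 * the governor's ->power_down_ok() vetoes the power-down.  Only when all of
 * these checks pass are the devices' states saved and ->power_off() invoked,
 * and a failed save causes the already-saved devices to be restored.
 */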
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        if (genpd->parent)
                mutex_lock(&genpd->parent->lock);
        mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

        pm_genpd_poweroff(genpd);

        mutex_unlock(&genpd->lock);
        if (genpd->parent)
                mutex_unlock(&genpd->parent->lock);
}
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->parent)
                mutex_lock(&genpd->parent->lock);
        mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

        if (genpd->stop_device) {
                ret = genpd->stop_device(dev);
                if (ret)
                        goto out;
        }

        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;

 out:
        mutex_unlock(&genpd->lock);
        if (genpd->parent)
                mutex_unlock(&genpd->parent->lock);

        return ret;
}
/**
 * __pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
 * @dev: Device to resume.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_runtime_resume(struct device *dev,
                                      struct generic_pm_domain *genpd)
{
        struct dev_list_entry *dle;

        list_for_each_entry(dle, &genpd->dev_list, node) {
                if (dle->dev == dev) {
                        __pm_genpd_restore_device(dle, genpd);
                        break;
                }
        }

        if (genpd->start_device)
                genpd->start_device(dev);
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        ret = pm_genpd_poweron(genpd);
        if (ret)
                return ret;

        mutex_lock(&genpd->lock);
        __pm_genpd_runtime_resume(dev, genpd);
        mutex_unlock(&genpd->lock);

        return 0;
}
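
/*
 * Runtime PM flow for a device in a generic PM domain: when a device
 * runtime-suspends, pm_genpd_runtime_suspend() stops it via ->stop_device()
 * and then attempts pm_genpd_poweroff(), which may remove power from the
 * whole domain and, through the parent's power_off_work, from parent domains
 * whose subdomain counts drop to zero.  On the way back,
 * pm_genpd_runtime_resume() first powers the domain and its parents up with
 * pm_genpd_poweron(), then restores the device's pre-power-off state and
 * restarts it via ->start_device().
 */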
#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
                                             struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL

#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parent.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *parent = genpd->parent;

        if (genpd->power_is_off)
                return;

        if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->power_is_off = true;

        if (parent) {
                genpd_sd_counter_dec(parent);
                pm_genpd_sync_poweroff(parent);
        }
}
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        mutex_lock(&genpd->lock);

        if (genpd->prepared_count++ == 0)
                genpd->suspend_power_off = genpd->power_is_off;

        if (genpd->suspend_power_off) {
                mutex_unlock(&genpd->lock);
                return 0;
        }

        /*
         * If the device is in the (runtime) "suspended" state, call
         * .start_device() for it, if defined.
         */
        if (pm_runtime_suspended(dev))
                __pm_genpd_runtime_resume(dev, genpd);

        /*
         * Do not check if runtime resume is pending at this point, because it
         * has been taken care of already and if pm_genpd_poweron() ran at this
         * point as a result of the check, it would deadlock.
         */
        __pm_runtime_disable(dev, false);

        mutex_unlock(&genpd->lock);

        return pm_generic_prepare(dev);
}
/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}
/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_suspend_noirq(dev);
        if (ret)
                return ret;

        if (device_may_wakeup(dev)
            && genpd->active_wakeup && genpd->active_wakeup(dev))
                return 0;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}
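
/*
 * Note on the wakeup check in pm_genpd_suspend_noirq() above: if the device
 * may wake up the system and the domain's ->active_wakeup() callback reports
 * that it has to stay active for that, the device is left running and
 * suspended_count is not incremented for it, so pm_genpd_sync_poweroff()
 * will keep the domain powered and the wakeup source remains functional
 * across system suspend.
 */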
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_poweron(genpd);
        genpd->suspended_count--;

        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_resume_noirq(dev);
}
/**
 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}
/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_freeze_noirq(dev);
        if (ret)
                return ret;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        return 0;
}
/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_thaw_noirq(dev);
}
/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}
/**
 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Power off a device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
}
/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off)
                return 0;

        ret = pm_generic_poweroff_noirq(dev);
        if (ret)
                return ret;

        if (device_may_wakeup(dev)
            && genpd->active_wakeup && genpd->active_wakeup(dev))
                return 0;

        if (genpd->stop_device)
                genpd->stop_device(dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->power_is_off = true;
        if (genpd->suspend_power_off) {
                /*
                 * The boot kernel might put the domain into the power on state,
                 * so make sure it really is powered off.
                 */
                if (genpd->power_off)
                        genpd->power_off(genpd);
                return 0;
        }

        pm_genpd_poweron(genpd);
        genpd->suspended_count--;

        if (genpd->start_device)
                genpd->start_device(dev);

        return pm_generic_restore_noirq(dev);
}
/**
 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Restore a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_restore(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
}
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_enable(dev);
        }
}
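
/*
 * System sleep handling: pm_genpd_prepare() takes a snapshot of the domain's
 * power state in suspend_power_off when the first device in the domain is
 * prepared.  While that flag is set, the ->suspend/->freeze/->poweroff family
 * of callbacks above turn into no-ops, because there is nothing to save for a
 * domain that was already powered off.  pm_genpd_complete() drops the
 * prepared_count reference, clears suspend_power_off once the last device has
 * completed its transition, and re-enables runtime PM for devices whose
 * runtime PM was disabled in pm_genpd_prepare().
 */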
#else

#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_dev_poweroff_noirq NULL
#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_restore NULL
#define pm_genpd_complete NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
        struct dev_list_entry *dle;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        mutex_lock(&genpd->lock);

        if (genpd->power_is_off) {
                ret = -EINVAL;
                goto out;
        }

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(dle, &genpd->dev_list, node)
                if (dle->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        dle = kzalloc(sizeof(*dle), GFP_KERNEL);
        if (!dle) {
                ret = -ENOMEM;
                goto out;
        }

        dle->dev = dev;
        dle->need_restore = false;
        list_add_tail(&dle->node, &genpd->dev_list);
        genpd->device_count++;

        spin_lock_irq(&dev->power.lock);
        dev->pm_domain = &genpd->domain;
        spin_unlock_irq(&dev->power.lock);

 out:
        mutex_unlock(&genpd->lock);

        return ret;
}
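
/*
 * Devices can only be added to a domain that is currently powered on and not
 * in the middle of a system-wide power transition (prepared_count > 0), and
 * each device may appear in the domain's dev_list only once; the checks above
 * return -EINVAL or -EAGAIN accordingly.  pm_genpd_remove_device() below
 * refuses removal for the same reason while a transition is in progress.
 */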
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct dev_list_entry *dle;
        int ret = -EINVAL;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        mutex_lock(&genpd->lock);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(dle, &genpd->dev_list, node) {
                if (dle->dev != dev)
                        continue;

                spin_lock_irq(&dev->power.lock);
                dev->pm_domain = NULL;
                spin_unlock_irq(&dev->power.lock);

                genpd->device_count--;
                list_del(&dle->node);
                kfree(dle);

                ret = 0;
                break;
        }

 out:
        mutex_unlock(&genpd->lock);

        return ret;
}
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @new_subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                           struct generic_pm_domain *new_subdomain)
{
        struct generic_pm_domain *subdomain;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
                return -EINVAL;

        mutex_lock(&genpd->lock);

        if (genpd->power_is_off && !new_subdomain->power_is_off) {
                ret = -EINVAL;
                goto out;
        }

        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
                if (subdomain == new_subdomain) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);

        list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
        new_subdomain->parent = genpd;
        if (!new_subdomain->power_is_off)
                genpd->sd_count++;

        mutex_unlock(&new_subdomain->lock);

 out:
        mutex_unlock(&genpd->lock);

        return ret;
}
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                              struct generic_pm_domain *target)
{
        struct generic_pm_domain *subdomain;
        int ret = -EINVAL;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
                return -EINVAL;

        mutex_lock(&genpd->lock);

        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
                if (subdomain != target)
                        continue;

                mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

                list_del(&subdomain->sd_node);
                subdomain->parent = NULL;
                if (!subdomain->power_is_off)
                        genpd_sd_counter_dec(genpd);

                mutex_unlock(&subdomain->lock);

                ret = 0;
                break;
        }

        mutex_unlock(&genpd->lock);

        return ret;
}
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
                   struct dev_power_governor *gov, bool is_off)
{
        if (IS_ERR_OR_NULL(genpd))
                return;

        INIT_LIST_HEAD(&genpd->sd_node);
        genpd->parent = NULL;
        INIT_LIST_HEAD(&genpd->dev_list);
        INIT_LIST_HEAD(&genpd->sd_list);
        mutex_init(&genpd->lock);
        genpd->gov = gov;
        INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
        genpd->in_progress = 0;
        genpd->sd_count = 0;
        genpd->power_is_off = is_off;
        genpd->device_count = 0;
        genpd->suspended_count = 0;
        genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
        genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
        genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
        genpd->domain.ops.prepare = pm_genpd_prepare;
        genpd->domain.ops.suspend = pm_genpd_suspend;
        genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
        genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
        genpd->domain.ops.resume = pm_genpd_resume;
        genpd->domain.ops.freeze = pm_genpd_freeze;
        genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
        genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
        genpd->domain.ops.thaw = pm_genpd_thaw;
        genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
        genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
        genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
        genpd->domain.ops.restore = pm_genpd_restore;
        genpd->domain.ops.complete = pm_genpd_complete;
}
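
/*
 * Illustrative usage sketch of the API exported by this file.  All of the
 * foo_* names below are hypothetical placeholders for platform-specific
 * objects and callbacks (see the ->power_on() sketch near pm_genpd_poweron()):
 *
 *	static struct generic_pm_domain foo_pd;
 *
 *	static void __init foo_pm_init(void)
 *	{
 *		foo_pd.power_on = foo_pd_power_on;
 *		foo_pd.power_off = foo_pd_power_off;
 *		foo_pd.start_device = foo_start_dev;
 *		foo_pd.stop_device = foo_stop_dev;
 *
 *		pm_genpd_init(&foo_pd, NULL, false);
 *		pm_genpd_add_device(&foo_pd, &foo_uart_device.dev);
 *		pm_genpd_add_subdomain(&foo_pd, &foo_sub_pd);
 *	}
 *
 * After such setup, runtime PM and system sleep transitions of the devices in
 * the domain are routed through the callbacks installed by pm_genpd_init().
 */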