/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>

#ifdef CONFIG_PM

static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	if (!WARN_ON(genpd->sd_count == 0))
		genpd->sd_count--;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret = 0;

 start:
	if (genpd->parent)
		mutex_lock(&genpd->parent->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (!genpd->power_is_off
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		goto out;

	if (genpd->parent && genpd->parent->power_is_off) {
		mutex_unlock(&genpd->lock);
		mutex_unlock(&genpd->parent->lock);

		ret = pm_genpd_poweron(genpd->parent);
		if (ret)
			return ret;

		goto start;
	}

	if (genpd->power_on) {
		/* Assign to the outer ret so a power_on failure is reported. */
		ret = genpd->power_on(genpd);
		if (ret)
			goto out;
	}

	genpd->power_is_off = false;
	if (genpd->parent)
		genpd->parent->sd_count++;

 out:
	mutex_unlock(&genpd->lock);
	if (genpd->parent)
		mutex_unlock(&genpd->parent->lock);

	return ret;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
				  struct generic_pm_domain *genpd)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (dle->need_restore)
		return 0;

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		if (genpd->start_device)
			genpd->start_device(dev);

		ret = drv->pm->runtime_suspend(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	if (!ret)
		dle->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
				      struct generic_pm_domain *genpd)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;

	if (!dle->need_restore)
		return;

	if (drv && drv->pm && drv->pm->runtime_resume) {
		if (genpd->start_device)
			genpd->start_device(dev);

		drv->pm->runtime_resume(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	dle->need_restore = false;
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *parent;
	struct dev_list_entry *dle;
	unsigned int not_suspended;
	int ret;

	if (genpd->power_is_off || genpd->prepared_count > 0)
		return 0;

	if (genpd->sd_count > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
		ret = __pm_genpd_save_device(dle, genpd);
		if (ret)
			goto err_dev;
	}

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->power_is_off = true;

	parent = genpd->parent;
	if (parent) {
		genpd_sd_counter_dec(parent);
		if (parent->sd_count == 0)
			queue_work(pm_wq, &parent->power_off_work);
	}

	return 0;

 err_dev:
	list_for_each_entry_continue(dle, &genpd->dev_list, node)
		__pm_genpd_restore_device(dle, genpd);

	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	if (genpd->parent)
		mutex_lock(&genpd->parent->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	pm_genpd_poweroff(genpd);

	mutex_unlock(&genpd->lock);
	if (genpd->parent)
		mutex_unlock(&genpd->parent->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->parent)
		mutex_lock(&genpd->parent->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (genpd->stop_device) {
		int ret = genpd->stop_device(dev);
		if (ret)
			goto out;
	}

	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;

 out:
	mutex_unlock(&genpd->lock);
	if (genpd->parent)
		mutex_unlock(&genpd->parent->lock);

	return 0;
}

/**
 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_runtime_resume(struct device *dev,
				      struct generic_pm_domain *genpd)
{
	struct dev_list_entry *dle;

	list_for_each_entry(dle, &genpd->dev_list, node) {
		if (dle->dev == dev) {
			__pm_genpd_restore_device(dle, genpd);
			break;
		}
	}

	if (genpd->start_device)
		genpd->start_device(dev);
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_genpd_poweron(genpd);
	if (ret)
		return ret;

	mutex_lock(&genpd->lock);
	__pm_genpd_runtime_resume(dev, genpd);
	mutex_unlock(&genpd->lock);

	return 0;
}

#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
					     struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parent.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *parent = genpd->parent;

	if (genpd->power_is_off)
		return;

	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->power_is_off = true;
	if (parent) {
		genpd_sd_counter_dec(parent);
		pm_genpd_sync_poweroff(parent);
	}
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->power_is_off;

	if (genpd->suspend_power_off) {
		mutex_unlock(&genpd->lock);
		return 0;
	}

	/*
	 * If the device is in the (runtime) "suspended" state, call
	 * .start_device() for it, if defined.
	 */
	if (pm_runtime_suspended(dev))
		__pm_genpd_runtime_resume(dev, genpd);

	/*
	 * Do not check if runtime resume is pending at this point, because it
	 * has been taken care of already and if pm_genpd_poweron() ran at this
	 * point as a result of the check, it would deadlock.
	 */
	__pm_runtime_disable(dev, false);

	mutex_unlock(&genpd->lock);

	return pm_generic_prepare(dev);
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_resume_noirq(dev);
}

/**
 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	return 0;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_thaw_noirq(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Power off a device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
}

/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_poweroff_noirq(dev);
	if (ret)
		return ret;

	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->power_is_off = true;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}

/**
 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
 * @dev: Device to resume.
 *
 * Restore a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_restore(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_dev_poweroff_noirq	NULL
#define pm_genpd_dev_poweroff		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_restore		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->power_is_off) {
		ret = -EINVAL;
		goto out;
	}

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
	if (!dle) {
		ret = -ENOMEM;
		goto out;
	}

	dle->dev = dev;
	dle->need_restore = false;
	list_add_tail(&dle->node, &genpd->dev_list);
	genpd->device_count++;

	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	spin_unlock_irq(&dev->power.lock);

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
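
/*
 * Usage sketch (illustrative only, not part of this file): platform setup
 * code is expected to attach devices to an already initialized domain.  The
 * domain object "my_domain" and platform device "pdev" below are hypothetical:
 *
 *	ret = pm_genpd_add_device(&my_domain, &pdev->dev);
 *	if (ret)
 *		dev_warn(&pdev->dev, "failed to add device to PM domain\n");
 *
 * The call fails with -EINVAL if the domain is currently powered off and with
 * -EAGAIN while a system-wide power transition is in progress.
 */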

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(dle, &genpd->dev_list, node) {
		if (dle->dev != dev)
			continue;

		spin_lock_irq(&dev->power.lock);
		dev->pm_domain = NULL;
		spin_unlock_irq(&dev->power.lock);

		genpd->device_count--;
		list_del(&dle->node);
		kfree(dle);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @new_subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *new_subdomain)
{
	struct generic_pm_domain *subdomain;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->power_is_off && !new_subdomain->power_is_off) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain == new_subdomain) {
			ret = -EINVAL;
			goto out;
		}
	}

	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);

	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
	new_subdomain->parent = genpd;
	/* Check the newly added subdomain, not the stale list iterator. */
	if (!new_subdomain->power_is_off)
		genpd->sd_count++;

	mutex_unlock(&new_subdomain->lock);

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
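
/*
 * Nesting sketch (illustrative only; "parent_domain" and "child_domain" are
 * hypothetical domain objects):
 *
 *	pm_genpd_add_subdomain(&parent_domain, &child_domain);
 *
 * Powering off the child then decrements the parent's subdomain counter (see
 * genpd_sd_counter_dec()), and pm_genpd_poweroff() refuses to power off the
 * parent while its sd_count is nonzero.
 */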

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *target)
{
	struct generic_pm_domain *subdomain;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain != target)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&subdomain->sd_node);
		subdomain->parent = NULL;
		if (!subdomain->power_is_off)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	mutex_unlock(&genpd->lock);

	return ret;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->sd_node);
	genpd->parent = NULL;
	INIT_LIST_HEAD(&genpd->dev_list);
	INIT_LIST_HEAD(&genpd->sd_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	genpd->sd_count = 0;
	genpd->power_is_off = is_off;
	genpd->device_count = 0;
	genpd->suspended_count = 0;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;
}
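
/*
 * Initialization sketch (illustrative only): platform code would typically
 * declare a domain with its power_on/power_off callbacks, initialize it, and
 * then add devices.  "my_power_on", "my_power_off" and "my_domain" are
 * hypothetical names:
 *
 *	static struct generic_pm_domain my_domain = {
 *		.power_on = my_power_on,
 *		.power_off = my_power_off,
 *	};
 *
 *	pm_genpd_init(&my_domain, NULL, false);
 *	pm_genpd_add_device(&my_domain, dev);
 *
 * Passing a NULL governor is allowed (pm_genpd_poweroff() only consults
 * gov->power_down_ok() when a governor is set), and @is_off should reflect
 * the domain's actual initial hardware state.
 */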