/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>

#ifdef CONFIG_PM
  18. static struct generic_pm_domain *dev_to_genpd(struct device *dev)
  19. {
  20. if (IS_ERR_OR_NULL(dev->pm_domain))
  21. return ERR_PTR(-EINVAL);
  22. return pd_to_genpd(dev->pm_domain);
  23. }
  24. static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
  25. {
  26. if (!WARN_ON(genpd->sd_count == 0))
  27. genpd->sd_count--;
  28. }
/**
 * genpd_acquire_lock - Lock a PM domain, waiting for transitions to settle.
 * @genpd: PM domain to lock.
 *
 * Takes genpd->lock; if the domain is in a transient state, drops the lock
 * and sleeps until the domain reaches GPD_STATE_ACTIVE or
 * GPD_STATE_POWER_OFF, then returns with the lock held.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);

	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		/* Register on the wait queue before testing the condition. */
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		/* Drop the lock while sleeping so others can make progress. */
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
/**
 * genpd_release_lock - Release the mutex taken by genpd_acquire_lock().
 * @genpd: PM domain to unlock.
 */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
  53. static void genpd_set_active(struct generic_pm_domain *genpd)
  54. {
  55. if (genpd->resume_count == 0)
  56. genpd->status = GPD_STATE_ACTIVE;
  57. }
  58. /**
  59. * pm_genpd_poweron - Restore power to a given PM domain and its parents.
  60. * @genpd: PM domain to power up.
  61. *
  62. * Restore power to @genpd and all of its parents so that it is possible to
  63. * resume a device belonging to it.
  64. */
  65. int pm_genpd_poweron(struct generic_pm_domain *genpd)
  66. {
  67. struct generic_pm_domain *parent = genpd->parent;
  68. DEFINE_WAIT(wait);
  69. int ret = 0;
  70. start:
  71. if (parent) {
  72. genpd_acquire_lock(parent);
  73. mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
  74. } else {
  75. mutex_lock(&genpd->lock);
  76. }
  77. if (genpd->status == GPD_STATE_ACTIVE
  78. || (genpd->prepared_count > 0 && genpd->suspend_power_off))
  79. goto out;
  80. if (genpd->status != GPD_STATE_POWER_OFF) {
  81. genpd_set_active(genpd);
  82. goto out;
  83. }
  84. if (parent && parent->status != GPD_STATE_ACTIVE) {
  85. mutex_unlock(&genpd->lock);
  86. genpd_release_lock(parent);
  87. ret = pm_genpd_poweron(parent);
  88. if (ret)
  89. return ret;
  90. goto start;
  91. }
  92. if (genpd->power_on) {
  93. int ret = genpd->power_on(genpd);
  94. if (ret)
  95. goto out;
  96. }
  97. genpd_set_active(genpd);
  98. if (parent)
  99. parent->sd_count++;
  100. out:
  101. mutex_unlock(&genpd->lock);
  102. if (parent)
  103. genpd_release_lock(parent);
  104. return ret;
  105. }
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Invokes the driver's runtime-suspend callback with the domain lock
 * dropped; the caller must hold genpd->lock and must tolerate it being
 * released and re-taken here.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	/* Already saved during this power-off attempt. */
	if (dle->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		/* Make the device operational around the callback. */
		if (genpd->start_device)
			genpd->start_device(dev);

		ret = drv->pm->runtime_suspend(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	/* Mark the state as saved so a later power-on restores it. */
	if (!ret)
		dle->need_restore = true;

	return ret;
}
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Invokes the driver's runtime-resume callback with the domain lock
 * dropped; the caller must hold genpd->lock and must tolerate it being
 * released and re-taken here.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;

	/* Nothing was saved for this device, so nothing to restore. */
	if (!dle->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_resume) {
		/* Make the device operational around the callback. */
		if (genpd->start_device)
			genpd->start_device(dev);

		drv->pm->runtime_resume(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	dle->need_restore = false;
}
  159. /**
  160. * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
  161. * @genpd: PM domain to check.
  162. *
  163. * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
  164. * a "power off" operation, which means that a "power on" has occured in the
  165. * meantime, or if its resume_count field is different from zero, which means
  166. * that one of its devices has been resumed in the meantime.
  167. */
  168. static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
  169. {
  170. return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
  171. }
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Called with genpd->lock held; may drop and re-take it while saving device
 * state and while acquiring the parent's lock.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain *parent;
	struct dev_list_entry *dle;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 * (3) One of the domain's devices is being resumed right now.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
	    || genpd->resume_count > 0)
		return 0;

	/* Cannot power off while any subdomain is still powered. */
	if (genpd->sd_count > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
			not_suspended++;

	/*
	 * in_progress counts suspends of this domain's devices currently
	 * being carried out (see pm_genpd_runtime_suspend()); any other
	 * active device blocks the power-off.
	 */
	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power-off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
		ret = __pm_genpd_save_device(dle, genpd);
		if (ret)
			goto err_dev;

		/* The lock was dropped; a concurrent power-on aborts us. */
		if (genpd_abort_poweroff(genpd))
			goto out;

		/* Another task asked us to start over while unlocked. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	parent = genpd->parent;
	if (parent) {
		/* Re-take locks in parent-first order. */
		mutex_unlock(&genpd->lock);

		genpd_acquire_lock(parent);
		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

		if (genpd_abort_poweroff(genpd)) {
			genpd_release_lock(parent);
			goto out;
		}
	}

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	if (parent) {
		genpd_sd_counter_dec(parent);
		/* The parent may now be eligible for power-off as well. */
		if (parent->sd_count == 0)
			queue_work(pm_wq, &parent->power_off_work);

		genpd_release_lock(parent);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;

 err_dev:
	/* Undo the saves done so far and mark the domain active again. */
	list_for_each_entry_continue(dle, &genpd->dev_list, node)
		__pm_genpd_restore_device(dle, genpd);

	genpd_set_active(genpd);

	goto out;
}
  259. /**
  260. * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  261. * @work: Work structure used for scheduling the execution of this function.
  262. */
  263. static void genpd_power_off_work_fn(struct work_struct *work)
  264. {
  265. struct generic_pm_domain *genpd;
  266. genpd = container_of(work, struct generic_pm_domain, power_off_work);
  267. genpd_acquire_lock(genpd);
  268. pm_genpd_poweroff(genpd);
  269. genpd_release_lock(genpd);
  270. }
  271. /**
  272. * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
  273. * @dev: Device to suspend.
  274. *
  275. * Carry out a runtime suspend of a device under the assumption that its
  276. * pm_domain field points to the domain member of an object of type
  277. * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  278. */
  279. static int pm_genpd_runtime_suspend(struct device *dev)
  280. {
  281. struct generic_pm_domain *genpd;
  282. dev_dbg(dev, "%s()\n", __func__);
  283. genpd = dev_to_genpd(dev);
  284. if (IS_ERR(genpd))
  285. return -EINVAL;
  286. if (genpd->stop_device) {
  287. int ret = genpd->stop_device(dev);
  288. if (ret)
  289. return ret;
  290. }
  291. mutex_lock(&genpd->lock);
  292. genpd->in_progress++;
  293. pm_genpd_poweroff(genpd);
  294. genpd->in_progress--;
  295. mutex_unlock(&genpd->lock);
  296. return 0;
  297. }
  298. /**
  299. * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  300. * @dev: Device to resume.
  301. * @genpd: PM domain the device belongs to.
  302. */
  303. static void __pm_genpd_runtime_resume(struct device *dev,
  304. struct generic_pm_domain *genpd)
  305. {
  306. struct dev_list_entry *dle;
  307. list_for_each_entry(dle, &genpd->dev_list, node) {
  308. if (dle->dev == dev) {
  309. __pm_genpd_restore_device(dle, genpd);
  310. break;
  311. }
  312. }
  313. }
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Make sure the domain (and its parents) are powered first. */
	ret = pm_genpd_poweron(genpd);
	if (ret)
		return ret;

	mutex_lock(&genpd->lock);
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait until any in-flight power-off has finished or aborted. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	/* Restore the device's state if a power-off had saved it. */
	__pm_genpd_runtime_resume(dev, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	if (genpd->start_device)
		genpd->start_device(dev);

	return 0;
}
#else

/* !CONFIG_PM_RUNTIME: no-op stubs so the rest of the file compiles. */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
					     struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
  369. /**
  370. * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
  371. * @genpd: PM domain to power off, if possible.
  372. *
  373. * Check if the given PM domain can be powered off (during system suspend or
  374. * hibernation) and do that if so. Also, in that case propagate to its parent.
  375. *
  376. * This function is only called in "noirq" stages of system power transitions,
  377. * so it need not acquire locks (all of the "noirq" callbacks are executed
  378. * sequentially, so it is guaranteed that it will never run twice in parallel).
  379. */
  380. static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
  381. {
  382. struct generic_pm_domain *parent = genpd->parent;
  383. if (genpd->status == GPD_STATE_POWER_OFF)
  384. return;
  385. if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
  386. return;
  387. if (genpd->power_off)
  388. genpd->power_off(genpd);
  389. genpd->status = GPD_STATE_POWER_OFF;
  390. if (parent) {
  391. genpd_sd_counter_dec(parent);
  392. pm_genpd_sync_poweroff(parent);
  393. }
  394. }
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	genpd_acquire_lock(genpd);

	/* First prepared device records whether the domain was off. */
	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	/*
	 * NOTE(review): suspend_power_off is read here after dropping the
	 * lock; presumably the system-suspend core serializes ->prepare()
	 * callbacks against the other writers — confirm.
	 */
	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by .stop_device()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared count on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);

	return ret;
}
  451. /**
  452. * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
  453. * @dev: Device to suspend.
  454. *
  455. * Suspend a device under the assumption that its pm_domain field points to the
  456. * domain member of an object of type struct generic_pm_domain representing
  457. * a PM domain consisting of I/O devices.
  458. */
  459. static int pm_genpd_suspend(struct device *dev)
  460. {
  461. struct generic_pm_domain *genpd;
  462. dev_dbg(dev, "%s()\n", __func__);
  463. genpd = dev_to_genpd(dev);
  464. if (IS_ERR(genpd))
  465. return -EINVAL;
  466. return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
  467. }
/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* The whole domain stayed powered off; nothing more to do. */
	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	/* Leave the device operational if the domain reports it as an
	 * active wakeup source. */
	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* The domain was left powered across suspend; nothing to undo. */
	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_resume_noirq(dev);
}
  532. /**
  533. * pm_genpd_resume - Resume a device belonging to an I/O power domain.
  534. * @dev: Device to resume.
  535. *
  536. * Resume a device under the assumption that its pm_domain field points to the
  537. * domain member of an object of type struct generic_pm_domain representing
  538. * a power domain consisting of I/O devices.
  539. */
  540. static int pm_genpd_resume(struct device *dev)
  541. {
  542. struct generic_pm_domain *genpd;
  543. dev_dbg(dev, "%s()\n", __func__);
  544. genpd = dev_to_genpd(dev);
  545. if (IS_ERR(genpd))
  546. return -EINVAL;
  547. return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
  548. }
  549. /**
  550. * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
  551. * @dev: Device to freeze.
  552. *
  553. * Freeze a device under the assumption that its pm_domain field points to the
  554. * domain member of an object of type struct generic_pm_domain representing
  555. * a power domain consisting of I/O devices.
  556. */
  557. static int pm_genpd_freeze(struct device *dev)
  558. {
  559. struct generic_pm_domain *genpd;
  560. dev_dbg(dev, "%s()\n", __func__);
  561. genpd = dev_to_genpd(dev);
  562. if (IS_ERR(genpd))
  563. return -EINVAL;
  564. return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
  565. }
/**
 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* The whole domain stayed powered off; nothing more to do. */
	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	/*
	 * Note: unlike pm_genpd_suspend_noirq(), the domain itself is NOT
	 * powered off here (no pm_genpd_sync_poweroff() call) — only the
	 * device is stopped.
	 */
	if (genpd->stop_device)
		genpd->stop_device(dev);

	return 0;
}
/**
 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* The whole domain stayed powered off; nothing to undo. */
	if (genpd->suspend_power_off)
		return 0;

	/* Undo the ->stop_device() done by pm_genpd_freeze_noirq(). */
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_thaw_noirq(dev);
}
  614. /**
  615. * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
  616. * @dev: Device to thaw.
  617. *
  618. * Thaw a device under the assumption that its pm_domain field points to the
  619. * domain member of an object of type struct generic_pm_domain representing
  620. * a power domain consisting of I/O devices.
  621. */
  622. static int pm_genpd_thaw(struct device *dev)
  623. {
  624. struct generic_pm_domain *genpd;
  625. dev_dbg(dev, "%s()\n", __func__);
  626. genpd = dev_to_genpd(dev);
  627. if (IS_ERR(genpd))
  628. return -EINVAL;
  629. return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
  630. }
  631. /**
  632. * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
  633. * @dev: Device to suspend.
  634. *
  635. * Power off a device under the assumption that its pm_domain field points to
  636. * the domain member of an object of type struct generic_pm_domain representing
  637. * a PM domain consisting of I/O devices.
  638. */
  639. static int pm_genpd_dev_poweroff(struct device *dev)
  640. {
  641. struct generic_pm_domain *genpd;
  642. dev_dbg(dev, "%s()\n", __func__);
  643. genpd = dev_to_genpd(dev);
  644. if (IS_ERR(genpd))
  645. return -EINVAL;
  646. return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
  647. }
/**
 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late powering off of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* The whole domain stayed powered off; nothing more to do. */
	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_poweroff_noirq(dev);
	if (ret)
		return ret;

	/* Leave the device operational if the domain reports it as an
	 * active wakeup source. */
	if (device_may_wakeup(dev)
	    && genpd->active_wakeup && genpd->active_wakeup(dev))
		return 0;

	if (genpd->stop_device)
		genpd->stop_device(dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	/* Force "off" so pm_genpd_poweron() below does real work. */
	genpd->status = GPD_STATE_POWER_OFF;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}
  720. /**
  721. * pm_genpd_restore - Restore a device belonging to an I/O power domain.
  722. * @dev: Device to resume.
  723. *
  724. * Restore a device under the assumption that its pm_domain field points to the
  725. * domain member of an object of type struct generic_pm_domain representing
  726. * a power domain consisting of I/O devices.
  727. */
  728. static int pm_genpd_restore(struct device *dev)
  729. {
  730. struct generic_pm_domain *genpd;
  731. dev_dbg(dev, "%s()\n", __func__);
  732. genpd = dev_to_genpd(dev);
  733. if (IS_ERR(genpd))
  734. return -EINVAL;
  735. return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
  736. }
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	/* Skip ->complete() if the domain stayed off over the transition. */
	run_complete = !genpd->suspend_power_off;
	/* Last device out of the transition clears the domain-wide flag. */
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		/* Re-enable runtime PM, disabled in pm_genpd_prepare(). */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}
#else

/* !CONFIG_PM_SLEEP: system sleep callbacks are not used at all. */
#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_dev_poweroff_noirq	NULL
#define pm_genpd_dev_poweroff		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_restore		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 *
 * Returns 0 on success, -EINVAL for invalid arguments, a powered-off domain,
 * or a duplicate device, -EAGAIN while a system power transition is in
 * progress, and -ENOMEM on allocation failure.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices may only be added while the domain is powered. */
	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Do not change membership during a system sleep transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject duplicates. */
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
	if (!dle) {
		ret = -ENOMEM;
		goto out;
	}

	dle->dev = dev;
	dle->need_restore = false;
	list_add_tail(&dle->node, &genpd->dev_list);
	genpd->device_count++;

	/* Publish the domain pointer under the device's power lock. */
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	spin_unlock_irq(&dev->power.lock);

 out:
	genpd_release_lock(genpd);

	return ret;
}
  824. /**
  825. * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  826. * @genpd: PM domain to remove the device from.
  827. * @dev: Device to be removed.
  828. */
  829. int pm_genpd_remove_device(struct generic_pm_domain *genpd,
  830. struct device *dev)
  831. {
  832. struct dev_list_entry *dle;
  833. int ret = -EINVAL;
  834. dev_dbg(dev, "%s()\n", __func__);
  835. if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
  836. return -EINVAL;
  837. genpd_acquire_lock(genpd);
  838. if (genpd->prepared_count > 0) {
  839. ret = -EAGAIN;
  840. goto out;
  841. }
  842. list_for_each_entry(dle, &genpd->dev_list, node) {
  843. if (dle->dev != dev)
  844. continue;
  845. spin_lock_irq(&dev->power.lock);
  846. dev->pm_domain = NULL;
  847. spin_unlock_irq(&dev->power.lock);
  848. genpd->device_count--;
  849. list_del(&dle->node);
  850. kfree(dle);
  851. ret = 0;
  852. break;
  853. }
  854. out:
  855. genpd_release_lock(genpd);
  856. return ret;
  857. }
  858. /**
  859. * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  860. * @genpd: Master PM domain to add the subdomain to.
  861. * @new_subdomain: Subdomain to be added.
  862. */
  863. int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
  864. struct generic_pm_domain *new_subdomain)
  865. {
  866. struct generic_pm_domain *subdomain;
  867. int ret = 0;
  868. if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
  869. return -EINVAL;
  870. start:
  871. genpd_acquire_lock(genpd);
  872. mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
  873. if (new_subdomain->status != GPD_STATE_POWER_OFF
  874. && new_subdomain->status != GPD_STATE_ACTIVE) {
  875. mutex_unlock(&new_subdomain->lock);
  876. genpd_release_lock(genpd);
  877. goto start;
  878. }
  879. if (genpd->status == GPD_STATE_POWER_OFF
  880. && new_subdomain->status != GPD_STATE_POWER_OFF) {
  881. ret = -EINVAL;
  882. goto out;
  883. }
  884. list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
  885. if (subdomain == new_subdomain) {
  886. ret = -EINVAL;
  887. goto out;
  888. }
  889. }
  890. list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
  891. new_subdomain->parent = genpd;
  892. if (subdomain->status != GPD_STATE_POWER_OFF)
  893. genpd->sd_count++;
  894. out:
  895. mutex_unlock(&new_subdomain->lock);
  896. genpd_release_lock(genpd);
  897. return ret;
  898. }
  899. /**
  900. * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  901. * @genpd: Master PM domain to remove the subdomain from.
  902. * @target: Subdomain to be removed.
  903. */
  904. int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
  905. struct generic_pm_domain *target)
  906. {
  907. struct generic_pm_domain *subdomain;
  908. int ret = -EINVAL;
  909. if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
  910. return -EINVAL;
  911. start:
  912. genpd_acquire_lock(genpd);
  913. list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
  914. if (subdomain != target)
  915. continue;
  916. mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
  917. if (subdomain->status != GPD_STATE_POWER_OFF
  918. && subdomain->status != GPD_STATE_ACTIVE) {
  919. mutex_unlock(&subdomain->lock);
  920. genpd_release_lock(genpd);
  921. goto start;
  922. }
  923. list_del(&subdomain->sd_node);
  924. subdomain->parent = NULL;
  925. if (subdomain->status != GPD_STATE_POWER_OFF)
  926. genpd_sd_counter_dec(genpd);
  927. mutex_unlock(&subdomain->lock);
  928. ret = 0;
  929. break;
  930. }
  931. genpd_release_lock(genpd);
  932. return ret;
  933. }
  934. /**
  935. * pm_genpd_init - Initialize a generic I/O PM domain object.
  936. * @genpd: PM domain object to initialize.
  937. * @gov: PM domain governor to associate with the domain (may be NULL).
  938. * @is_off: Initial value of the domain's power_is_off field.
  939. */
  940. void pm_genpd_init(struct generic_pm_domain *genpd,
  941. struct dev_power_governor *gov, bool is_off)
  942. {
  943. if (IS_ERR_OR_NULL(genpd))
  944. return;
  945. INIT_LIST_HEAD(&genpd->sd_node);
  946. genpd->parent = NULL;
  947. INIT_LIST_HEAD(&genpd->dev_list);
  948. INIT_LIST_HEAD(&genpd->sd_list);
  949. mutex_init(&genpd->lock);
  950. genpd->gov = gov;
  951. INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
  952. genpd->in_progress = 0;
  953. genpd->sd_count = 0;
  954. genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
  955. init_waitqueue_head(&genpd->status_wait_queue);
  956. genpd->poweroff_task = NULL;
  957. genpd->resume_count = 0;
  958. genpd->device_count = 0;
  959. genpd->suspended_count = 0;
  960. genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
  961. genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
  962. genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
  963. genpd->domain.ops.prepare = pm_genpd_prepare;
  964. genpd->domain.ops.suspend = pm_genpd_suspend;
  965. genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
  966. genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
  967. genpd->domain.ops.resume = pm_genpd_resume;
  968. genpd->domain.ops.freeze = pm_genpd_freeze;
  969. genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
  970. genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
  971. genpd->domain.ops.thaw = pm_genpd_thaw;
  972. genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
  973. genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
  974. genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
  975. genpd->domain.ops.restore = pm_genpd_restore;
  976. genpd->domain.ops.complete = pm_genpd_complete;
  977. }