domain.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406
  1. /*
  2. * drivers/base/power/domain.c - Common code related to device power domains.
  3. *
  4. * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
  5. *
  6. * This file is released under the GPLv2.
  7. */
  8. #include <linux/init.h>
  9. #include <linux/kernel.h>
  10. #include <linux/io.h>
  11. #include <linux/pm_runtime.h>
  12. #include <linux/pm_domain.h>
  13. #include <linux/slab.h>
  14. #include <linux/err.h>
  15. #include <linux/sched.h>
  16. #include <linux/suspend.h>
  17. #include <linux/export.h>
/*
 * GENPD_DEV_CALLBACK - Invoke a PM-domain device callback of the given @type.
 *
 * Looks up @callback first in the domain's dev_ops and, if unset there, in
 * the per-device ops (dev_gpd_data(dev)->ops).  Evaluates to the callback's
 * return value, or to (type)0 if neither callback is set.
 * GCC statement-expression; note @dev may be evaluated more than once.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})
/* List of all registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
  35. #ifdef CONFIG_PM
  36. static struct generic_pm_domain *dev_to_genpd(struct device *dev)
  37. {
  38. if (IS_ERR_OR_NULL(dev->pm_domain))
  39. return ERR_PTR(-EINVAL);
  40. return pd_to_genpd(dev->pm_domain);
  41. }
/* Invoke the domain's (or device's) ->stop() callback for @dev. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
/* Invoke the domain's (or device's) ->start() callback for @dev. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
  50. static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
  51. {
  52. bool ret = false;
  53. if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
  54. ret = !!atomic_dec_and_test(&genpd->sd_count);
  55. return ret;
  56. }
/*
 * genpd_sd_counter_inc - Increment the subdomain counter of @genpd.
 *
 * The explicit barrier orders the increment before subsequent memory
 * accesses (atomic_inc() itself does not imply a full barrier).
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}
/*
 * genpd_acquire_lock - Acquire genpd->lock once the domain reaches a stable
 * (active or powered-off) state.
 *
 * Sleeps in TASK_UNINTERRUPTIBLE on status_wait_queue while the domain is in
 * a transitional state, dropping the mutex around each schedule() so that
 * whoever owns the transition can finish it.  Returns with genpd->lock held.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);

	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
/* Counterpart of genpd_acquire_lock(): just drop genpd->lock. */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
/*
 * Mark @genpd active, unless a device resume is in progress — in that case
 * pm_genpd_runtime_resume() sets the state after resume_count drops to zero.
 */
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with genpd->lock held; may drop and reacquire the lock
 * (hence the sparse annotations below).
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/* Already powered, or power-off is blocked for system suspend. */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/* Transitional (busy) state: power is on, just mark it active. */
	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;
	}

	genpd_set_active(genpd);

	return 0;

 err:
	/* Drop the subdomain references taken on masters powered on so far. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
  156. /**
  157. * pm_genpd_poweron - Restore power to a given PM domain and its masters.
  158. * @genpd: PM domain to power up.
  159. */
  160. int pm_genpd_poweron(struct generic_pm_domain *genpd)
  161. {
  162. int ret;
  163. mutex_lock(&genpd->lock);
  164. ret = __pm_genpd_poweron(genpd);
  165. mutex_unlock(&genpd->lock);
  166. return ret;
  167. }
  168. #endif /* CONFIG_PM */
  169. #ifdef CONFIG_PM_RUNTIME
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Runs the driver's runtime_suspend callback with the device started around
 * it, and marks the device as needing a restore on success.  Called with
 * genpd->lock held; the lock is dropped around the driver callback.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	/* Nothing to do if the state has already been saved. */
	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		/* The device must be operational while its callback runs. */
		genpd_start_dev(genpd, dev);
		ret = drv->pm->runtime_suspend(dev);
		genpd_stop_dev(genpd, dev);
	}

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Runs the driver's runtime_resume callback with the device started around
 * it, then clears the need_restore flag.  Called with genpd->lock held;
 * the lock is dropped around the driver callback.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;

	/* Nothing to do unless __pm_genpd_save_device() saved the state. */
	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_resume) {
		genpd_start_dev(genpd, dev);
		drv->pm->runtime_resume(dev);
		genpd_stop_dev(genpd, dev);
	}

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}
  219. /**
  220. * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
  221. * @genpd: PM domain to check.
  222. *
  223. * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
  224. * a "power off" operation, which means that a "power on" has occured in the
  225. * meantime, or if its resume_count field is different from zero, which means
  226. * that one of its devices has been resumed in the meantime.
  227. */
  228. static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
  229. {
  230. return genpd->status == GPD_STATE_WAIT_MASTER
  231. || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
  232. }
  233. /**
  234. * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
  235. * @genpd: PM domait to power off.
  236. *
  237. * Queue up the execution of pm_genpd_poweroff() unless it's already been done
  238. * before.
  239. */
  240. void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
  241. {
  242. if (!work_pending(&genpd->power_off_work))
  243. queue_work(pm_wq, &genpd->power_off_work);
  244. }
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Must be called with genpd->lock held; the lock is dropped and reacquired
 * (inside __pm_genpd_save_device()) around each per-device save callback.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	/* Powered-up subdomains keep this domain powered. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * Count active devices.  irq_safe devices count as active even when
	 * runtime-suspended, because their suspend path skips
	 * pm_genpd_poweroff() (see pm_genpd_runtime_suspend()).
	 */
	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	/* Save each device's state; the lock may be dropped per iteration. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Restart from scratch if another power-off was requested. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Release the masters and give them a chance to power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	/* Wait for a stable domain state before attempting the power-off. */
	genpd_acquire_lock(genpd);
	/*
	 * pm_genpd_poweroff() re-checks all preconditions itself and is a
	 * no-op if the domain cannot be powered off right now.
	 */
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* Stop the device (clock/interface) first. */
	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	/* in_progress tells pm_genpd_poweroff() this suspend is under way. */
	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for any concurrent pm_genpd_poweroff() to notice and stop. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	/* Make the device operational again. */
	genpd_start_dev(genpd, dev);

	return 0;
}
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 *
 * Queues a power-off work item for every registered domain; the work
 * function itself decides whether each domain can actually be powered off.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
#else

/* !CONFIG_PM_RUNTIME: no runtime PM, domains are never powered off at run time. */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */
  452. #ifdef CONFIG_PM_SLEEP
/* Whether @dev should stay operational for system wakeup (dev_ops callback). */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* All devices must be suspended and no subdomain may be powered. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Recurse into the masters, which may now be droppable too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}
  485. /**
  486. * resume_needed - Check whether to resume a device before system suspend.
  487. * @dev: Device to check.
  488. * @genpd: PM domain the device belongs to.
  489. *
  490. * There are two cases in which a device that can wake up the system from sleep
  491. * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
  492. * to wake up the system and it has to remain active for this purpose while the
  493. * system is in the sleep state and (2) if the device is not enabled to wake up
  494. * the system from sleep states and it generally doesn't generate wakeup signals
  495. * by itself (those signals are generated on its behalf by other parts of the
  496. * system). In the latter case it may be necessary to reconfigure the device's
  497. * wakeup settings during system suspend, because it may have been set up to
  498. * signal remote wakeup from the system's working state as needed by runtime PM.
  499. * Return 'true' in either of the above cases.
  500. */
  501. static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
  502. {
  503. bool active_wakeup;
  504. if (!device_can_wakeup(dev))
  505. return false;
  506. active_wakeup = genpd_dev_active_wakeup(genpd, dev);
  507. return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
  508. }
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	/* Latch, on the first prepare, whether the domain was powered off. */
	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared_count bump on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);

	return ret;
}
  567. /**
  568. * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
  569. * @dev: Device to suspend.
  570. *
  571. * Suspend a device under the assumption that its pm_domain field points to the
  572. * domain member of an object of type struct generic_pm_domain representing
  573. * a PM domain consisting of I/O devices.
  574. */
  575. static int pm_genpd_suspend(struct device *dev)
  576. {
  577. struct generic_pm_domain *genpd;
  578. dev_dbg(dev, "%s()\n", __func__);
  579. genpd = dev_to_genpd(dev);
  580. if (IS_ERR(genpd))
  581. return -EINVAL;
  582. return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
  583. }
/**
 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	/* Leave the device operational if it is in the system wakeup path. */
	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
/**
 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	genpd_start_dev(genpd, dev);

	return pm_generic_resume_noirq(dev);
}
  645. /**
  646. * pm_genpd_resume - Resume a device belonging to an I/O power domain.
  647. * @dev: Device to resume.
  648. *
  649. * Resume a device under the assumption that its pm_domain field points to the
  650. * domain member of an object of type struct generic_pm_domain representing
  651. * a power domain consisting of I/O devices.
  652. */
  653. static int pm_genpd_resume(struct device *dev)
  654. {
  655. struct generic_pm_domain *genpd;
  656. dev_dbg(dev, "%s()\n", __func__);
  657. genpd = dev_to_genpd(dev);
  658. if (IS_ERR(genpd))
  659. return -EINVAL;
  660. return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
  661. }
  662. /**
  663. * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
  664. * @dev: Device to freeze.
  665. *
  666. * Freeze a device under the assumption that its pm_domain field points to the
  667. * domain member of an object of type struct generic_pm_domain representing
  668. * a power domain consisting of I/O devices.
  669. */
  670. static int pm_genpd_freeze(struct device *dev)
  671. {
  672. struct generic_pm_domain *genpd;
  673. dev_dbg(dev, "%s()\n", __func__);
  674. genpd = dev_to_genpd(dev);
  675. if (IS_ERR(genpd))
  676. return -EINVAL;
  677. return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
  678. }
  679. /**
  680. * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
  681. * @dev: Device to freeze.
  682. *
  683. * Carry out a late freeze of a device under the assumption that its
  684. * pm_domain field points to the domain member of an object of type
  685. * struct generic_pm_domain representing a power domain consisting of I/O
  686. * devices.
  687. */
  688. static int pm_genpd_freeze_noirq(struct device *dev)
  689. {
  690. struct generic_pm_domain *genpd;
  691. int ret;
  692. dev_dbg(dev, "%s()\n", __func__);
  693. genpd = dev_to_genpd(dev);
  694. if (IS_ERR(genpd))
  695. return -EINVAL;
  696. if (genpd->suspend_power_off)
  697. return 0;
  698. ret = pm_generic_freeze_noirq(dev);
  699. if (ret)
  700. return ret;
  701. genpd_stop_dev(genpd, dev);
  702. return 0;
  703. }
  704. /**
  705. * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
  706. * @dev: Device to thaw.
  707. *
  708. * Carry out an early thaw of a device under the assumption that its
  709. * pm_domain field points to the domain member of an object of type
  710. * struct generic_pm_domain representing a power domain consisting of I/O
  711. * devices.
  712. */
  713. static int pm_genpd_thaw_noirq(struct device *dev)
  714. {
  715. struct generic_pm_domain *genpd;
  716. dev_dbg(dev, "%s()\n", __func__);
  717. genpd = dev_to_genpd(dev);
  718. if (IS_ERR(genpd))
  719. return -EINVAL;
  720. if (genpd->suspend_power_off)
  721. return 0;
  722. genpd_start_dev(genpd, dev);
  723. return pm_generic_thaw_noirq(dev);
  724. }
  725. /**
  726. * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
  727. * @dev: Device to thaw.
  728. *
  729. * Thaw a device under the assumption that its pm_domain field points to the
  730. * domain member of an object of type struct generic_pm_domain representing
  731. * a power domain consisting of I/O devices.
  732. */
  733. static int pm_genpd_thaw(struct device *dev)
  734. {
  735. struct generic_pm_domain *genpd;
  736. dev_dbg(dev, "%s()\n", __func__);
  737. genpd = dev_to_genpd(dev);
  738. if (IS_ERR(genpd))
  739. return -EINVAL;
  740. return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
  741. }
  742. /**
  743. * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
  744. * @dev: Device to suspend.
  745. *
  746. * Power off a device under the assumption that its pm_domain field points to
  747. * the domain member of an object of type struct generic_pm_domain representing
  748. * a PM domain consisting of I/O devices.
  749. */
  750. static int pm_genpd_dev_poweroff(struct device *dev)
  751. {
  752. struct generic_pm_domain *genpd;
  753. dev_dbg(dev, "%s()\n", __func__);
  754. genpd = dev_to_genpd(dev);
  755. if (IS_ERR(genpd))
  756. return -EINVAL;
  757. return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
  758. }
  759. /**
  760. * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
  761. * @dev: Device to suspend.
  762. *
  763. * Carry out a late powering off of a device under the assumption that its
  764. * pm_domain field points to the domain member of an object of type
  765. * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  766. */
  767. static int pm_genpd_dev_poweroff_noirq(struct device *dev)
  768. {
  769. struct generic_pm_domain *genpd;
  770. int ret;
  771. dev_dbg(dev, "%s()\n", __func__);
  772. genpd = dev_to_genpd(dev);
  773. if (IS_ERR(genpd))
  774. return -EINVAL;
  775. if (genpd->suspend_power_off)
  776. return 0;
  777. ret = pm_generic_poweroff_noirq(dev);
  778. if (ret)
  779. return ret;
  780. if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
  781. return 0;
  782. genpd_stop_dev(genpd, dev);
  783. /*
  784. * Since all of the "noirq" callbacks are executed sequentially, it is
  785. * guaranteed that this function will never run twice in parallel for
  786. * the same PM domain, so it is not necessary to use locking here.
  787. */
  788. genpd->suspended_count++;
  789. pm_genpd_sync_poweroff(genpd);
  790. return 0;
  791. }
  792. /**
  793. * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
  794. * @dev: Device to resume.
  795. *
  796. * Carry out an early restore of a device under the assumption that its
  797. * pm_domain field points to the domain member of an object of type
  798. * struct generic_pm_domain representing a power domain consisting of I/O
  799. * devices.
  800. */
  801. static int pm_genpd_restore_noirq(struct device *dev)
  802. {
  803. struct generic_pm_domain *genpd;
  804. dev_dbg(dev, "%s()\n", __func__);
  805. genpd = dev_to_genpd(dev);
  806. if (IS_ERR(genpd))
  807. return -EINVAL;
  808. /*
  809. * Since all of the "noirq" callbacks are executed sequentially, it is
  810. * guaranteed that this function will never run twice in parallel for
  811. * the same PM domain, so it is not necessary to use locking here.
  812. */
  813. genpd->status = GPD_STATE_POWER_OFF;
  814. if (genpd->suspend_power_off) {
  815. /*
  816. * The boot kernel might put the domain into the power on state,
  817. * so make sure it really is powered off.
  818. */
  819. if (genpd->power_off)
  820. genpd->power_off(genpd);
  821. return 0;
  822. }
  823. pm_genpd_poweron(genpd);
  824. genpd->suspended_count--;
  825. genpd_start_dev(genpd, dev);
  826. return pm_generic_restore_noirq(dev);
  827. }
  828. /**
  829. * pm_genpd_restore - Restore a device belonging to an I/O power domain.
  830. * @dev: Device to resume.
  831. *
  832. * Restore a device under the assumption that its pm_domain field points to the
  833. * domain member of an object of type struct generic_pm_domain representing
  834. * a power domain consisting of I/O devices.
  835. */
  836. static int pm_genpd_restore(struct device *dev)
  837. {
  838. struct generic_pm_domain *genpd;
  839. dev_dbg(dev, "%s()\n", __func__);
  840. genpd = dev_to_genpd(dev);
  841. if (IS_ERR(genpd))
  842. return -EINVAL;
  843. return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
  844. }
  845. /**
  846. * pm_genpd_complete - Complete power transition of a device in a power domain.
  847. * @dev: Device to complete the transition of.
  848. *
  849. * Complete a power transition of a device (during a system-wide power
  850. * transition) under the assumption that its pm_domain field points to the
  851. * domain member of an object of type struct generic_pm_domain representing
  852. * a power domain consisting of I/O devices.
  853. */
  854. static void pm_genpd_complete(struct device *dev)
  855. {
  856. struct generic_pm_domain *genpd;
  857. bool run_complete;
  858. dev_dbg(dev, "%s()\n", __func__);
  859. genpd = dev_to_genpd(dev);
  860. if (IS_ERR(genpd))
  861. return;
  862. mutex_lock(&genpd->lock);
  863. run_complete = !genpd->suspend_power_off;
  864. if (--genpd->prepared_count == 0)
  865. genpd->suspend_power_off = false;
  866. mutex_unlock(&genpd->lock);
  867. if (run_complete) {
  868. pm_generic_complete(dev);
  869. pm_runtime_set_active(dev);
  870. pm_runtime_enable(dev);
  871. pm_runtime_idle(dev);
  872. }
  873. }
#else

/*
 * CONFIG_PM_SLEEP unset: no system sleep support, so pm_genpd_init() installs
 * NULL for all of the system suspend/hibernation callbacks.
 */
#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_dev_poweroff_noirq NULL
#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_restore NULL
#define pm_genpd_complete NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 *
 * Returns 0 on success, -EINVAL if the arguments are invalid, the domain is
 * powered off, or @dev already belongs to @genpd, -EAGAIN if the domain is in
 * the middle of a system-wide power transition, and -ENOMEM on allocation
 * failure.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices may only join a domain that is currently powered on. */
	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't change the domain's membership during a sleep transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject devices that are already members of this domain. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto out;
	}

	genpd->device_count++;

	/* Hook the device up to the domain's callbacks and bookkeeping. */
	dev->pm_domain = &genpd->domain;
	dev_pm_get_subsys_data(dev);
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	gpd_data->need_restore = false;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_release_lock(genpd);

	return ret;
}
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EAGAIN if the domain is in the middle of a
 * system-wide power transition, and -EINVAL if the arguments are invalid or
 * @dev is not a member of @genpd.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct pm_domain_data *pdd;
	int ret = -EINVAL;	/* overwritten only if @dev is found below */

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Don't change the domain's membership during a sleep transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		if (pdd->dev != dev)
			continue;

		/* Detach the device and release its domain data. */
		list_del_init(&pdd->list_node);
		pdd->dev = NULL;
		dev_pm_put_subsys_data(dev);
		dev->pm_domain = NULL;
		kfree(to_gpd_data(pdd));

		genpd->device_count--;

		ret = 0;
		break;
	}

 out:
	genpd_release_lock(genpd);

	return ret;
}
  967. /**
  968. * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  969. * @genpd: Master PM domain to add the subdomain to.
  970. * @subdomain: Subdomain to be added.
  971. */
  972. int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
  973. struct generic_pm_domain *subdomain)
  974. {
  975. struct gpd_link *link;
  976. int ret = 0;
  977. if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
  978. return -EINVAL;
  979. start:
  980. genpd_acquire_lock(genpd);
  981. mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
  982. if (subdomain->status != GPD_STATE_POWER_OFF
  983. && subdomain->status != GPD_STATE_ACTIVE) {
  984. mutex_unlock(&subdomain->lock);
  985. genpd_release_lock(genpd);
  986. goto start;
  987. }
  988. if (genpd->status == GPD_STATE_POWER_OFF
  989. && subdomain->status != GPD_STATE_POWER_OFF) {
  990. ret = -EINVAL;
  991. goto out;
  992. }
  993. list_for_each_entry(link, &genpd->slave_links, slave_node) {
  994. if (link->slave == subdomain && link->master == genpd) {
  995. ret = -EINVAL;
  996. goto out;
  997. }
  998. }
  999. link = kzalloc(sizeof(*link), GFP_KERNEL);
  1000. if (!link) {
  1001. ret = -ENOMEM;
  1002. goto out;
  1003. }
  1004. link->master = genpd;
  1005. list_add_tail(&link->master_node, &genpd->master_links);
  1006. link->slave = subdomain;
  1007. list_add_tail(&link->slave_node, &subdomain->slave_links);
  1008. if (subdomain->status != GPD_STATE_POWER_OFF)
  1009. genpd_sd_counter_inc(genpd);
  1010. out:
  1011. mutex_unlock(&subdomain->lock);
  1012. genpd_release_lock(genpd);
  1013. return ret;
  1014. }
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL if the arguments are invalid or no link
 * between @genpd and @subdomain exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;	/* overwritten only if the link is found below */

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/*
		 * If the subdomain is in a transitional state, drop both
		 * locks and retry from scratch until it settles in the "off"
		 * or "active" state.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);

		/*
		 * Balance the genpd_sd_counter_inc() done when the link was
		 * created with the subdomain powered on.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
  1051. /**
  1052. * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
  1053. * @dev: Device to add the callbacks to.
  1054. * @ops: Set of callbacks to add.
  1055. */
  1056. int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops)
  1057. {
  1058. struct pm_domain_data *pdd;
  1059. int ret = 0;
  1060. if (!(dev && dev->power.subsys_data && ops))
  1061. return -EINVAL;
  1062. pm_runtime_disable(dev);
  1063. device_pm_lock();
  1064. pdd = dev->power.subsys_data->domain_data;
  1065. if (pdd) {
  1066. struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
  1067. gpd_data->ops = *ops;
  1068. } else {
  1069. ret = -EINVAL;
  1070. }
  1071. device_pm_unlock();
  1072. pm_runtime_enable(dev);
  1073. return ret;
  1074. }
  1075. EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
  1076. /**
  1077. * pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
  1078. * @dev: Device to remove the callbacks from.
  1079. */
  1080. int pm_genpd_remove_callbacks(struct device *dev)
  1081. {
  1082. struct pm_domain_data *pdd;
  1083. int ret = 0;
  1084. if (!(dev && dev->power.subsys_data))
  1085. return -EINVAL;
  1086. pm_runtime_disable(dev);
  1087. device_pm_lock();
  1088. pdd = dev->power.subsys_data->domain_data;
  1089. if (pdd) {
  1090. struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
  1091. gpd_data->ops = (struct gpd_dev_ops){ 0 };
  1092. } else {
  1093. ret = -EINVAL;
  1094. }
  1095. device_pm_unlock();
  1096. pm_runtime_enable(dev);
  1097. return ret;
  1098. }
  1099. EXPORT_SYMBOL_GPL(pm_genpd_remove_callbacks);
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	/* Internal bookkeeping. */
	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->suspended_count = 0;

	/* Runtime PM callbacks. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;

	/* System sleep callbacks (all NULL when CONFIG_PM_SLEEP is unset). */
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;

	/* Make the new domain visible on the global domain list. */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}