qos.c

/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register different types of notification callbacks:
 *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
 *    The notification chain data is stored in the per-device constraint
 *    data struct.
 *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
 *    API. The notification chain data is stored in a static variable.
 *
 * Note about the per-device constraint data struct allocation:
 *  . The per-device constraints data struct ptr is stored into the device
 *    dev_pm_info.
 *  . To minimize the data usage by the per-device constraints, the data struct
 *    is only allocated at the first call to dev_pm_qos_add_request.
 *  . The data is later free'd when the device is removed from the system.
 *  . A global mutex protects the constraints users from the data being
 *    allocated and free'd.
 */
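/*
 * Illustrative sketch (not part of the original file): how the two roles
 * above might look in a driver.  The "dev" pointer, the request and notifier
 * objects and the 100 us value are assumptions made up for this example.
 *
 *      static struct dev_pm_qos_request my_req;
 *      static struct notifier_block my_nb;    // .notifier_call set elsewhere
 *
 *      // dependent: request a 100 us limit on the device's resume latency
 *      ret = dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_LATENCY, 100);
 *
 *      // watcher: get called back when the aggregated value changes
 *      ret = dev_pm_qos_add_notifier(dev, &my_nb);
 */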
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);

static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
        struct dev_pm_qos *qos = dev->power.qos;
        struct pm_qos_flags *pqf;
        s32 val;

        if (!qos)
                return PM_QOS_FLAGS_UNDEFINED;

        pqf = &qos->flags;
        if (list_empty(&pqf->list))
                return PM_QOS_FLAGS_UNDEFINED;

        val = pqf->effective_flags & mask;
        if (val)
                return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

        return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
        unsigned long irqflags;
        enum pm_qos_flags_status ret;

        spin_lock_irqsave(&dev->power.lock, irqflags);
        ret = __dev_pm_qos_flags(dev, mask);
        spin_unlock_irqrestore(&dev->power.lock, irqflags);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
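/*
 * Illustrative sketch (not part of the original file): a bus or power domain
 * could use dev_pm_qos_flags() to decide whether a device may be powered off.
 * PM_QOS_FLAG_NO_POWER_OFF is assumed to come from <linux/pm_qos.h>.
 *
 *      if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *                      != PM_QOS_FLAGS_NONE)
 *              return -EBUSY;  // at least one request forbids power-off
 */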
/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
        return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
        unsigned long flags;
        s32 ret;

        spin_lock_irqsave(&dev->power.lock, flags);
        ret = __dev_pm_qos_read_value(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks
 */
static int apply_constraint(struct dev_pm_qos_request *req,
                            enum pm_qos_req_action action, s32 value)
{
        struct dev_pm_qos *qos = req->dev->power.qos;
        int ret;

        switch(req->type) {
        case DEV_PM_QOS_LATENCY:
                ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
                                           action, value);
                if (ret) {
                        value = pm_qos_read_value(&qos->latency);
                        blocking_notifier_call_chain(&dev_pm_notifiers,
                                                     (unsigned long)value,
                                                     req);
                }
                break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct pm_qos_constraints *c;
        struct blocking_notifier_head *n;

        qos = kzalloc(sizeof(*qos), GFP_KERNEL);
        if (!qos)
                return -ENOMEM;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                kfree(qos);
                return -ENOMEM;
        }
        BLOCKING_INIT_NOTIFIER_HEAD(n);

        c = &qos->latency;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
        c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
        c->type = PM_QOS_MIN;
        c->notifiers = n;

        INIT_LIST_HEAD(&qos->flags.list);

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = qos;
        spin_unlock_irq(&dev->power.lock);

        return 0;
}
/**
 * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
 * @dev: target device
 *
 * Called from the device PM subsystem during device insertion under
 * device_pm_lock().
 */
void dev_pm_qos_constraints_init(struct device *dev)
{
        mutex_lock(&dev_pm_qos_mtx);
        dev->power.qos = NULL;
        dev->power.power_state = PMSG_ON;
        mutex_unlock(&dev_pm_qos_mtx);
}
/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct dev_pm_qos_request *req, *tmp;
        struct pm_qos_constraints *c;
        struct pm_qos_flags *f;

        /*
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
        dev_pm_qos_hide_latency_limit(dev);
        dev_pm_qos_hide_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);

        dev->power.power_state = PMSG_INVALID;
        qos = dev->power.qos;
        if (!qos)
                goto out;

        /* Flush the constraints lists for the device. */
        c = &qos->latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
                 * callbacks if needed
                 */
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = NULL;
        spin_unlock_irq(&dev->power.lock);

        kfree(c->notifiers);
        kfree(qos);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
}
/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
{
        int ret = 0;

        if (!dev || !req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(dev_pm_qos_request_active(req),
                 "%s() called for already added request\n", __func__))
                return -EINVAL;

        req->dev = dev;

        mutex_lock(&dev_pm_qos_mtx);

        if (!dev->power.qos) {
                if (dev->power.power_state.event == PM_EVENT_INVALID) {
                        /* The device has been removed from the system. */
                        req->dev = NULL;
                        ret = -ENODEV;
                        goto out;
                } else {
                        /*
                         * Allocate the constraints data on the first call to
                         * add_request, i.e. only if the data is not already
                         * allocated and if the device has not been removed.
                         */
                        ret = dev_pm_qos_constraints_allocate(dev);
                }
        }

        if (!ret) {
                req->type = type;
                ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
        }

 out:
        mutex_unlock(&dev_pm_qos_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
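/*
 * Illustrative sketch (not part of the original file): typical lifecycle of a
 * request handle.  The "dev" pointer and the latency values are assumptions
 * made up for this example; the handle must remain valid until the request is
 * removed.
 *
 *      static struct dev_pm_qos_request my_req;
 *
 *      ret = dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_LATENCY, 200);
 *      if (ret < 0)
 *              return ret;
 *
 *      // later: tighten the constraint to 50 us
 *      dev_pm_qos_update_request(&my_req, 50);
 *
 *      // finally: drop the constraint altogether
 *      dev_pm_qos_remove_request(&my_req);
 */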
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                       s32 new_value)
{
        s32 curr_value;
        int ret = 0;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (!req->dev->power.qos)
                return -ENODEV;

        switch(req->type) {
        case DEV_PM_QOS_LATENCY:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
                curr_value = req->data.flr.flags;
                break;
        default:
                return -EINVAL;
        }

        if (curr_value != new_value)
                ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

        return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_update_request(req, new_value);
        mutex_unlock(&dev_pm_qos_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret = 0;

        if (!req) /* guard against callers passing in null */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (req->dev->power.qos) {
                ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
                                       PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        } else {
                ret = -ENODEV;
        }

        return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_remove_request(req);
        mutex_unlock(&dev_pm_qos_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
        int ret = 0;

        mutex_lock(&dev_pm_qos_mtx);

        if (!dev->power.qos)
                ret = dev->power.power_state.event != PM_EVENT_INVALID ?
                        dev_pm_qos_constraints_allocate(dev) : -ENODEV;

        if (!ret)
                ret = blocking_notifier_chain_register(
                                dev->power.qos->latency.notifiers, notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
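/*
 * Illustrative sketch (not part of the original file): a per-device watcher.
 * The callback receives the new aggregated latency value as the "action"
 * argument; the callback body and names are assumptions made up for this
 * example.
 *
 *      static int my_latency_notify(struct notifier_block *nb,
 *                                   unsigned long new_latency, void *data)
 *      {
 *              // react to the new aggregated constraint, e.g. retune FIFOs
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_latency_notify,
 *      };
 *
 *      dev_pm_qos_add_notifier(dev, &my_nb);
 *      // ... and later, before the watcher goes away:
 *      dev_pm_qos_remove_notifier(dev, &my_nb);
 */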
/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier)
{
        int retval = 0;

        mutex_lock(&dev_pm_qos_mtx);

        /* Silently return if the constraints object is not present. */
        if (dev->power.qos)
                retval = blocking_notifier_chain_unregister(
                                dev->power.qos->latency.notifiers,
                                notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
        return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
        return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req, s32 value)
{
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;

        while (ancestor && !ancestor->power.ignore_children)
                ancestor = ancestor->parent;

        if (ancestor)
                ret = dev_pm_qos_add_request(ancestor, req,
                                             DEV_PM_QOS_LATENCY, value);

        if (ret < 0)
                req->dev = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
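/*
 * Illustrative sketch (not part of the original file): a child device, e.g.
 * an I2C client, constraining the first ancestor that has ignore_children
 * set (typically its controller).  The client pointer and the 1000 us value
 * are assumptions made up for this example.
 *
 *      static struct dev_pm_qos_request ctrl_req;
 *
 *      ret = dev_pm_qos_add_ancestor_request(&client->dev, &ctrl_req, 1000);
 *      if (ret < 0)
 *              dev_warn(&client->dev, "no suitable ancestor for PM QoS\n");
 */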
#ifdef CONFIG_PM_RUNTIME
static void __dev_pm_qos_drop_user_request(struct device *dev,
                                           enum dev_pm_qos_req_type type)
{
        struct dev_pm_qos_request *req = NULL;

        switch(type) {
        case DEV_PM_QOS_LATENCY:
                req = dev->power.qos->latency_req;
                dev->power.qos->latency_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
                dev->power.qos->flags_req = NULL;
                break;
        }
        __dev_pm_qos_remove_request(req);
        kfree(req);
}
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev) || value < 0)
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        mutex_lock(&dev_pm_qos_mtx);

        if (!dev->power.qos)
                ret = -ENODEV;
        else if (dev->power.qos->latency_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                goto out;
        }

        dev->power.qos->latency_req = req;
        ret = pm_qos_sysfs_add_latency(dev);
        if (ret)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
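/*
 * Illustrative sketch (not part of the original file): a driver letting user
 * space tune the resume latency limit of its device.  The probe/remove
 * pairing and the 100 us initial value are assumptions made up for this
 * example; once exposed, the limit appears as a writable attribute in the
 * device's power/ directory in sysfs.
 *
 *      static int foo_probe(struct device *dev)
 *      {
 *              return dev_pm_qos_expose_latency_limit(dev, 100);
 *      }
 *
 *      static void foo_remove(struct device *dev)
 *      {
 *              dev_pm_qos_hide_latency_limit(dev);
 *      }
 */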
/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
        mutex_lock(&dev_pm_qos_mtx);
        if (dev->power.qos && dev->power.qos->latency_req) {
                pm_qos_sysfs_remove_latency(dev);
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
        }
        mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev))
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);

        if (!dev->power.qos)
                ret = -ENODEV;
        else if (dev->power.qos->flags_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                goto out;
        }

        dev->power.qos->flags_req = req;
        ret = pm_qos_sysfs_add_flags(dev);
        if (ret)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);
        if (dev->power.qos && dev->power.qos->flags_req) {
                pm_qos_sysfs_remove_flags(dev);
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
        }
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
        s32 value;
        int ret;

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);

        if (!dev->power.qos || !dev->power.qos->flags_req) {
                ret = -EINVAL;
                goto out;
        }

        value = dev_pm_qos_requested_flags(dev);
        if (set)
                value |= mask;
        else
                value &= ~mask;

        ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
}
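/*
 * Illustrative sketch (not part of the original file): kernel code flipping
 * one of the user-space-owned flags, e.g. to forbid powering the device off
 * while it is in use.  PM_QOS_FLAG_NO_POWER_OFF is assumed to come from
 * <linux/pm_qos.h>, and the flags must have been exposed beforehand with
 * dev_pm_qos_expose_flags().
 *
 *      ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *      if (ret < 0)
 *              dev_dbg(dev, "PM QoS flags not exposed for this device\n");
 */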
#endif /* CONFIG_PM_RUNTIME */