pm_qos.h

#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
        PM_QOS_RESERVED = 0,
        PM_QOS_CPU_DMA_LATENCY,
        PM_QOS_NETWORK_LATENCY,
        PM_QOS_NETWORK_THROUGHPUT,

        /* insert new class ID */
        PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
        PM_QOS_FLAGS_UNDEFINED = -1,
        PM_QOS_FLAGS_NONE,
        PM_QOS_FLAGS_SOME,
        PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE        (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE        (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE            0

struct pm_qos_request {
        struct plist_node node;
        int pm_qos_class;
        struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
        struct list_head node;
        s32 flags;      /* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
        DEV_PM_QOS_LATENCY = 1,
        DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
        enum dev_pm_qos_req_type type;
        union {
                struct plist_node pnode;
                struct pm_qos_flags_request flr;
        } data;
        struct device *dev;
};

enum pm_qos_type {
        PM_QOS_UNITIALIZED,
        PM_QOS_MAX,             /* return the largest value */
        PM_QOS_MIN              /* return the smallest value */
};
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
        struct plist_head list;
        s32 target_value;       /* Do not change to 64 bit */
        s32 default_value;
        enum pm_qos_type type;
        struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
        struct list_head list;
        s32 effective_flags;    /* Do not change to 64 bit */
};

struct dev_pm_qos {
        struct pm_qos_constraints latency;
        struct pm_qos_flags flags;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
        PM_QOS_ADD_REQ,         /* Add a new request */
        PM_QOS_UPDATE_REQ,      /* Update an existing request */
        PM_QOS_REMOVE_REQ       /* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
        return req->dev != NULL;
}
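
/*
 * Usage sketch (illustrative only, with locking and notification details
 * omitted): the PM QoS core is expected to feed requests into a constraints
 * object through pm_qos_update_target(), roughly along these lines:
 *
 *      pm_qos_update_target(c, &req->node, PM_QOS_ADD_REQ, value);
 *      ...
 *      pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, new_value);
 *      ...
 *      pm_qos_update_target(c, &req->node, PM_QOS_REMOVE_REQ,
 *                           PM_QOS_DEFAULT_VALUE);
 */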
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                         enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
                         struct pm_qos_flags_request *req,
                         enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
                        s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
                           s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
                                   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
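
/*
 * Usage sketch (illustrative only; the request name and the 20 usec bound
 * below are made up): a driver can bound CPU DMA latency around a
 * latency-sensitive section with the class API declared above, e.g.
 *
 *      static struct pm_qos_request my_qos_req;
 *
 *      pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *      ... latency-critical section ...
 *      pm_qos_update_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
 *      ...
 *      pm_qos_remove_request(&my_qos_req);
 *
 * Code interested in the aggregate value of a class can register a
 * notifier_block with pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &nb)
 * and query the current target with pm_qos_request(PM_QOS_CPU_DMA_LATENCY).
 */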
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
                            struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
                                                           s32 mask)
                        { return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
                                                        s32 mask)
                        { return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
                        { return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
                        { return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
                                          struct dev_pm_qos_request *req,
                                          enum dev_pm_qos_req_type type,
                                          s32 value)
                        { return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                             s32 new_value)
                        { return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
                        { return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
                                           struct notifier_block *notifier)
                        { return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
                                              struct notifier_block *notifier)
                        { return 0; }
static inline int dev_pm_qos_add_global_notifier(
                                        struct notifier_block *notifier)
                        { return 0; }
static inline int dev_pm_qos_remove_global_notifier(
                                        struct notifier_block *notifier)
                        { return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
        dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
        dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
                                                  struct dev_pm_qos_request *req,
                                                  s32 value)
                        { return 0; }
#endif
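
/*
 * Usage sketch (illustrative only; the request variable, the 100/50 usec
 * values and the error handling are made up): with CONFIG_PM set, a driver
 * can attach a latency constraint to its own device via the per-device API
 * declared above, e.g.
 *
 *      static struct dev_pm_qos_request dev_req;
 *
 *      ret = dev_pm_qos_add_request(dev, &dev_req, DEV_PM_QOS_LATENCY, 100);
 *      ...
 *      dev_pm_qos_update_request(&dev_req, 50);
 *      ...
 *      if (dev_pm_qos_request_active(&dev_req))
 *              dev_pm_qos_remove_request(&dev_req);
 */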
#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
                        { return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
#endif
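
/*
 * Usage sketch (illustrative only; the probe/remove placement is an
 * assumption): with CONFIG_PM_RUNTIME set, a driver that wants user space
 * to control a latency limit for its device could expose it at probe time
 * and hide it again on removal, e.g.
 *
 *      dev_pm_qos_expose_latency_limit(dev, PM_QOS_DEV_LAT_DEFAULT_VALUE);
 *      ...
 *      dev_pm_qos_hide_latency_limit(dev);
 */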
#endif