/* pm_qos.h - PM Quality Of Service interface definitions */
  1. #ifndef _LINUX_PM_QOS_H
  2. #define _LINUX_PM_QOS_H
  3. /* interface for the pm_qos_power infrastructure of the linux kernel.
  4. *
  5. * Mark Gross <mgross@linux.intel.com>
  6. */
  7. #include <linux/plist.h>
  8. #include <linux/notifier.h>
  9. #include <linux/miscdevice.h>
  10. #include <linux/device.h>
  11. #include <linux/workqueue.h>
/*
 * Global PM QoS class IDs, passed as the pm_qos_class argument to
 * pm_qos_add_request() and friends.  The values act as indices, so
 * existing entries must not be reordered; add new classes just before
 * PM_QOS_NUM_CLASSES.
 */
enum {
	PM_QOS_RESERVED = 0,	/* index 0 is deliberately not a usable class */
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,
	PM_QOS_MEMORY_BANDWIDTH,
	/* insert new class ID */
	PM_QOS_NUM_CLASSES,	/* number of classes; keep last */
};
/*
 * Result of a device PM QoS flags query (dev_pm_qos_flags()):
 * how many of the flags in the queried mask are currently in effect.
 */
enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,	/* query could not be answered (e.g. !CONFIG_PM) */
	PM_QOS_FLAGS_NONE,		/* none of the requested flags are set */
	PM_QOS_FLAGS_SOME,		/* some, but not all, of them are set */
	PM_QOS_FLAGS_ALL,		/* all of the requested flags are set */
};
  27. #define PM_QOS_DEFAULT_VALUE -1
  28. #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  29. #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  30. #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
  31. #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
  32. #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
  33. #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
  34. #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
  35. #define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
  36. #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
  37. #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
/*
 * One caller's request against a global PM QoS class.
 * @node:         entry in the class's plist; the node priority carries
 *                the requested value
 * @pm_qos_class: which PM_QOS_* class this request belongs to
 * @work:         delayed work used by pm_qos_update_request_timeout()
 */
struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};
/*
 * One contributor to an aggregated flag set (struct pm_qos_flags).
 * @node:  entry in pm_qos_flags::list
 * @flags: the PM_QOS_FLAG_* bits this contributor requests
 */
struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};
/* Type of a per-device PM QoS request; selects dev_pm_qos_request::data. */
enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,	/* starts at 1, so a zeroed request has
					 * no valid type — presumably intentional;
					 * confirm against qos.c */
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};
/*
 * A per-device PM QoS request.
 * @type: selects which member of @data is active
 * @data: pnode for latency-style requests, flr for flag requests
 * @dev:  target device; non-NULL while the request is registered
 *        (see dev_pm_qos_request_active())
 */
struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};
/* How a constraint list is aggregated into a single target value. */
enum pm_qos_type {
	PM_QOS_UNITIALIZED,	/* [sic] historical spelling; renaming would break users */
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
	PM_QOS_SUM		/* return the sum */
};
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types linux supports for 32 bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;		/* requests; node priority = requested value */
	s32 target_value;	/* aggregated value; Do not change to 64 bit */
	s32 default_value;	/* value used when no requests are active */
	s32 no_constraint_value;	/* value meaning "no constraint" */
	enum pm_qos_type type;	/* aggregation rule: MAX, MIN or SUM */
	struct blocking_notifier_head *notifiers;	/* presumably called when
							 * target_value changes;
							 * verify in pm_qos.c */
};
/*
 * Aggregate of flag requests.  @effective_flags is the combination of
 * the listed pm_qos_flags_request entries (presumably a bitwise OR —
 * see pm_qos_update_flags() for the authoritative rule).
 */
struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};
/*
 * Per-device PM QoS state, reached via dev->power.qos (see the
 * dev_pm_qos_requested_*() accessors below).  The *_req pointers are
 * kernel-owned requests — NOTE(review): they appear to back the
 * dev_pm_qos_expose_*() interfaces; confirm in drivers/base/power/qos.c.
 */
struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};
/* Action requested to pm_qos_update_target() / pm_qos_update_flags() */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
  97. static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
  98. {
  99. return req->dev != NULL;
  100. }
/* Core constraint-list manipulation (implemented in kernel/power/qos.c). */
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);

/* Caller-facing API for the global PM QoS classes (enum above). */
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
/* Like pm_qos_update_request(), but the new value lapses after timeout_us. */
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);
/* Current aggregated target value of the given class. */
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
#ifdef CONFIG_PM
/*
 * Per-device PM QoS API.  NOTE(review): the double-underscore variants
 * presumably assume the caller already holds the relevant lock — verify
 * against drivers/base/power/qos.c.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
/* Lifecycle hooks for dev->power.qos, called from the driver core. */
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
/* Expose/hide user-visible (sysfs-style) latency and flags controls. */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);
  147. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
  148. {
  149. return dev->power.qos->resume_latency_req->data.pnode.prio;
  150. }
  151. static inline s32 dev_pm_qos_requested_flags(struct device *dev)
  152. {
  153. return dev->power.qos->flags_req->data.flr.flags;
  154. }
#else
/*
 * !CONFIG_PM stubs: PM QoS support is compiled out, so every operation
 * becomes a no-op reporting success (0) or a neutral/sentinel value.
 * Callers may therefore use this API without #ifdefs of their own.
 */
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
/* Even without QoS tracking, keep dev->power.power_state consistent. */
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
/* With PM compiled out, report "no constraint" for the user tolerance. */
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif

#endif /* _LINUX_PM_QOS_H */