blk-wbt.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};
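
/*
 * Note: these flags are stored in the reserved bits of blk_issue_stat::stat
 * (see the helpers below), so WBT_NR_BITS is expected to fit within the
 * reserved width provided by blk-stat.h (BLK_STAT_RES_MASK /
 * BLK_STAT_RES_SHIFT).
 */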

enum {
	WBT_NUM_RWQ	= 2,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};

static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->stat &= ~BLK_STAT_RES_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
}
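
/*
 * Illustrative sketch (not part of the kernel API): the helpers above
 * round-trip wbt_flags through the reserved bits of blk_issue_stat::stat,
 * above the issue timestamp kept in the low bits (see blk-stat.h):
 *
 *	struct blk_issue_stat stat = { .stat = 0 };
 *
 *	wbt_track(&stat, WBT_TRACKED | WBT_KSWAPD);
 *	WARN_ON(!wbt_is_tracked(&stat));	// WBT_TRACKED bit is set
 *	WARN_ON(wbt_is_read(&stat));		// WBT_READ bit is not
 *	wbt_clear_state(&stat);			// drops all wbt_flags bits
 */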

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
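
/*
 * struct rq_wb below keeps WBT_NUM_RWQ of these, so regular writeback and
 * writeback issued by kswapd are throttled on separate inflight counters
 * and wait queues.
 */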

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};
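
/*
 * Illustrative sketch: wb_background/wb_normal/wb_max are the inflight
 * limits the throttle scales between. The real limit selection lives in
 * blk-wbt.c and also considers kswapd, sync writes and recently completed
 * reads; a simplified version of the idea:
 *
 *	static inline unsigned int wbt_limit_sketch(struct rq_wb *rwb,
 *						    bool background)
 *	{
 *		if (!rwb->wb_normal)			// throttling is off
 *			return UINT_MAX;
 *		return background ? rwb->wb_background : rwb->wb_normal;
 *	}
 *
 * Writers over the chosen limit sleep on the matching rq_wait entry until
 * completions bring ->inflight back under it.
 */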

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */
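
/*
 * Typical call flow, roughly as wired up by the block core (illustrative;
 * the exact call sites are in blk-core.c / blk-mq.c):
 *
 *	wbt_init(q);					// attach to the queue
 *	...
 *	flags = wbt_wait(q->rq_wb, bio, lock);		// may sleep before issue
 *	wbt_track(&rq->issue_stat, flags);		// remember the accounting
 *	wbt_issue(q->rq_wb, &rq->issue_stat);		// when dispatched
 *	wbt_done(q->rq_wb, &rq->issue_stat);		// on completion, or
 *	wbt_requeue(q->rq_wb, &rq->issue_stat);		// on requeue instead
 *	...
 *	wbt_exit(q);					// torn down with the queue
 */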

#endif /* WB_THROTTLE_H */