blk-wbt.h

#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};
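
/*
 * The helpers below stash the wbt_flags for a request in the upper bits of
 * blk_issue_stat->time (everything above BLK_STAT_SHIFT), so the tracking
 * state rides along with the issue timestamp kept in the low bits.
 */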
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->time &= BLK_STAT_TIME_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
}
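
/*
 * One wait queue plus an atomic in-flight count per writeback class
 * (see WBT_NUM_RWQ); the throttling core is expected to sleep writers
 * on ->wait once ->inflight reaches the current limit.
 */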
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;	/* background writeback */
	unsigned int wb_normal;		/* normal writeback */
	unsigned int wb_max;		/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;		/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;			/* default window size */
	u64 cur_win_nsec;		/* current window size */

	struct timer_list window_timer;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;	/* last non-throttled issue */
	unsigned long last_comp;	/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};
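
/*
 * Roughly: wb_background/wb_normal/wb_max act as in-flight write limits
 * derived from the device queue depth, and the scaling logic moves between
 * them, with scale_step recording where we currently sit.
 */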
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);
u64 wbt_default_latency_nsec(struct request_queue *);
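
/*
 * Rough sketch of how callers are expected to pair these (illustrative
 * only; the actual call sites live in the block core, and the q->rq_wb /
 * rq->issue_stat names below should be treated as placeholders):
 *
 *	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);  // may throttle
 *	wbt_track(&rq->issue_stat, wb_acct);               // remember flags
 *	...
 *	wbt_issue(q->rq_wb, &rq->issue_stat);              // on dispatch
 *	...
 *	wbt_done(q->rq_wb, &rq->issue_stat);               // on completion/free
 *
 * wbt_requeue() undoes wbt_issue() if the request bounces back to the queue.
 */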
#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif