/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/* FIXME: make it clear which methods are optional.  Get debug policy to
 * double check this at start.
 */

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 *
 * When the core target has to remap a bio it calls the 'map' method of the
 * policy.  This returns an instruction telling the core target what to do.
 *
 * POLICY_HIT:
 *   That block is in the cache.  Remap to the cache and carry on.
 *
 * POLICY_MISS:
 *   This block is on the origin device.  Remap and carry on.
 *
 * POLICY_NEW:
 *   This block is currently on the origin device, but the policy wants to
 *   move it.  The core should:
 *
 *   - hold any further io to this origin block
 *   - copy the origin to the given cache block
 *   - release all the held blocks
 *   - remap the original block to the cache
 *
 * POLICY_REPLACE:
 *   This block is currently on the origin device.  The policy wants to
 *   move it to the cache, with the added complication that the destination
 *   cache block needs a writeback first.  The core should:
 *
 *   - hold any further io to this origin block
 *   - hold any further io to the origin block that's being written back
 *   - writeback
 *   - copy new block to cache
 *   - release held blocks
 *   - remap bio to cache and reissue.
 *
 * Should the core run into trouble while processing a POLICY_NEW or
 * POLICY_REPLACE instruction it will roll back the policy's mapping using
 * remove_mapping() or force_mapping().  These methods must not fail.  This
 * approach avoids having transactional semantics in the policy (i.e. the
 * core informing the policy when a migration is complete), and hence makes
 * it easier to write new policies.
 *
 * In general, policy methods should never block, except in the case of the
 * map function when can_migrate is set.  So be careful to implement using
 * bounded, preallocated memory.
 */

enum policy_operation {
	POLICY_HIT,
	POLICY_MISS,
	POLICY_NEW,
	POLICY_REPLACE
};

/*
 * When issuing a POLICY_REPLACE the policy needs to make a callback to
 * lock the block being demoted.  This doesn't need to occur during a
 * writeback operation since the block remains in the cache.
 */
struct policy_locker;
typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);

struct policy_locker {
	policy_lock_fn fn;
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_result {
	enum policy_operation op;
	dm_oblock_t old_oblock;	/* POLICY_REPLACE */
	dm_cblock_t cblock;	/* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};
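
/*
 * For illustration only (this is a sketch, not the core target's actual
 * code; defer_bio(), remap_to_cache(), remap_to_origin(), promote() and
 * demote_then_promote() are hypothetical helpers): after calling the map
 * method declared below, a caller might act on the result like this:
 *
 *	struct policy_result result;
 *	int r = p->map(p, oblock, true, true, false, bio, locker, &result);
 *
 *	if (r == -EWOULDBLOCK) {
 *		defer_bio(bio);		// retry later from a context that may migrate
 *		return;
 *	}
 *
 *	switch (result.op) {
 *	case POLICY_HIT:
 *		remap_to_cache(bio, result.cblock);
 *		break;
 *
 *	case POLICY_MISS:
 *		remap_to_origin(bio, oblock);
 *		break;
 *
 *	case POLICY_NEW:
 *		promote(oblock, result.cblock, bio);
 *		break;
 *
 *	case POLICY_REPLACE:
 *		demote_then_promote(result.old_oblock, oblock, result.cblock, bio);
 *		break;
 *	}
 */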

/*
 * The cache policy object.  Just a bunch of methods.  It is envisaged that
 * this structure will be embedded in a bigger, policy specific structure
 * (ie. use container_of()).
 */
struct dm_cache_policy {

	/*
	 * FIXME: make it clear which methods are optional, and which may
	 * block.
	 */

	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * See large comment above.
	 *
	 * oblock - the origin block we're interested in.
	 *
	 * can_block - indicates whether the current thread is allowed to
	 *             block.  -EWOULDBLOCK returned if it can't and would.
	 *
	 * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
	 *               instructions.  If denied and the policy would have
	 *               returned one of these instructions it should
	 *               return -EWOULDBLOCK.
	 *
	 * discarded_oblock - indicates whether the whole origin block is
	 *               in a discarded state (FIXME: better to tell the
	 *               policy about this sooner, so it can recycle that
	 *               cache block if it wants.)
	 *
	 * bio - the bio that triggered this call.
	 *
	 * result - gets filled in with the instruction.
	 *
	 * May only return 0, or -EWOULDBLOCK (if !can_migrate)
	 */
	int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool discarded_oblock,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result);
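
	/*
	 * A map method honouring the contract above might look roughly like
	 * the sketch below.  struct trivial_policy, __lookup(), __free_cblock()
	 * and __choose_victim() are hypothetical names, not part of this API:
	 *
	 *	static int trivial_map(struct dm_cache_policy *p, dm_oblock_t oblock,
	 *			       bool can_block, bool can_migrate,
	 *			       bool discarded_oblock, struct bio *bio,
	 *			       struct policy_locker *locker,
	 *			       struct policy_result *result)
	 *	{
	 *		struct trivial_policy *tp = container_of(p, struct trivial_policy, policy);
	 *
	 *		if (__lookup(tp, oblock, &result->cblock)) {
	 *			result->op = POLICY_HIT;
	 *			return 0;
	 *		}
	 *
	 *		if (!can_migrate)
	 *			return -EWOULDBLOCK;	// we'd want POLICY_NEW/REPLACE
	 *
	 *		if (__free_cblock(tp, &result->cblock)) {
	 *			result->op = POLICY_NEW;
	 *		} else {
	 *			// Demoting: the victim must be locked via the
	 *			// locker callback before returning POLICY_REPLACE.
	 *			__choose_victim(tp, &result->old_oblock, &result->cblock);
	 *			if (locker->fn(locker, result->old_oblock))
	 *				result->op = POLICY_MISS;	// victim busy, just miss
	 *			else
	 *				result->op = POLICY_REPLACE;
	 *		}
	 *		return 0;
	 *	}
	 */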

	/*
	 * Sometimes we want to see if a block is in the cache, without
	 * triggering any update of stats (ie. it's not a real hit).
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
	 * (-EWOULDBLOCK would be typical).
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);

	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, uint32_t hint, bool hint_valid);

	/*
	 * Gets the hint for a given cblock.  Called in a single threaded
	 * context, so no locking required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Override functions used on the error paths of the core target.
	 * They must succeed.
	 */
	void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
			      dm_oblock_t new_oblock);

	/*
	 * This is called via the invalidate_cblocks message.  It is
	 * possible the particular cblock has already been removed due to a
	 * write io in passthrough mode, in which case this should return
	 * -ENODATA.
	 */
	int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Provide a dirty block to be written back by the core target.  If
	 * critical_only is set then the policy should only provide work if
	 * it urgently needs it.
	 *
	 * Returns:
	 *
	 * 0 and @cblock,@oblock: block to write back provided
	 *
	 * -ENODATA: no dirty blocks available
	 */
	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock,
			      bool critical_only);
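
	/*
	 * E.g. (a sketch only; queue_writeback() is a hypothetical helper)
	 * a caller can drain work until the policy reports -ENODATA:
	 *
	 *	dm_oblock_t oblock;
	 *	dm_cblock_t cblock;
	 *
	 *	while (!p->writeback_work(p, &oblock, &cblock, false))
	 *		queue_writeback(oblock, cblock);
	 */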

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);
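
	/*
	 * For example, a policy might stamp each entry with the tick it was
	 * last hit in, and only bump the hit count when the stamp changes
	 * (the field and variable names here are illustrative only):
	 *
	 *	if (e->last_tick != tp->tick) {
	 *		e->last_tick = tp->tick;
	 *		e->hit_count++;
	 *	}
	 *
	 * with tp->tick incremented from the tick() method.
	 */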

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);
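
	/*
	 * A rough emit_config_values() sketch.  'trivial_policy' and its
	 * promote_threshold field are made up for illustration; DMEMIT()
	 * comes from linux/device-mapper.h and appends to 'result' while
	 * respecting 'maxlen':
	 *
	 *	static int trivial_emit_config_values(struct dm_cache_policy *p,
	 *					      char *result, unsigned maxlen,
	 *					      ssize_t *sz_ptr)
	 *	{
	 *		struct trivial_policy *tp = container_of(p, struct trivial_policy, policy);
	 *		ssize_t sz = *sz_ptr;
	 *
	 *		DMEMIT("2 promote_threshold %u", tp->promote_threshold);
	 *
	 *		*sz_ptr = sz;
	 *		return 0;
	 *	}
	 */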

	/*
	 * Bookkeeping ptr for the policy register, not for general use.
	 */
	void *private;
};
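
/*
 * Policies usually embed this object in their own structure and recover
 * it with container_of().  A hypothetical layout (names invented for the
 * sketch):
 *
 *	struct trivial_policy {
 *		struct dm_cache_policy policy;
 *		spinlock_t lock;
 *		unsigned promote_threshold;
 *		// ... policy specific state ...
 *	};
 *
 *	static struct trivial_policy *to_trivial(struct dm_cache_policy *p)
 *	{
 *		return container_of(p, struct trivial_policy, policy);
 *	}
 */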

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;

	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
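
/*
 * A policy module would typically define a dm_cache_policy_type and
 * register it from its init/exit hooks.  Sketch only; the "trivial" name,
 * version and create function are made up for illustration:
 *
 *	static struct dm_cache_policy_type trivial_policy_type = {
 *		.name = "trivial",
 *		.version = {1, 0, 0},
 *		.hint_size = 4,
 *		.owner = THIS_MODULE,
 *		.create = trivial_create
 *	};
 *
 *	static int __init trivial_init(void)
 *	{
 *		return dm_cache_policy_register(&trivial_policy_type);
 *	}
 *
 *	static void __exit trivial_exit(void)
 *	{
 *		dm_cache_policy_unregister(&trivial_policy_type);
 *	}
 *
 *	module_init(trivial_init);
 *	module_exit(trivial_exit);
 */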

/*----------------------------------------------------------------*/

#endif	/* DM_CACHE_POLICY_H */