/*
 * tmem.h
 *
 * Transcendent memory
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 */
  8. #ifndef _TMEM_H_
  9. #define _TMEM_H_
  10. #include <linux/types.h>
  11. #include <linux/highmem.h>
  12. #include <linux/hash.h>
  13. #include <linux/atomic.h>
  14. /*
  15. * These are pre-defined by the Xen<->Linux ABI
  16. */
  17. #define TMEM_PUT_PAGE 4
  18. #define TMEM_GET_PAGE 5
  19. #define TMEM_FLUSH_PAGE 6
  20. #define TMEM_FLUSH_OBJECT 7
  21. #define TMEM_POOL_PERSIST 1
  22. #define TMEM_POOL_SHARED 2
  23. #define TMEM_POOL_PRECOMPRESSED 4
  24. #define TMEM_POOL_PAGESIZE_SHIFT 4
  25. #define TMEM_POOL_PAGESIZE_MASK 0xf
  26. #define TMEM_POOL_RESERVED_BITS 0x00ffff00
  27. /*
  28. * sentinels have proven very useful for debugging but can be removed
  29. * or disabled before final merge.
  30. */
  31. #define SENTINELS
  32. #ifdef SENTINELS
  33. #define DECL_SENTINEL uint32_t sentinel;
  34. #define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
  35. #define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
  36. #define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
  37. #define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
  38. #else
  39. #define DECL_SENTINEL
  40. #define SET_SENTINEL(_x, _y) do { } while (0)
  41. #define INVERT_SENTINEL(_x, _y) do { } while (0)
  42. #define ASSERT_SENTINEL(_x, _y) do { } while (0)
  43. #define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
  44. #endif
  45. #define ASSERT_SPINLOCK(_l) WARN_ON(!spin_is_locked(_l))
  46. /*
  47. * A pool is the highest-level data structure managed by tmem and
  48. * usually corresponds to a large independent set of pages such as
  49. * a filesystem. Each pool has an id, and certain attributes and counters.
  50. * It also contains a set of hash buckets, each of which contains an rbtree
  51. * of objects and a lock to manage concurrency within the pool.
  52. */
  53. #define TMEM_HASH_BUCKET_BITS 8
  54. #define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
  55. struct tmem_hashbucket {
  56. struct rb_root obj_rb_root;
  57. spinlock_t lock;
  58. };
  59. struct tmem_pool {
  60. void *client; /* "up" for some clients, avoids table lookup */
  61. struct list_head pool_list;
  62. uint32_t pool_id;
  63. bool persistent;
  64. bool shared;
  65. atomic_t obj_count;
  66. atomic_t refcount;
  67. struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
  68. DECL_SENTINEL
  69. };
  70. #define is_persistent(_p) (_p->persistent)
  71. #define is_ephemeral(_p) (!(_p->persistent))
  72. /*
  73. * An object id ("oid") is large: 192-bits (to ensure, for example, files
  74. * in a modern filesystem can be uniquely identified).
  75. */
  76. struct tmem_oid {
  77. uint64_t oid[3];
  78. };
  79. static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
  80. {
  81. oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
  82. }
  83. static inline bool tmem_oid_valid(struct tmem_oid *oidp)
  84. {
  85. return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
  86. oidp->oid[2] != -1UL;
  87. }
  88. static inline int tmem_oid_compare(struct tmem_oid *left,
  89. struct tmem_oid *right)
  90. {
  91. int ret;
  92. if (left->oid[2] == right->oid[2]) {
  93. if (left->oid[1] == right->oid[1]) {
  94. if (left->oid[0] == right->oid[0])
  95. ret = 0;
  96. else if (left->oid[0] < right->oid[0])
  97. ret = -1;
  98. else
  99. return 1;
  100. } else if (left->oid[1] < right->oid[1])
  101. ret = -1;
  102. else
  103. ret = 1;
  104. } else if (left->oid[2] < right->oid[2])
  105. ret = -1;
  106. else
  107. ret = 1;
  108. return ret;
  109. }
  110. static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
  111. {
  112. return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
  113. TMEM_HASH_BUCKET_BITS);
  114. }
  115. /*
  116. * A tmem_obj contains an identifier (oid), pointers to the parent
  117. * pool and the rb_tree to which it belongs, counters, and an ordered
  118. * set of pampds, structured in a radix-tree-like tree. The intermediate
  119. * nodes of the tree are called tmem_objnodes.
  120. */
  121. struct tmem_objnode;
  122. struct tmem_obj {
  123. struct tmem_oid oid;
  124. struct tmem_pool *pool;
  125. struct rb_node rb_tree_node;
  126. struct tmem_objnode *objnode_tree_root;
  127. unsigned int objnode_tree_height;
  128. unsigned long objnode_count;
  129. long pampd_count;
  130. DECL_SENTINEL
  131. };
  132. #define OBJNODE_TREE_MAP_SHIFT 6
  133. #define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
  134. #define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
  135. #define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
  136. #define OBJNODE_TREE_MAX_PATH \
  137. (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
  138. struct tmem_objnode {
  139. struct tmem_obj *obj;
  140. DECL_SENTINEL
  141. void *slots[OBJNODE_TREE_MAP_SIZE];
  142. unsigned int slots_in_use;
  143. };
  144. /* pampd abstract datatype methods provided by the PAM implementation */
  145. struct tmem_pamops {
  146. void *(*create)(struct tmem_pool *, struct tmem_oid *, uint32_t,
  147. struct page *);
  148. int (*get_data)(struct page *, void *, struct tmem_pool *);
  149. void (*free)(void *, struct tmem_pool *);
  150. };
  151. extern void tmem_register_pamops(struct tmem_pamops *m);
  152. /* memory allocation methods provided by the host implementation */
  153. struct tmem_hostops {
  154. struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
  155. void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
  156. struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
  157. void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
  158. };
  159. extern void tmem_register_hostops(struct tmem_hostops *m);
  160. /* core tmem accessor functions */
  161. extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
  162. struct page *page);
  163. extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
  164. struct page *page);
  165. extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
  166. uint32_t index);
  167. extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
  168. extern int tmem_destroy_pool(struct tmem_pool *);
  169. extern void tmem_new_pool(struct tmem_pool *, uint32_t);
  170. #endif /* _TMEM_H */