/* osdmap.h */
#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H

#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>

/*
 * The osd map describes the current membership of the osd cluster and
 * specifies the mapping of objects to placement groups and placement
 * groups to (sets of) osds.  That is, it completely specifies the
 * (desired) distribution of all data objects in the system at some
 * point in time.
 *
 * Each map version is identified by an epoch, which increases monotonically.
 *
 * The map can be updated either via an incremental map (diff) describing
 * the change between two successive epochs, or as a fully encoded map.
 */
/*
 * ceph_pg - raw placement group identifier: the pool it belongs to plus
 * the 32-bit pg seed (hash) within that pool.
 */
struct ceph_pg {
	uint64_t pool;
	uint32_t seed;
};

/* Three-way comparison of two pg ids; ordering defined in osdmap.c. */
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
						 together */
#define CEPH_POOL_FLAG_FULL       (1ULL << 1) /* pool is full */

/*
 * ceph_pg_pool_info - decoded per-pool metadata.
 * NOTE(review): node presumably links into ceph_osdmap.pg_pools keyed
 * by id — confirm in osdmap.c.
 */
struct ceph_pg_pool_info {
	struct rb_node node;
	s64 id;
	u8 type;		/* CEPH_POOL_TYPE_* */
	u8 size;
	u8 min_size;
	u8 crush_ruleset;
	u8 object_hash;
	u32 last_force_request_resend;
	u32 pg_num, pgp_num;
	int pg_num_mask, pgp_num_mask;
	s64 read_tier;
	s64 write_tier;		/* wins for read+write ops */
	u64 flags;		/* CEPH_POOL_FLAG_* */
	char *name;		/* NUL-terminated pool name, heap-allocated */

	bool was_full;		/* for handle_one_map() */
};
  45. static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
  46. {
  47. switch (pool->type) {
  48. case CEPH_POOL_TYPE_REP:
  49. return true;
  50. case CEPH_POOL_TYPE_EC:
  51. return false;
  52. default:
  53. BUG();
  54. }
  55. }
  56. struct ceph_object_locator {
  57. s64 pool;
  58. struct ceph_string *pool_ns;
  59. };
  60. static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
  61. {
  62. oloc->pool = -1;
  63. oloc->pool_ns = NULL;
  64. }
  65. static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
  66. {
  67. return oloc->pool == -1;
  68. }
  69. void ceph_oloc_copy(struct ceph_object_locator *dest,
  70. const struct ceph_object_locator *src);
  71. void ceph_oloc_destroy(struct ceph_object_locator *oloc);
  72. /*
  73. * Maximum supported by kernel client object name length
  74. *
  75. * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
  76. */
  77. #define CEPH_MAX_OID_NAME_LEN 100
  78. /*
  79. * 51-char inline_name is long enough for all cephfs and all but one
  80. * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
  81. * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
  82. * other rbd requests fit into inline_name.
  83. *
  84. * Makes ceph_object_id 64 bytes on 64-bit.
  85. */
  86. #define CEPH_OID_INLINE_LEN 52
  87. /*
  88. * Both inline and external buffers have space for a NUL-terminator,
  89. * which is carried around. It's not required though - RADOS object
  90. * names don't have to be NUL-terminated and may contain NULs.
  91. */
  92. struct ceph_object_id {
  93. char *name;
  94. char inline_name[CEPH_OID_INLINE_LEN];
  95. int name_len;
  96. };
  97. static inline void ceph_oid_init(struct ceph_object_id *oid)
  98. {
  99. oid->name = oid->inline_name;
  100. oid->name_len = 0;
  101. }
  102. #define CEPH_OID_INIT_ONSTACK(oid) \
  103. ({ ceph_oid_init(&oid); oid; })
  104. #define CEPH_DEFINE_OID_ONSTACK(oid) \
  105. struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)
  106. static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
  107. {
  108. return oid->name == oid->inline_name && !oid->name_len;
  109. }
/* Deep-copy @src into @dest (allocation semantics in osdmap.c). */
void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src);
/* Format into @oid; __printf enables compile-time format checking. */
__printf(2, 3)
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
/* Allocating variant; NOTE(review): error convention defined in osdmap.c. */
__printf(3, 4)
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...);
/* Release any external name buffer owned by @oid. */
void ceph_oid_destroy(struct ceph_object_id *oid);
/*
 * ceph_pg_mapping - explicit mapping override for a single pg: either a
 * temporary osd set (pg_temp) or a temporary primary (primary_temp).
 * NOTE(review): which union member is valid presumably depends on which
 * tree (ceph_osdmap.pg_temp vs .primary_temp) the node is linked into.
 */
struct ceph_pg_mapping {
	struct rb_node node;
	struct ceph_pg pgid;		/* tree key */

	union {
		struct {
			int len;	/* number of entries in osds[] */
			int osds[];	/* flexible array of osd ids */
		} pg_temp;
		struct {
			int osd;
		} primary_temp;
	};
};
/*
 * ceph_osdmap - in-memory decoded osd map: per-osd state arrays
 * (indexed by osd id, sized max_osd), per-pool info, temporary pg
 * mapping overrides, and the CRUSH map used to compute placements.
 */
struct ceph_osdmap {
	struct ceph_fsid fsid;
	u32 epoch;
	struct ceph_timespec created, modified;

	u32 flags;		/* CEPH_OSDMAP_* */

	u32 max_osd;		/* size of osd_state, _offload, _addr arrays */
	u8 *osd_state;		/* CEPH_OSD_* */
	u32 *osd_weight;	/* 0 = failed, 0x10000 = 100% normal */
	struct ceph_entity_addr *osd_addr;

	struct rb_root pg_temp;		/* ceph_pg_mapping (pg_temp side) */
	struct rb_root primary_temp;	/* ceph_pg_mapping (primary_temp side) */

	u32 *osd_primary_affinity;

	struct rb_root pg_pools;	/* ceph_pg_pool_info, keyed by id */
	u32 pool_max;

	/* the CRUSH map specifies the mapping of placement groups to
	 * the list of osds that store+replicate them. */
	struct crush_map *crush;

	struct mutex crush_scratch_mutex;
	/* scratch space for CRUSH calculations; presumably guarded by
	 * crush_scratch_mutex — confirm in osdmap.c */
	int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
};
  151. static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
  152. {
  153. return osd >= 0 && osd < map->max_osd &&
  154. (map->osd_state[osd] & CEPH_OSD_EXISTS);
  155. }
  156. static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
  157. {
  158. return ceph_osd_exists(map, osd) &&
  159. (map->osd_state[osd] & CEPH_OSD_UP);
  160. }
  161. static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
  162. {
  163. return !ceph_osd_is_up(map, osd);
  164. }
/* Format the CEPH_OSD_* state bits into @str (at most @len bytes). */
extern char *ceph_osdmap_state_str(char *str, int len, int state);
/* Look up @osd's primary-affinity value; implemented in osdmap.c. */
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
  167. static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
  168. int osd)
  169. {
  170. if (osd >= map->max_osd)
  171. return NULL;
  172. return &map->osd_addr[osd];
  173. }
/*
 * Decode an on-wire pg id at *p (bounded by @end) into @pgid,
 * advancing *p past the encoding.
 *
 * Wire layout (17 bytes): u8 version, u64 pool, u32 seed, then a
 * deprecated u32 "preferred" value that is skipped.
 *
 * Returns 0 on success, -EINVAL if the buffer is too short or the
 * encoding version is newer than we understand (> 1).
 */
static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
{
	__u8 version;

	/* need 1 (version) + 8 (pool) + 4 (seed) + 4 (preferred) bytes */
	if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
		pr_warn("incomplete pg encoding\n");
		return -EINVAL;
	}
	version = ceph_decode_8(p);
	if (version > 1) {
		pr_warn("do not understand pg encoding %d > 1\n",
			(int)version);
		return -EINVAL;
	}

	pgid->pool = ceph_decode_64(p);
	pgid->seed = ceph_decode_32(p);
	*p += 4;	/* skip deprecated preferred value */

	return 0;
}
/* Allocate an empty osdmap; free with ceph_osdmap_destroy(). */
struct ceph_osdmap *ceph_osdmap_alloc(void);
/* Decode a full map encoding at *p (bounded by @end) into a new map. */
extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
/* Apply an incremental (diff) encoding on top of @map. */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
  197. struct ceph_osds {
  198. int osds[CEPH_PG_MAX_SIZE];
  199. int size;
  200. int primary; /* id, NOT index */
  201. };
  202. static inline void ceph_osds_init(struct ceph_osds *set)
  203. {
  204. set->size = 0;
  205. set->primary = -1;
  206. }
void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);
/*
 * Decide whether the change between two maps opens a new interval for
 * this pg (old vs new up/acting sets, sizes, pg_num, sort order).
 */
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change);

/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
					 u64 off, u64 len,
					 u64 *bno, u64 *oxoff, u64 *oxlen);

/* Map (oid, oloc) to its raw pg id within the pool. */
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      struct ceph_object_id *oid,
			      struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid);
/* Compute the up and acting osd sets for a raw pg id. */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting);
/* Return the acting primary osd id for a raw pg id. */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid);

extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
						    u64 id);
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);

#endif /* _FS_CEPH_OSDMAP_H */