nodemask.h 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537
  1. #ifndef __LINUX_NODEMASK_H
  2. #define __LINUX_NODEMASK_H
  3. /*
  4. * Nodemasks provide a bitmap suitable for representing the
  5. * set of Nodes in a system, one bit position per Node number.
  6. *
  7. * See detailed comments in the file linux/bitmap.h describing the
  8. * data type on which these nodemasks are based.
  9. *
  10. * For details of nodemask_parse_user(), see bitmap_parse_user() in
  11. * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
  12. * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
  13. * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
  14. * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
  15. * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
  16. * lib/bitmap.c.
  17. *
  18. * The available nodemask operations are:
  19. *
  20. * void node_set(node, mask) turn on bit 'node' in mask
  21. * void node_clear(node, mask) turn off bit 'node' in mask
  22. * void nodes_setall(mask) set all bits
  23. * void nodes_clear(mask) clear all bits
  24. * int node_isset(node, mask) true iff bit 'node' set in mask
  25. * int node_test_and_set(node, mask) test and set bit 'node' in mask
  26. *
  27. * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection]
  28. * void nodes_or(dst, src1, src2) dst = src1 | src2 [union]
  29. * void nodes_xor(dst, src1, src2) dst = src1 ^ src2
  30. * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2
  31. * void nodes_complement(dst, src) dst = ~src
  32. *
  33. * int nodes_equal(mask1, mask2) Does mask1 == mask2?
  34. * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect?
  35. * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2?
  36. * int nodes_empty(mask) Is mask empty (no bits set)?
  37. * int nodes_full(mask) Is mask full (all bits set)?
  38. * int nodes_weight(mask) Hamming weight - number of set bits
  39. *
  40. * void nodes_shift_right(dst, src, n) Shift right
  41. * void nodes_shift_left(dst, src, n) Shift left
  42. *
  43. * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
  44. * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
  45. * int next_node_in(node, mask) Next node past 'node', or wrap to first,
  46. * or MAX_NUMNODES
  47. * int first_unset_node(mask) First node not set in mask, or
  48. * MAX_NUMNODES
  49. *
  50. * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
  51. * NODE_MASK_ALL Initializer - all bits set
  52. * NODE_MASK_NONE Initializer - no bits set
  53. * unsigned long *nodes_addr(mask) Array of unsigned long's in mask
  54. *
  55. * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
  56. * int nodelist_parse(buf, map) Parse ascii string as nodelist
  57. * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
  58. * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
  59. * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap
  60. * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz
  61. *
  62. * for_each_node_mask(node, mask) for-loop node over mask
  63. *
  64. * int num_online_nodes() Number of online Nodes
  65. * int num_possible_nodes() Number of all possible Nodes
  66. *
  67. * int node_random(mask) Random node with set bit in mask
  68. *
  69. * int node_online(node) Is some node online?
  70. * int node_possible(node) Is some node possible?
  71. *
  72. * node_set_online(node) set bit 'node' in node_online_map
  73. * node_set_offline(node) clear bit 'node' in node_online_map
  74. *
  75. * for_each_node(node) for-loop node over node_possible_map
  76. * for_each_online_node(node) for-loop node over node_online_map
  77. *
  78. * Subtlety:
  79. * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
  80. * to generate slightly worse code. So use a simple one-line #define
  81. * for node_isset(), instead of wrapping an inline inside a macro, the
  82. * way we do the other calls.
  83. *
  84. * NODEMASK_SCRATCH
  85. * When doing above logical AND, OR, XOR, Remap operations the callers tend to
  86. * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
  87. * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
  88. * for such situations. See below and CPUMASK_ALLOC also.
  89. */
  90. #include <linux/kernel.h>
  91. #include <linux/threads.h>
  92. #include <linux/bitmap.h>
  93. #include <linux/numa.h>
  94. typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
  95. extern nodemask_t _unused_nodemask_arg_;
  96. /**
  97. * nodemask_pr_args - printf args to output a nodemask
  98. * @maskp: nodemask to be printed
  99. *
  100. * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
  101. */
  102. #define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
  103. /*
  104. * The inline keyword gives the compiler room to decide to inline, or
  105. * not inline a function as it sees best. However, as these functions
  106. * are called in both __init and non-__init functions, if they are not
  107. * inlined we will end up with a section mis-match error (of the type of
  108. * freeable items not being freed). So we must use __always_inline here
  109. * to fix the problem. If other functions in the future also end up in
  110. * this situation they will also need to be annotated as __always_inline
  111. */
  112. #define node_set(node, dst) __node_set((node), &(dst))
  113. static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
  114. {
  115. set_bit(node, dstp->bits);
  116. }
  117. #define node_clear(node, dst) __node_clear((node), &(dst))
  118. static inline void __node_clear(int node, volatile nodemask_t *dstp)
  119. {
  120. clear_bit(node, dstp->bits);
  121. }
  122. #define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
  123. static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
  124. {
  125. bitmap_fill(dstp->bits, nbits);
  126. }
  127. #define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
  128. static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
  129. {
  130. bitmap_zero(dstp->bits, nbits);
  131. }
  132. /* No static inline type checking - see Subtlety (1) above. */
  133. #define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
  134. #define node_test_and_set(node, nodemask) \
  135. __node_test_and_set((node), &(nodemask))
  136. static inline int __node_test_and_set(int node, nodemask_t *addr)
  137. {
  138. return test_and_set_bit(node, addr->bits);
  139. }
  140. #define nodes_and(dst, src1, src2) \
  141. __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
  142. static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
  143. const nodemask_t *src2p, unsigned int nbits)
  144. {
  145. bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
  146. }
  147. #define nodes_or(dst, src1, src2) \
  148. __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
  149. static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
  150. const nodemask_t *src2p, unsigned int nbits)
  151. {
  152. bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
  153. }
  154. #define nodes_xor(dst, src1, src2) \
  155. __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
  156. static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
  157. const nodemask_t *src2p, unsigned int nbits)
  158. {
  159. bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
  160. }
  161. #define nodes_andnot(dst, src1, src2) \
  162. __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
  163. static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
  164. const nodemask_t *src2p, unsigned int nbits)
  165. {
  166. bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
  167. }
  168. #define nodes_complement(dst, src) \
  169. __nodes_complement(&(dst), &(src), MAX_NUMNODES)
  170. static inline void __nodes_complement(nodemask_t *dstp,
  171. const nodemask_t *srcp, unsigned int nbits)
  172. {
  173. bitmap_complement(dstp->bits, srcp->bits, nbits);
  174. }
  175. #define nodes_equal(src1, src2) \
  176. __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
  177. static inline int __nodes_equal(const nodemask_t *src1p,
  178. const nodemask_t *src2p, unsigned int nbits)
  179. {
  180. return bitmap_equal(src1p->bits, src2p->bits, nbits);
  181. }
  182. #define nodes_intersects(src1, src2) \
  183. __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
  184. static inline int __nodes_intersects(const nodemask_t *src1p,
  185. const nodemask_t *src2p, unsigned int nbits)
  186. {
  187. return bitmap_intersects(src1p->bits, src2p->bits, nbits);
  188. }
  189. #define nodes_subset(src1, src2) \
  190. __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
  191. static inline int __nodes_subset(const nodemask_t *src1p,
  192. const nodemask_t *src2p, unsigned int nbits)
  193. {
  194. return bitmap_subset(src1p->bits, src2p->bits, nbits);
  195. }
  196. #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
  197. static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
  198. {
  199. return bitmap_empty(srcp->bits, nbits);
  200. }
  201. #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
  202. static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
  203. {
  204. return bitmap_full(srcp->bits, nbits);
  205. }
  206. #define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
  207. static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
  208. {
  209. return bitmap_weight(srcp->bits, nbits);
  210. }
  211. #define nodes_shift_right(dst, src, n) \
  212. __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
  213. static inline void __nodes_shift_right(nodemask_t *dstp,
  214. const nodemask_t *srcp, int n, int nbits)
  215. {
  216. bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
  217. }
  218. #define nodes_shift_left(dst, src, n) \
  219. __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
  220. static inline void __nodes_shift_left(nodemask_t *dstp,
  221. const nodemask_t *srcp, int n, int nbits)
  222. {
  223. bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
  224. }
  225. /* FIXME: better would be to fix all architectures to never return
  226. > MAX_NUMNODES, then the silly min_ts could be dropped. */
  227. #define first_node(src) __first_node(&(src))
  228. static inline int __first_node(const nodemask_t *srcp)
  229. {
  230. return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
  231. }
  232. #define next_node(n, src) __next_node((n), &(src))
  233. static inline int __next_node(int n, const nodemask_t *srcp)
  234. {
  235. return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
  236. }
/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
/* Implemented out of line -- presumably in lib/; TODO confirm location. */
int __next_node_in(int node, const nodemask_t *srcp);
/* init_nodemask_of_node - make *@mask contain exactly the single bit @node */
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);	/* start from the empty mask... */
	node_set(node, *mask);	/* ...then set just the one bit */
}
/*
 * nodemask_of_node - yield (as a GCC statement-expression rvalue) a
 * nodemask with only bit @node set.  When the whole mask fits in one
 * unsigned long, a direct word assignment both sets the bit and zeroes
 * the rest; otherwise the generic clear-then-set helper is used.  The
 * sizeof comparison is a compile-time constant, so one branch is
 * eliminated entirely.
 */
#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})
  258. #define first_unset_node(mask) __first_unset_node(&(mask))
  259. static inline int __first_unset_node(const nodemask_t *maskp)
  260. {
  261. return min_t(int,MAX_NUMNODES,
  262. find_first_zero_bit(maskp->bits, MAX_NUMNODES));
  263. }
/* Mask covering only the valid bits of the last word of a MAX_NUMNODES bitmap */
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG
/* Single-word mask: initializer with all MAX_NUMNODES bits set */
#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })
#else
/* Multi-word mask: all-ones words, then a partial mask in the last word */
#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })
#endif

/* Initializer with no bits set */
#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] =  0UL			\
} })
  281. #define nodes_addr(src) ((src).bits)
  282. #define nodemask_parse_user(ubuf, ulen, dst) \
  283. __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
  284. static inline int __nodemask_parse_user(const char __user *buf, int len,
  285. nodemask_t *dstp, int nbits)
  286. {
  287. return bitmap_parse_user(buf, len, dstp->bits, nbits);
  288. }
  289. #define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
  290. static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
  291. {
  292. return bitmap_parselist(buf, dstp->bits, nbits);
  293. }
  294. #define node_remap(oldbit, old, new) \
  295. __node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
  296. static inline int __node_remap(int oldbit,
  297. const nodemask_t *oldp, const nodemask_t *newp, int nbits)
  298. {
  299. return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
  300. }
  301. #define nodes_remap(dst, src, old, new) \
  302. __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
  303. static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
  304. const nodemask_t *oldp, const nodemask_t *newp, int nbits)
  305. {
  306. bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
  307. }
  308. #define nodes_onto(dst, orig, relmap) \
  309. __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
  310. static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
  311. const nodemask_t *relmapp, int nbits)
  312. {
  313. bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
  314. }
  315. #define nodes_fold(dst, orig, sz) \
  316. __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
  317. static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
  318. int sz, int nbits)
  319. {
  320. bitmap_fold(dstp->bits, origp->bits, sz, nbits);
  321. }
  322. #if MAX_NUMNODES > 1
  323. #define for_each_node_mask(node, mask) \
  324. for ((node) = first_node(mask); \
  325. (node) < MAX_NUMNODES; \
  326. (node) = next_node((node), (mask)))
  327. #else /* MAX_NUMNODES == 1 */
  328. #define for_each_node_mask(node, mask) \
  329. if (!nodes_empty(mask)) \
  330. for ((node) = 0; (node) < 1; (node)++)
  331. #endif /* MAX_NUMNODES */
/*
 * Bitmasks that are kept for all the nodes.  Each enumerator indexes one
 * nodemask in the node_states[] array declared below.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,	/* no highmem: alias N_NORMAL_MEMORY */
#endif
#ifdef CONFIG_MOVABLE_NODE
	N_MEMORY,		/* The node has memory(regular, high, movable) */
#else
	N_MEMORY = N_HIGH_MEMORY,	/* no movable nodes: alias N_HIGH_MEMORY */
#endif
	N_CPU,		/* The node has one or more cpus */
	NR_NODE_STATES		/* number of distinct state masks (array size) */
};
  352. /*
  353. * The following particular system nodemasks and operations
  354. * on them manage all possible and online nodes.
  355. */
  356. extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1

/* node_state - true iff @node's bit is set in the @state mask */
static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

/* node_set_state - set @node's bit in the @state mask */
static inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

/* node_clear_state - clear @node's bit in the @state mask */
static inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

/* num_node_state - number of nodes currently in the @state mask */
static inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

/* for_each_node_state - iterate @__node over every node in the @__state mask */
#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])

/* next_online_node - next online node after @nid, or MAX_NUMNODES */
static inline int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}

/* next_memory_node - next node with memory after @nid, or MAX_NUMNODES */
static inline int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern int nr_node_ids;		/* defined elsewhere -- presumably highest
				 * possible node id + 1; TODO confirm at
				 * the definition site */
extern int nr_online_nodes;	/* cached num_node_state(N_ONLINE); kept
				 * current by the two helpers below */

/* node_set_online - mark @nid online and refresh the nr_online_nodes cache */
static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

/* node_set_offline - mark @nid offline and refresh the nr_online_nodes cache */
static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else /* MAX_NUMNODES == 1: node 0 is the only node; trivial stubs */

static inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static inline void node_set_state(int node, enum node_states state)
{
}

static inline void node_clear_state(int node, enum node_states state)
{
}

static inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1
#define nr_online_nodes		1

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
/* node_random - pick a random node whose bit is set in *@maskp (out of line) */
extern int node_random(const nodemask_t *maskp);
#else
/* Single-node configuration: node 0 is the only possible answer. */
static inline int node_random(const nodemask_t *mask)
{
	return 0;
}
#endif
/* Convenience aliases built on node_states[] and the accessors above. */
#define node_online_map 	node_states[N_ONLINE]
#define node_possible_map 	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
 * For nodemask scratch area.
 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
 * name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
/* Large masks: allocate from the heap to keep stack frames small. */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
/* Small masks: a hidden stack variable is cheap; FREE becomes a no-op. */
#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;	/* first temporary mask */
	nodemask_t	mask2;	/* second temporary mask */
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)
  461. #endif /* __LINUX_NODEMASK_H */