kmemcheck.h

#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
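
/*
 * Illustrative example (not part of the original header): annotating a whole
 * variable rather than a bitfield. "struct a" reuses the hypothetical type
 * from the comment above, and the variable name is made up.
 *
 *	struct a obj;
 *
 *	kmemcheck_annotate_variable(obj);
 *
 * After this call, kmemcheck treats all sizeof(obj) bytes starting at &obj
 * as initialized, so subsequent reads of obj no longer produce
 * uninitialized-memory reports.
 */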
#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						       unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)

#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */