kmemcheck.c

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>
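
/*
 * Attach a freshly allocated shadow block to each page in the 2^order
 * block, then hide the pages from the MMU so every access faults into
 * kmemcheck for analysis.
 */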
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
        struct page *shadow;
        int pages;
        int i;

        pages = 1 << order;

        /*
         * With kmemcheck enabled, we need to allocate a memory area for the
         * shadow bits as well.
         */
        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
        if (!shadow) {
                if (printk_ratelimit())
                        printk(KERN_ERR "kmemcheck: failed to allocate "
                                "shadow bitmap\n");
                return;
        }

        for (i = 0; i < pages; ++i)
                page[i].shadow = page_address(&shadow[i]);

        /*
         * Mark it as non-present for the MMU so that our accesses to
         * this memory will trigger a page fault and let us analyze
         * the memory accesses.
         */
        kmemcheck_hide_pages(page, pages);
}
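
/*
 * Reverse of kmemcheck_alloc_shadow(): make the pages visible to the MMU
 * again, detach the shadow pointers, and free the shadow block.
 */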
void kmemcheck_free_shadow(struct page *page, int order)
{
        struct page *shadow;
        int pages;
        int i;

        if (!kmemcheck_page_is_tracked(page))
                return;

        pages = 1 << order;

        kmemcheck_show_pages(page, pages);

        shadow = virt_to_page(page[0].shadow);

        for (i = 0; i < pages; ++i)
                page[i].shadow = NULL;

        __free_pages(shadow, order);
}
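
/*
 * Decide how the shadow of a newly allocated slab object is marked,
 * based on the cache flags and the allocation's gfp flags.
 */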
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                          size_t size)
{
        /*
         * Has already been memset(), which initializes the shadow for us
         * as well.
         */
        if (gfpflags & __GFP_ZERO)
                return;

        /* No need to initialize the shadow of a non-tracked slab. */
        if (s->flags & SLAB_NOTRACK)
                return;

        if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
                /*
                 * Allow notracked objects to be allocated from
                 * tracked caches. Note however that these objects
                 * will still get page faults on access, they just
                 * won't ever be flagged as uninitialized. If page
                 * faults are not acceptable, the slab cache itself
                 * should be marked NOTRACK.
                 */
                kmemcheck_mark_initialized(object, size);
        } else if (!s->ctor) {
                /*
                 * New objects should be marked uninitialized before
                 * they're returned to the caller.
                 */
                kmemcheck_mark_uninitialized(object, size);
        }
}
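
/*
 * Mark a slab object's shadow as freed so that later accesses to the
 * stale object can be reported. Skipped for constructed objects and
 * for caches freed by RCU.
 */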
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
        /* TODO: RCU freeing is unsupported for now; hide false positives. */
        if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
                kmemcheck_mark_freed(object, size);
}
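
/*
 * Page allocator hook: set up shadow pages for a new 2^order block and
 * mark its initial shadow state as initialized or uninitialized.
 */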
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
                               gfp_t gfpflags)
{
        int pages;

        if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
                return;

        pages = 1 << order;

        /*
         * NOTE: We choose to track GFP_ZERO pages too; in fact, they
         * can become uninitialized by copying uninitialized memory
         * into them.
         */

        /* XXX: Can use zone->node for node? */
        kmemcheck_alloc_shadow(page, order, gfpflags, -1);

        if (gfpflags & __GFP_ZERO)
                kmemcheck_mark_initialized_pages(page, pages);
        else
                kmemcheck_mark_uninitialized_pages(page, pages);
}