cma_debug.c 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * CMA DebugFS Interface
  4. *
  5. * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
  6. */
  7. #include <linux/debugfs.h>
  8. #include <linux/cma.h>
  9. #include <linux/list.h>
  10. #include <linux/kernel.h>
  11. #include <linux/slab.h>
  12. #include <linux/mm_types.h>
  13. #include "cma.h"
/*
 * Bookkeeping record for one allocation made through the "alloc" debugfs
 * file; kept on cma->mem_head so the "free" file can release it later.
 */
struct cma_mem {
	struct hlist_node node;	/* link in cma->mem_head list */
	struct page *p;		/* first page of the allocated range */
	unsigned long n;	/* number of pages in the range */
};
/* Root directory ("cma") of the debugfs hierarchy, created at late init. */
static struct dentry *cma_debugfs_root;
  20. static int cma_debugfs_get(void *data, u64 *val)
  21. {
  22. unsigned long *p = data;
  23. *val = *p;
  24. return 0;
  25. }
  26. DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
  27. static int cma_used_get(void *data, u64 *val)
  28. {
  29. struct cma *cma = data;
  30. unsigned long used;
  31. mutex_lock(&cma->lock);
  32. /* pages counter is smaller than sizeof(int) */
  33. used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
  34. mutex_unlock(&cma->lock);
  35. *val = (u64)used << cma->order_per_bit;
  36. return 0;
  37. }
  38. DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
/*
 * Report the largest contiguous run of free pages in this CMA area.
 *
 * Walks cma->bitmap alternating between find_next_zero_bit() (start of
 * a free run) and find_next_bit() (end of that run), tracking the
 * longest run seen.  The chunk count is converted to pages by shifting
 * with cma->order_per_bit.
 */
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		/* Next free chunk at or after the end of the previous run. */
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		/* End of this free run: the next allocated chunk (or maxno). */
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);

	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
  58. static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
  59. {
  60. spin_lock(&cma->mem_head_lock);
  61. hlist_add_head(&mem->node, &cma->mem_head);
  62. spin_unlock(&cma->mem_head_lock);
  63. }
  64. static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
  65. {
  66. struct cma_mem *mem = NULL;
  67. spin_lock(&cma->mem_head_lock);
  68. if (!hlist_empty(&cma->mem_head)) {
  69. mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
  70. hlist_del_init(&mem->node);
  71. }
  72. spin_unlock(&cma->mem_head_lock);
  73. return mem;
  74. }
/*
 * Release up to @count pages previously allocated through the "alloc"
 * debugfs file, consuming recorded allocations from the list.
 *
 * Records no larger than the remaining count are released whole.  A
 * record bigger than the remainder is split only when
 * order_per_bit == 0 (any page count is then a valid CMA release
 * unit); otherwise it is put back on the list and the walk stops.
 *
 * Always returns 0; running out of recorded allocations is not an
 * error.
 */
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		/*
		 * NOTE(review): mem->n is unsigned long, so a negative
		 * @count would convert to a huge unsigned value in this
		 * comparison; callers are expected to pass count >= 0.
		 */
		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			/* Partial release: shrink the record and keep it. */
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}
  100. static int cma_free_write(void *data, u64 val)
  101. {
  102. int pages = val;
  103. struct cma *cma = data;
  104. return cma_free_mem(cma, pages);
  105. }
  106. DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
  107. static int cma_alloc_mem(struct cma *cma, int count)
  108. {
  109. struct cma_mem *mem;
  110. struct page *p;
  111. mem = kzalloc(sizeof(*mem), GFP_KERNEL);
  112. if (!mem)
  113. return -ENOMEM;
  114. p = cma_alloc(cma, count, 0, GFP_KERNEL);
  115. if (!p) {
  116. kfree(mem);
  117. return -ENOMEM;
  118. }
  119. mem->p = p;
  120. mem->n = count;
  121. cma_add_to_cma_mem_list(cma, mem);
  122. return 0;
  123. }
  124. static int cma_alloc_write(void *data, u64 val)
  125. {
  126. int pages = val;
  127. struct cma *cma = data;
  128. return cma_alloc_mem(cma, pages);
  129. }
  130. DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
/*
 * Create the per-area debugfs directory "cma-<name>" and populate it:
 *
 *   alloc / free       - write a page count to allocate/release pages
 *   base_pfn, count,
 *   order_per_bit      - read-only dumps of the struct cma fields
 *   used, maxchunk     - computed usage statistics
 *   bitmap             - raw allocation bitmap exposed as a u32 array
 *
 * @idx is currently unused.
 */
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	scnprintf(name, sizeof(name), "cma-%s", cma->name);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma,
				&cma_alloc_fops);
	debugfs_create_file("free", S_IWUSR, tmp, cma,
				&cma_free_fops);
	debugfs_create_file("base_pfn", S_IRUGO, tmp,
				&cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
				&cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
				&cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	/* Round the bitmap length up to whole u32 words for the array dump. */
	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s);
}
  153. static int __init cma_debugfs_init(void)
  154. {
  155. int i;
  156. cma_debugfs_root = debugfs_create_dir("cma", NULL);
  157. if (!cma_debugfs_root)
  158. return -ENOMEM;
  159. for (i = 0; i < cma_area_count; i++)
  160. cma_debugfs_add_one(&cma_areas[i], i);
  161. return 0;
  162. }
  163. late_initcall(cma_debugfs_init);