/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/memory_alloc.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define MAX_MEMPOOLS 8

struct mem_pool mpools[MAX_MEMPOOLS];

/* The tree contains all allocations over all memory pools */
static struct rb_root alloc_root;
static struct mutex alloc_mutex;
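
/*
 * seq_file iterators for the debugfs "map" file: walk the allocation
 * rb-tree under alloc_mutex and emit one line per tracked allocation.
 */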
static void *s_start(struct seq_file *m, loff_t *pos)
        __acquires(&alloc_mutex)
{
        loff_t n = *pos;
        struct rb_node *r;

        mutex_lock(&alloc_mutex);
        r = rb_first(&alloc_root);

        while (n > 0 && r) {
                n--;
                r = rb_next(r);
        }
        if (!n)
                return r;
        return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct rb_node *r = p;

        ++*pos;
        return rb_next(r);
}

static void s_stop(struct seq_file *m, void *p)
        __releases(&alloc_mutex)
{
        mutex_unlock(&alloc_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
        struct rb_node *r = p;
        struct alloc *node = rb_entry(r, struct alloc, rb_node);

        /* node->vaddr is stored as an unsigned long */
        seq_printf(m, "0x%pa 0x%lx %ld %u %pS\n", &node->paddr, node->vaddr,
                   node->len, node->mpool->id, node->caller);
        return 0;
}
static const struct seq_operations mempool_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};

static int mempool_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &mempool_op);
}
static struct alloc *find_alloc(phys_addr_t addr)
{
        struct rb_root *root = &alloc_root;
        struct rb_node *p = root->rb_node;

        mutex_lock(&alloc_mutex);

        while (p) {
                struct alloc *node;

                node = rb_entry(p, struct alloc, rb_node);
                if (addr < node->vaddr)
                        p = p->rb_left;
                else if (addr > node->vaddr)
                        p = p->rb_right;
                else {
                        mutex_unlock(&alloc_mutex);
                        return node;
                }
        }
        mutex_unlock(&alloc_mutex);
        return NULL;
}
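
/*
 * Insert a new tracking node into the allocation tree, keyed on vaddr.
 * A duplicate key means the range is already tracked; it is rejected
 * with -EINVAL.
 */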
static int add_alloc(struct alloc *node)
{
        struct rb_root *root = &alloc_root;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        mutex_lock(&alloc_mutex);

        while (*p) {
                struct alloc *tmp;

                parent = *p;
                tmp = rb_entry(parent, struct alloc, rb_node);

                if (node->vaddr < tmp->vaddr)
                        p = &(*p)->rb_left;
                else if (node->vaddr > tmp->vaddr)
                        p = &(*p)->rb_right;
                else {
                        WARN(1, "memory at %lx already allocated",
                             tmp->vaddr);
                        mutex_unlock(&alloc_mutex);
                        return -EINVAL;
                }
        }
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, root);
        mutex_unlock(&alloc_mutex);
        return 0;
}
static int remove_alloc(struct alloc *victim_node)
{
        struct rb_root *root = &alloc_root;

        if (!victim_node)
                return -EINVAL;

        mutex_lock(&alloc_mutex);
        rb_erase(&victim_node->rb_node, root);
        mutex_unlock(&alloc_mutex);
        return 0;
}
static struct gen_pool *initialize_gpool(phys_addr_t start,
                                         unsigned long size)
{
        struct gen_pool *gpool;

        gpool = gen_pool_create(PAGE_SHIFT, -1);
        if (!gpool)
                return NULL;

        if (gen_pool_add(gpool, start, size, -1)) {
                gen_pool_destroy(gpool);
                return NULL;
        }
        return gpool;
}
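
/*
 * Carve a page-aligned region out of the pool's gen_pool, ioremap it
 * (cached or uncached as requested) and record the mapping in the
 * allocation tree. Returns the new virtual address, or NULL on failure.
 */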
static void *__alloc(struct mem_pool *mpool, unsigned long size,
                     unsigned long align, int cached, void *caller)
{
        unsigned long paddr;
        void __iomem *vaddr;
        unsigned long aligned_size;
        int log_align = ilog2(align);
        struct alloc *node;

        aligned_size = PFN_ALIGN(size);
        paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
        if (!paddr)
                return NULL;

        node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
        if (!node)
                goto out;

        if (cached)
                vaddr = ioremap_cached(paddr, aligned_size);
        else
                vaddr = ioremap(paddr, aligned_size);
        if (!vaddr)
                goto out_kfree;

        /*
         * Just cast to an unsigned long to avoid warnings about casting from
         * a pointer to an integer of different size. The pointer is only
         * 32 bits so we lose no data.
         */
        node->vaddr = (unsigned long)vaddr;
        node->paddr = paddr;
        node->len = aligned_size;
        node->mpool = mpool;
        node->caller = caller;
        if (add_alloc(node))
                goto out_kfree;

        mpool->free -= aligned_size;
        return vaddr;
out_kfree:
        if (vaddr)
                iounmap(vaddr);
        kfree(node);
out:
        gen_pool_free(mpool->gpool, paddr, aligned_size);
        return NULL;
}
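
/*
 * Tear down an allocation previously recorded by __alloc() or the nomap
 * path: optionally iounmap the mapping, return the physical range to the
 * owning gen_pool, and drop the tracking node.
 */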
static void __free(void *vaddr, bool unmap)
{
        struct alloc *node = find_alloc((unsigned long)vaddr);

        if (!node)
                return;

        if (unmap)
                /*
                 * We need the double cast because otherwise gcc complains
                 * about cast to pointer of different size. This is
                 * technically a down cast but if unmap is being called,
                 * this had better be an actual 32-bit pointer anyway.
                 */
                iounmap((void *)(unsigned long)node->vaddr);

        gen_pool_free(node->mpool->gpool, node->paddr, node->len);
        node->mpool->free += node->len;
        remove_alloc(node);
        kfree(node);
}
static struct mem_pool *mem_type_to_memory_pool(int mem_type)
{
        struct mem_pool *mpool = &mpools[mem_type];

        if (!mpool->size)
                return NULL;

        mutex_lock(&mpool->pool_mutex);
        if (!mpool->gpool)
                mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
        mutex_unlock(&mpool->pool_mutex);

        if (!mpool->gpool)
                return NULL;

        return mpool;
}
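
/*
 * Register the physical range backing a memory type. The pool's gen_pool
 * is not created here; it is set up lazily on the first allocation by
 * mem_type_to_memory_pool().
 */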
struct mem_pool *initialize_memory_pool(phys_addr_t start,
                                        unsigned long size, int mem_type)
{
        int id = mem_type;

        if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE)
                return NULL;

        mutex_lock(&mpools[id].pool_mutex);
        mpools[id].paddr = start;
        mpools[id].size = size;
        mpools[id].free = size;
        mpools[id].id = id;
        mutex_unlock(&mpools[id].pool_mutex);

        pr_info("memory pool %d (start %pa size %lx) initialized\n",
                id, &start, size);
        return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);
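
/*
 * Illustrative use of the mapped allocation path (sketch only; the
 * mem_type value, MEMTYPE_EBI1 here, is a platform-specific constant
 * assumed for the example, not defined in this file):
 *
 *      void *buf = allocate_contiguous_memory(SZ_1M, MEMTYPE_EBI1,
 *                                             SZ_4K, 0);
 *      if (buf) {
 *              ...
 *              free_contiguous_memory(buf);
 *      }
 */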
void *allocate_contiguous_memory(unsigned long size,
        int mem_type, unsigned long align, int cached)
{
        unsigned long aligned_size = PFN_ALIGN(size);
        struct mem_pool *mpool;

        mpool = mem_type_to_memory_pool(mem_type);
        if (!mpool)
                return NULL;
        return __alloc(mpool, aligned_size, align, cached,
                __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
        int mem_type, unsigned long align, void *caller)
{
        phys_addr_t paddr;
        unsigned long aligned_size;
        struct alloc *node;
        struct mem_pool *mpool;
        int log_align = ilog2(align);

        mpool = mem_type_to_memory_pool(mem_type);
        if (!mpool || !mpool->gpool)
                return 0;

        aligned_size = PFN_ALIGN(size);
        paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
        if (!paddr)
                return 0;

        node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
        if (!node)
                goto out;

        node->paddr = paddr;

        /*
         * We search the tree using node->vaddr, so set it to something
         * unique even though we don't use it for physical allocation
         * nodes. The virtual and physical address ranges are disjoint,
         * so there won't be any chance of a duplicate node->vaddr value.
         */
        node->vaddr = paddr;
        node->len = aligned_size;
        node->mpool = mpool;
        node->caller = caller;
        if (add_alloc(node))
                goto out_kfree;

        mpool->free -= aligned_size;
        return paddr;
out_kfree:
        kfree(node);
out:
        gen_pool_free(mpool->gpool, paddr, aligned_size);
        return 0;
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);
phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
        int mem_type, unsigned long align)
{
        return _allocate_contiguous_memory_nomap(size, mem_type, align,
                __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);
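
/*
 * Illustrative use of the unmapped path (sketch; MEMTYPE_EBI1 is again an
 * assumed platform constant). The physical address returned here is what
 * must later be handed to free_contiguous_memory_by_paddr():
 *
 *      phys_addr_t phys = allocate_contiguous_memory_nomap(SZ_64K,
 *                                                          MEMTYPE_EBI1,
 *                                                          SZ_4K);
 *      if (phys)
 *              free_contiguous_memory_by_paddr(phys);
 */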
void free_contiguous_memory(void *addr)
{
        if (!addr)
                return;
        __free(addr, true);
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);

void free_contiguous_memory_by_paddr(phys_addr_t paddr)
{
        if (!paddr)
                return;
        __free((void *)(unsigned long)paddr, false);
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
phys_addr_t memory_pool_node_paddr(void *vaddr)
{
        struct alloc *node = find_alloc((unsigned long)vaddr);

        if (!node)
                return -EINVAL;

        return node->paddr;
}
EXPORT_SYMBOL_GPL(memory_pool_node_paddr);

unsigned long memory_pool_node_len(void *vaddr)
{
        struct alloc *node = find_alloc((unsigned long)vaddr);

        if (!node)
                return -EINVAL;

        return node->len;
}
EXPORT_SYMBOL_GPL(memory_pool_node_len);
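
/*
 * debugfs interface: /sys/kernel/debug/mempool/map dumps every live
 * allocation as "<paddr> <vaddr> <len> <pool id> <caller>", one per line.
 */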
static const struct file_operations mempool_operations = {
        .owner = THIS_MODULE,
        .open = mempool_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};

int __init memory_pool_init(void)
{
        int i;

        alloc_root = RB_ROOT;
        mutex_init(&alloc_mutex);

        for (i = 0; i < ARRAY_SIZE(mpools); i++) {
                mutex_init(&mpools[i].pool_mutex);
                mpools[i].gpool = NULL;
        }
        return 0;
}
static int __init debugfs_mempool_init(void)
{
        struct dentry *entry, *dir = debugfs_create_dir("mempool", NULL);

        if (!dir) {
                pr_err("Cannot create /sys/kernel/debug/mempool\n");
                return -EINVAL;
        }

        entry = debugfs_create_file("map", S_IRUSR, dir,
                NULL, &mempool_operations);
        if (!entry)
                pr_err("Cannot create /sys/kernel/debug/mempool/map\n");

        return entry ? 0 : -EINVAL;
}
module_init(debugfs_mempool_init);