/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>

#include "uverbs.h"
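
/*
 * IB_UMEM_MAX_PAGE_CHUNK is the number of scatterlist entries that fit
 * in a single page alongside the chunk header: take PAGE_SIZE, subtract
 * the offset of page_list[], and divide by the stride of one entry.
 * As an illustration (sizes are config-dependent): with 4 KB pages, a
 * 24-byte chunk header, and 32-byte scatterlist entries, that works out
 * to (4096 - 24) / 32 = 127 entries per chunk.
 */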
#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

/*
 * Unmap and unpin every page on the umem's chunk list, marking pages
 * dirty first when the umem was mapped writable and @dirty is set,
 * then free the chunks themselves.
 */
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct ib_umem_chunk *chunk, *tmp;
	int i;

	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
		ib_dma_unmap_sg(dev, chunk->page_list,
				chunk->nents, DMA_BIDIRECTIONAL);
		for (i = 0; i < chunk->nents; ++i) {
			struct page *page = sg_page(&chunk->page_list[i]);

			if (umem->writable && dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}

		kfree(chunk);
	}
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	struct ib_umem_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	DEFINE_DMA_ATTRS(attrs);

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kmalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->offset    = addr & ~PAGE_MASK;
	umem->page_size = PAGE_SIZE;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	INIT_LIST_HEAD(&umem->chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = 0;
	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, vma_list);

		if (ret < 0)
			goto out;

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for (i = 0; i < chunk->nents; ++i) {
				if (vma_list &&
				    !is_vm_hugetlb_page(vma_list[i + off]))
					umem->hugetlb = 0;
				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
			}

			chunk->nmap = ib_dma_map_sg_attrs(context->device,
							  &chunk->page_list[0],
							  chunk->nents,
							  DMA_BIDIRECTIONAL,
							  &attrs);
			if (chunk->nmap <= 0) {
				for (i = 0; i < chunk->nents; ++i)
					put_page(sg_page(&chunk->page_list[i]));
				kfree(chunk);

				ret = -ENOMEM;
				goto out;
			}

			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, &umem->chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		__ib_umem_release(context->device, umem, 0);
		kfree(umem);
	} else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
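
/*
 * Example (illustrative, not part of this file): a driver's reg_user_mr
 * method typically wraps ib_umem_get() along the lines of the sketch
 * below.  "struct my_mr" and hw_program_mr() are hypothetical stand-ins
 * for a driver's own MR type and hardware setup; compare
 * mthca_reg_user_mr() or mlx4_ib_reg_user_mr() for real callers.
 *
 *	struct my_mr *mr = kmalloc(sizeof *mr, GFP_KERNEL);
 *	if (!mr)
 *		return ERR_PTR(-ENOMEM);
 *
 *	// Any flag besides IB_ACCESS_REMOTE_READ makes the umem writable.
 *	mr->umem = ib_umem_get(pd->uobject->context, start, length,
 *			       access_flags, 0);
 *	if (IS_ERR(mr->umem)) {
 *		kfree(mr);
 *		return ERR_CAST(mr->umem);
 *	}
 *
 *	ret = hw_program_mr(pd->device, mr);	// device-specific setup
 *	if (ret) {
 *		ib_umem_release(mr->umem);
 *		kfree(mr);
 *		return ERR_PTR(ret);
 *	}
 *	return &mr->ibmr;
 */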

/*
 * Deferred locked_vm accounting for ib_umem_release(), run from the
 * system workqueue when the mmap_sem could not be taken directly.
 */
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	unsigned long diff;

	__ib_umem_release(umem->context->device, umem, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(umem);
		return;
	}

	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
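
/*
 * Example (illustrative): the matching teardown in a driver's dereg_mr
 * method is usually just device cleanup followed by ib_umem_release();
 * hw_destroy_mr() below is a hypothetical device-specific helper.
 *
 *	hw_destroy_mr(dev, mr);
 *	ib_umem_release(mr->umem);
 *	kfree(mr);
 */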

/**
 * ib_umem_page_count - number of device pages covered by a umem
 * @umem: umem to count pages in
 *
 * Counts in units of umem->page_size, using the DMA-mapped length of
 * each scatterlist entry.
 */
int ib_umem_page_count(struct ib_umem *umem)
{
	struct ib_umem_chunk *chunk;
	int shift;
	int i;
	int n;

	shift = ilog2(umem->page_size);

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i)
			n += sg_dma_len(&chunk->page_list[i]) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
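
/*
 * Example (illustrative): drivers commonly size a translation table with
 * ib_umem_page_count() and then walk the chunk list to collect DMA
 * addresses, along these lines (variable names hypothetical; compare
 * mthca_reg_user_mr()):
 *
 *	struct ib_umem_chunk *chunk;
 *	int shift = ilog2(umem->page_size);
 *	int n = 0, i, j;
 *	u64 *pages;
 *
 *	pages = kmalloc(ib_umem_page_count(umem) * sizeof *pages, GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	list_for_each_entry(chunk, &umem->chunk_list, list)
 *		for (i = 0; i < chunk->nmap; ++i) {
 *			int len = sg_dma_len(&chunk->page_list[i]) >> shift;
 *			for (j = 0; j < len; ++j)
 *				pages[n++] = sg_dma_address(&chunk->page_list[i]) +
 *					     umem->page_size * j;
 *		}
 */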