usnic_uiom.c

/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;
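
/*
 * Max number of scatterlist entries per chunk: the space left in one
 * page after the usnic_uiom_chunk header, divided by the size of a
 * single page_list entry.
 */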
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
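
/*
 * Deferred locked_vm accounting, run from usnic_uiom_wq when
 * usnic_uiom_reg_release() could not take mmap_sem itself; drops the
 * mm reference taken there and frees the registration.
 */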
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);

	mmput(umem->mm);
	kfree(umem);
}
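
/*
 * Fault handler installed on every PD's IOMMU domain: log the faulting
 * device and IOVA, and return -ENOSYS to indicate the fault is not
 * handled here.
 */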
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}
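
/*
 * Release a chunk list built by usnic_uiom_get_pages(): optionally
 * mark each page dirty, drop its pin, and free the chunks.
 */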
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
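
/*
 * Pin the user pages backing [addr, addr + size), charging them
 * against RLIMIT_MEMLOCK, and collect them into a list of scatterlist
 * chunks.  On any failure, pages pinned so far are released before
 * returning.
 */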
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}
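
/*
 * Unmap each interval from the PD's IOMMU domain.  Per the inline
 * comment below, unmapping proceeds one PAGE_SIZE at a time as a
 * workaround for what appears to be Red Hat bug 970401.
 */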
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
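
/*
 * Tear down one registration under pd->lock: remove its VPN range from
 * the interval tree, unmap it from the IOMMU domain, and unpin the
 * pages, dirtying them only if requested and some removed interval was
 * writable.
 */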
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}
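
/*
 * Map each interval into the IOMMU domain, coalescing physically
 * contiguous pages so that a run of contiguous PAs becomes a single
 * iommu_map() call; a run is flushed when contiguity breaks or when
 * the last page of the interval is reached.
 */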
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
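
/*
 * Register a user memory region with a PD: pin the pages, compute the
 * VPN sub-ranges not already present in the interval tree, map only
 * those into the IOMMU domain, then record the full range in the tree.
 */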
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map call returns an error if a translation
	 * entry is changed from read-only to writable.  This module
	 * cannot unmap and then remap the entry after fixing the
	 * permission, because that would open a small window where a
	 * hardware DMA may page fault.  Hence, make all entries
	 * writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}
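
/*
 * Release a registration and return its pages to locked_vm accounting.
 * On the closing path mmap_sem may already be held, so the accounting
 * is deferred to usnic_uiom_wq if the lock cannot be taken.
 */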
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;
			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}
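
/*
 * Allocate a protection domain backed by a new IOMMU domain on the PCI
 * bus, with usnic_uiom_dma_fault() as its fault handler.
 */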
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}
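
/*
 * Attach a device to the PD's IOMMU domain.  The IOMMU must support
 * cache coherency, since all mappings are created with IOMMU_CACHE.
 */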
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}
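
/*
 * Detach a device from the PD's IOMMU domain and drop its entry from
 * pd->devs.
 */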
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	/* Free the tracking struct allocated in usnic_uiom_attach_dev_to_pd() */
	kfree(uiom_dev);

	iommu_detach_device(pd->domain, dev);
}
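
/*
 * Return a NULL-terminated snapshot of the devices attached to the PD;
 * the caller frees it with usnic_uiom_free_dev_list().
 */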
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;

out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}
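
/*
 * Module setup: require an IOMMU on the PCI bus and create the
 * workqueue used for deferred locked_vm accounting.
 */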
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}