pci_dma.c

/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

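/*
 * The CPU-side DMA translation tables form a three-level hierarchy:
 * region-table entries point to segment tables, whose entries in turn
 * point to page tables containing the per-page translation entries.
 * The helpers below allocate and invalidate the individual tables;
 * dma_alloc_cpu_table() is used for both the region and segment level.
 */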
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;

		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

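/*
 * Walk the translation tables for @dma_addr starting at region-table
 * origin @rto, allocating missing segment and page tables on the way,
 * and return a pointer to the page-table entry for that address
 * (NULL if a table allocation fails).
 */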
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

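/*
 * Install @page_addr in a single page-table entry or invalidate it, and
 * set or clear the entry's protection bit according to @flags.
 */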
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

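/*
 * Update the CPU-side translation entries for @size bytes of physically
 * contiguous memory at @pa, one page at a time, under dma_table_lock.
 * If establishing a mapping fails part way through, the entries written
 * so far are invalidated again before returning the error.
 */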
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		return 0;

	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				  PAGE_ALIGN(size));
}

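/*
 * Update the translation tables and purge the device's IOTLB for the
 * affected range. If the TLB refresh fails while establishing a mapping,
 * the just-created translations are torn down again so no stale entries
 * remain.
 */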
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				boundary_size, 0);
}

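/*
 * Allocate @size pages of DMA address space from the iommu bitmap,
 * searching from next_bit first. If that fails, in lazy (non-strict) mode
 * a global TLB refresh is issued and lazily freed addresses are returned
 * to the bitmap, before retrying from the start of the range.
 */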
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!zdev->tlb_refresh && !s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_ERROR_CODE;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (zdev->tlb_refresh || s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

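/*
 * map_page callback: allocate a DMA address range covering the buffer,
 * establish the translations for it (marked protected for DMA_NONE and
 * DMA_TO_DEVICE), and return the DMA address including the in-page offset.
 */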
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_ERROR_CODE) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_ERROR_CODE)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

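/*
 * map_sg callback: merge consecutive scatterlist elements into as few
 * DMA-contiguous mappings as possible. A new mapping is started whenever
 * an element cannot continue the current one page-aligned or the merged
 * size would exceed the device's maximum segment size. Returns the number
 * of DMA segments created, or 0 on error.
 */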
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address,
					     s->dma_length, dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

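/*
 * Set up DMA translation for a zPCI device: allocate the root translation
 * table and the iommu allocation bitmap(s), compute the usable DMA address
 * range, and register the translation table with the hardware via
 * zpci_register_ioat().
 */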
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!zdev->tlb_refresh && !s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;

	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;

	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);