omap-iovmm.c

/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;
/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
        if (!sgt || !sgt->nents)
                return 0;

        return sgt->sgl->offset;
}
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
                               __func__, i, bytes, sg->offset);
                        return 0;
                }

                if (i && sg->offset) {
                        pr_err("%s: sg[%d] offset not allowed in internal entries\n",
                               __func__, i);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))
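
/*
 * Note on the layout sgtable_len() accepts: only the first entry may carry
 * a byte offset. E.g. a two-entry table of {offset 0x800, length 0x800}
 * followed by {offset 0, length PAGE_SIZE} sums to 0x1000 + PAGE_SIZE,
 * while a non-zero offset on any later entry is rejected.
 */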
static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;

        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
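
/*
 * Worked example: max_alignment(0x40300000) rejects SZ_16M (0x300000 is set
 * below the 16MB boundary) and returns SZ_1M, since the address is
 * 1MB-aligned. An address that is not even 4KB-aligned yields 0.
 */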
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08zx\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;

                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
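
/*
 * E.g. 17MB (0x1100000) starting at da = pa = 0 splits into two entries,
 * one 16MB superpage followed by one 1MB section, whereas the same size at
 * an address that is only 4KB-aligned degenerates into 4KB entries.
 */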
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}
/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg) - sg->offset;
                bytes = sg->length + sg->offset;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
                                            const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end,
                                len, tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}
/**
 * omap_find_iovm_area - find iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
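
/*
 * The walk above is a first-fit search over the address-sorted iovma list:
 * the candidate 'start' is pushed past each occupied range (rounded up to
 * the superpage alignment when IOVMF_LINEAR is set) until a gap of at least
 * @bytes opens before the next area, or past the last area up to
 * obj->da_end.
 */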
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);

        kmem_cache_free(iovm_area_cachep, area);
}
/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given
 * device virtual address
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg) - sg->offset;
                bytes = sg->length + sg->offset;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0) {
                        err = -EINVAL;
                        goto err_out;
                }

                pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, bytes, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }

        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, bytes);

                da += bytes;
        }

        return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                            struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i;
        size_t unmapped;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length + sg->offset;

                unmapped = iommu_unmap(domain, start, bytes);
                if (unmapped < bytes)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}
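
/*
 * The 'fn' callback passed to unmap_vm_area() below is the mpu-side release
 * hook matching how the area's va was created: vunmap_sg() for areas set up
 * by omap_iommu_vmap(), vfree() for buffers from omap_iommu_vmalloc().
 */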
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct omap_iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}
static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
                            u32 da, const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        err = map_iovm_area(domain, new, sgt, new->flags);
        if (err)
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
             u32 da, const struct sg_table *sgt,
             void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
                    const struct sg_table *sgt, u32 flags)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                vunmap_sg(va);
                return da;
        }

        return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
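
/*
 * Usage sketch (hypothetical caller; everything except the exported API
 * below is illustrative):
 *
 *      sgt = my_driver_build_sgt();                    (caller-owned table)
 *      da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_MMIO);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...device uses 'da'...
 *      sgt = omap_iommu_vunmap(domain, dev, da);       (returns the same table)
 */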
/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        da &= PAGE_MASK;
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                            IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
                   size_t bytes, u32 flags)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
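
/*
 * Usage sketch (hypothetical caller):
 *
 *      da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      va = omap_da_to_va(dev, da);    (mpu-side view of the same buffer)
 *      ...fill or read the buffer...
 *      omap_iommu_vfree(domain, dev, da);
 */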
/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
                      const u32 da)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                            IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);
static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");