ion_iommu_map.c

/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/ion.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <mach/iommu_domains.h>

enum {
	DI_PARTITION_NUM = 0,
	DI_DOMAIN_NUM = 1,
	DI_MAX,
};

#define iommu_map_domain(__m)		((__m)->domain_info[DI_DOMAIN_NUM])
#define iommu_map_partition(__m)	((__m)->domain_info[DI_PARTITION_NUM])

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr - iommu virtual address
 * @node - rb node to exist in the buffer's tree of iommu mappings
 * @domain_info - contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @ref - for reference counting this mapping
 * @mapped_size - size of the iova space mapped
 *		(may not be the same as the buffer size)
 * @flags - iommu domain/partition specific flags.
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[DI_MAX];
		uint64_t key;
	};
	struct ion_iommu_meta *meta;
	struct kref ref;
	int mapped_size;
	unsigned long flags;
};
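
/*
 * Note: the anonymous union above lets a (domain, partition) pair be compared
 * as one 64-bit lookup key. ion_iommu_lookup() builds the same key as
 * (domain_no << 32 | partition_no); with the little-endian layout used on
 * these targets, domain_info[DI_PARTITION_NUM] occupies the low 32 bits and
 * domain_info[DI_DOMAIN_NUM] the high 32 bits, so e.g. domain 2, partition 1
 * compares as key 0x0000000200000001.
 */
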
struct ion_iommu_meta {
	struct rb_node node;
	struct ion_handle *handle;
	struct rb_root iommu_maps;
	struct kref ref;
	struct sg_table *table;
	unsigned long size;
	struct mutex lock;
	struct dma_buf *dbuf;
};

static struct rb_root iommu_root;
DEFINE_MUTEX(msm_iommu_map_mutex);

static void ion_iommu_meta_add(struct ion_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_meta, node);

		if (meta->table < entry->table) {
			p = &(*p)->rb_left;
		} else if (meta->table > entry->table) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: handle %p already exists\n", __func__,
				entry->handle);
			BUG();
		}
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct ion_iommu_meta *ion_iommu_meta_lookup(struct sg_table *table)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_meta, node);

		if (table < entry->table)
			p = &(*p)->rb_left;
		else if (table > entry->table)
			p = &(*p)->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void ion_iommu_add(struct ion_iommu_meta *meta,
			struct ion_iommu_map *iommu)
{
	struct rb_node **p = &meta->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: handle %p already has mapping for domain %d and partition %d\n",
				__func__,
				meta->handle,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &meta->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_iommu_meta *meta,
					unsigned int domain_no,
					unsigned int partition_no)
{
	struct rb_node **p = &meta->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;

	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}
	return NULL;
}

static int ion_iommu_map_iommu(struct ion_iommu_meta *meta,
					struct ion_iommu_map *data,
					unsigned int domain_num,
					unsigned int partition_num,
					unsigned long align,
					unsigned long iova_length,
					unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra, size;
	struct sg_table *table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	size = meta->size;
	data->mapped_size = iova_length;
	extra = iova_length - size;
	table = meta->table;

	/* Use the biggest alignment to allow bigger IOMMU mappings.
	 * Use the first entry since the first entry will always be the
	 * biggest entry. To take advantage of bigger mapping sizes both the
	 * VA and PA addresses have to be aligned to the biggest size.
	 */
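	/* For example (hypothetical numbers): if the first sg entry is a 1 MB
	 * physically contiguous chunk and the allocated iova is also 1 MB
	 * aligned, an SMMU that supports section mappings can cover it with a
	 * single 1 MB entry rather than 256 4 KB entries.
	 */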
	if (sg_dma_len(table->sgl) > align)
		align = sg_dma_len(table->sgl);

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);
	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      table->sgl,
			      size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + size;
		unsigned long phys_addr = sg_phys(table->sgl);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				size);
out:
	return ret;
}

static void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_iommu_meta *meta,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		ion_phys_addr_t *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return ERR_PTR(-ENOMEM);

	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = ion_iommu_map_iommu(meta, data,
				domain_num,
				partition_num,
				align,
				iova_length,
				flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;
	data->meta = meta;

	ion_iommu_add(meta, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

static struct ion_iommu_meta *ion_iommu_meta_create(struct ion_client *client,
						struct ion_handle *handle,
						struct sg_table *table,
						unsigned long size)
{
	struct ion_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	meta->handle = handle;
	meta->table = table;
	meta->size = size;
	meta->dbuf = ion_share_dma_buf(client, handle);
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	ion_iommu_meta_add(meta);

	return meta;
}

static void ion_iommu_meta_destroy(struct kref *kref)
{
	struct ion_iommu_meta *meta = container_of(kref, struct ion_iommu_meta,
						ref);

	rb_erase(&meta->node, &iommu_root);
	dma_buf_put(meta->dbuf);
	kfree(meta);
}

static void ion_iommu_meta_put(struct ion_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent race against map/unmap
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, ion_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, ion_phys_addr_t *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_iommu_map *iommu_map;
	struct ion_iommu_meta *iommu_meta = NULL;
	struct sg_table *table;
	struct scatterlist *sg;
	int ret = 0;
	int i;
	unsigned long size = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return PTR_ERR(table);

	for_each_sg(table->sgl, sg, table->nents, i)
		size += sg_dma_len(sg);

	if (!msm_use_iommu()) {
		unsigned long pa = sg_dma_address(table->sgl);

		if (pa == 0)
			pa = sg_phys(table->sgl);
		*iova = pa;
		*buffer_size = size;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = size;

	if (size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %lx\n",
			__func__, iova_length, size);
		ret = -EINVAL;
		goto out;
	}

	if (size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
			size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = ion_iommu_meta_lookup(table);
	if (!iommu_meta)
		iommu_meta = ion_iommu_meta_create(client, handle, table, size);
	else
		kref_get(&iommu_meta->ref);

	BUG_ON(iommu_meta->size != size);
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = ion_iommu_lookup(iommu_meta, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(iommu_meta, domain_num,
					    partition_num, align, iova_length,
					    flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;
			ret = 0;
		} else {
			ret = PTR_ERR(iommu_map);
			goto out_unlock;
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
			goto out_unlock;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
			goto out_unlock;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	mutex_unlock(&iommu_meta->lock);
	*buffer_size = size;
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	/* the early size/alignment checks fail before a meta exists */
	if (iommu_meta)
		ion_iommu_meta_put(iommu_meta);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);
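
/*
 * Example usage (sketch, not from this file): a kernel client that already
 * holds an ion client and handle could map the buffer into an IOMMU domain
 * roughly as follows. The domain and partition numbers are placeholders;
 * real values come from the target's <mach/iommu_domains.h>.
 *
 *	ion_phys_addr_t iova;
 *	unsigned long buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &buffer_size, 0, 0);
 *	if (ret)
 *		return ret;
 *
 *	... program the device with iova and buffer_size ...
 *
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 */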

static void ion_iommu_map_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_iommu_meta *meta = map->meta;

	rb_erase(&map->node, &meta->iommu_maps);
	ion_iommu_heap_unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_iommu_meta *meta;
	struct sg_table *table;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}

	table = ion_sg_table(client, handle);

	mutex_lock(&msm_iommu_map_mutex);
	meta = ion_iommu_meta_lookup(table);
	if (!meta) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, handle);
		mutex_unlock(&msm_iommu_map_mutex);
		return;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = ion_iommu_lookup(meta, domain_num, partition_num);
	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, handle);
		mutex_unlock(&meta->lock);
		return;
	}

	kref_put(&iommu_map->ref, ion_iommu_map_release);
	mutex_unlock(&meta->lock);

	ion_iommu_meta_put(meta);
}
EXPORT_SYMBOL(ion_unmap_iommu);