msm_iommu_sec.c

/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kmemleak.h>

#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v1.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/scm.h>
#include <mach/memory.h>
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* commands for SCM_SVC_MP */
#define IOMMU_SECURE_CFG	2
#define IOMMU_SECURE_PTBL_SIZE	3
#define IOMMU_SECURE_PTBL_INIT	4
#define IOMMU_SET_CP_POOL_SIZE	5
#define IOMMU_SECURE_MAP	6
#define IOMMU_SECURE_UNMAP	7
#define IOMMU_SECURE_MAP2	0x0B
#define IOMMU_SECURE_UNMAP2	0x0C
#define IOMMU_TLBINVAL_FLAG	0x00000001

/* commands for SCM_SVC_UTIL */
#define IOMMU_DUMP_SMMU_FAULT_REGS	0x0C

#define MAXIMUM_VIRT_SIZE	(300 * SZ_1M)

#define MAKE_CP_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
static struct iommu_access_ops *iommu_access_ops;

static const struct of_device_id msm_smmu_list[] = {
	{ .compatible = "qcom,msm-smmu-v1", },
	{ .compatible = "qcom,msm-smmu-v2", },
	{ }
};

struct msm_scm_paddr_list {
	unsigned int list;
	unsigned int list_size;
	unsigned int size;
};

struct msm_scm_mapping_info {
	unsigned int id;
	unsigned int ctx_id;
	unsigned int va;
	unsigned int size;
};

struct msm_scm_map2_req {
	struct msm_scm_paddr_list plist;
	struct msm_scm_mapping_info info;
	unsigned int flags;
};

struct msm_scm_unmap2_req {
	struct msm_scm_mapping_info info;
	unsigned int flags;
};

struct msm_cp_pool_size {
	uint32_t size;
	uint32_t spare;
};

#define NUM_DUMP_REGS 14
/*
 * some space to allow the number of registers returned by the secure
 * environment to grow
 */
#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)

struct msm_scm_fault_regs_dump {
	uint32_t dump_size;
	uint32_t dump_data[SEC_DUMP_SIZE];
} __packed;
void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
{
	iommu_access_ops = access_ops;
}
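
/*
 * Ask the secure environment to dump the fault registers of one SMMU
 * context bank into @regs. The buffer is handed to TZ by physical
 * address, so the cache lines covering it are invalidated afterwards to
 * make the data written by the secure side visible to the CPU.
 */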
static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
				struct msm_scm_fault_regs_dump *regs)
{
	int ret;
	struct msm_scm_fault_regs_dump_req {
		uint32_t id;
		uint32_t cb_num;
		phys_addr_t buff;
		uint32_t len;
	} req_info;
	int resp;

	req_info.id = smmu_id;
	req_info.cb_num = cb_num;
	req_info.buff = virt_to_phys(regs);
	req_info.len = sizeof(*regs);

	ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
		&req_info, sizeof(req_info), &resp, 1);

	invalidate_caches((unsigned long) regs, sizeof(*regs),
		(unsigned long)virt_to_phys(regs));

	return ret;
}
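
/*
 * Translate the (register address, value) pairs returned by the secure
 * dump into the ctx_regs[] table used by print_ctx_regs(). Registers are
 * matched by their offset within the context bank. Returns nonzero if
 * the dump is malformed or a mandatory register is missing.
 */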
#define EXTRACT_DUMP_REG_KEY(addr, ctx) (addr & ((1 << CTX_SHIFT) - 1))

static int msm_iommu_reg_dump_to_regs(
	struct msm_iommu_context_reg ctx_regs[],
	struct msm_scm_fault_regs_dump *dump, int cb_num)
{
	int i, j, ret = 0;
	const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
	uint32_t *it = (uint32_t *) dump->dump_data;
	const uint32_t * const end = ((uint32_t *) dump) + nvals;

	for (i = 1; it < end; it += 2, i += 2) {
		uint32_t addr = *it;
		uint32_t val = *(it + 1);
		struct msm_iommu_context_reg *reg = NULL;

		for (j = 0; j < MAX_DUMP_REGS; ++j) {
			if (dump_regs_tbl[j].key ==
				EXTRACT_DUMP_REG_KEY(addr, cb_num)) {
				reg = &ctx_regs[j];
				break;
			}
		}

		if (reg == NULL) {
			pr_debug("Unknown register in secure CB dump: %x (%x)\n",
				addr, EXTRACT_DUMP_REG_KEY(addr, cb_num));
			continue;
		}

		if (reg->valid) {
			WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
				addr);
			continue;
		}

		reg->val = val;
		reg->valid = true;
	}

	if (i != nvals) {
		pr_err("Invalid dump! %d != %d\n", i, nvals);
		ret = 1;
		goto out;
	}

	for (i = 0; i < MAX_DUMP_REGS; ++i) {
		if (!ctx_regs[i].valid) {
			if (dump_regs_tbl[i].must_be_present) {
				pr_err("Register missing from dump: %s, %lx\n",
					dump_regs_tbl[i].name,
					dump_regs_tbl[i].key);
				ret = 1;
			}
			ctx_regs[i].val = 0;
		}
	}

out:
	return ret;
}
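
/*
 * IRQ handler for page faults on secure context banks. The fault
 * registers live behind TZ, so they are fetched with an SCM call and the
 * fault is reported through report_iommu_fault(); if nobody handles it,
 * the context registers are printed for debugging.
 */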
irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_scm_fault_regs_dump *regs;
	int tmp, ret = IRQ_HANDLED;

	iommu_access_ops->iommu_lock_acquire(0);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	regs = kmalloc(sizeof(*regs), GFP_KERNEL);
	if (!regs) {
		pr_err("%s: Couldn't allocate memory\n", __func__);
		goto lock_release;
	}

	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault from secure context bank!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		goto free_regs;
	}

	iommu_access_ops->iommu_clk_on(drvdata);
	tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
					ctx_drvdata->num, regs);
	iommu_access_ops->iommu_clk_off(drvdata);

	if (tmp) {
		pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
			__func__, tmp, drvdata->name, ctx_drvdata->num);
		goto free_regs;
	} else {
		struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];

		memset(ctx_regs, 0, sizeof(ctx_regs));
		tmp = msm_iommu_reg_dump_to_regs(ctx_regs, regs,
						ctx_drvdata->num);
		if (!tmp && ctx_regs[DUMP_REG_FSR].val) {
			if (!ctx_drvdata->attached_domain) {
				pr_err("Bad domain in interrupt handler\n");
				tmp = -ENOSYS;
			} else {
				tmp = report_iommu_fault(
					ctx_drvdata->attached_domain,
					&ctx_drvdata->pdev->dev,
					COMBINE_DUMP_REG(
						ctx_regs[DUMP_REG_FAR1].val,
						ctx_regs[DUMP_REG_FAR0].val),
					0);
			}

			/* if the fault wasn't handled by someone else: */
			if (tmp == -ENOSYS) {
				pr_err("Unexpected IOMMU page fault from secure context bank!\n");
				pr_err("name = %s\n", drvdata->name);
				pr_err("context = %s (%d)\n", ctx_drvdata->name,
					ctx_drvdata->num);
				pr_err("Interesting registers:\n");
				print_ctx_regs(ctx_regs);
			}
		} else {
			ret = IRQ_NONE;
		}
	}

free_regs:
	kfree(regs);
lock_release:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}
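
/*
 * One-time setup of the secure page-table pool. If a secure SMMU is
 * described in the device tree, ask TZ how much memory it needs
 * (IOMMU_SECURE_PTBL_SIZE), allocate that buffer, and hand it over with
 * IOMMU_SECURE_PTBL_INIT. On newer TZ versions the content-protection
 * pool size is programmed first.
 */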
static int msm_iommu_sec_ptbl_init(void)
{
	struct device_node *np;
	struct msm_scm_ptbl_init {
		unsigned int paddr;
		unsigned int size;
		unsigned int spare;
	} pinit;
	unsigned int *buf;
	int psize[2] = {0, 0};
	unsigned int spare;
	int ret, ptbl_ret = 0;
	int version;

	for_each_matching_node(np, msm_smmu_list)
		if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
				of_device_is_available(np))
			break;

	if (!np)
		return 0;

	of_node_put(np);

	version = scm_get_feat_version(SCM_SVC_MP);

	if (version >= MAKE_CP_VERSION(1, 1, 1)) {
		struct msm_cp_pool_size psize;
		int retval;

		psize.size = MAXIMUM_VIRT_SIZE;
		psize.spare = 0;

		ret = scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE, &psize,
				sizeof(psize), &retval, sizeof(retval));
		if (ret) {
			pr_err("scm call IOMMU_SET_CP_POOL_SIZE failed\n");
			goto fail;
		}
	}

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
			sizeof(spare), psize, sizeof(psize));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		goto fail;
	}

	if (psize[1]) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		goto fail;
	}

	buf = kmalloc(psize[0], GFP_KERNEL);
	if (!buf) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
			__func__, psize[0]);
		ret = -ENOMEM;
		goto fail;
	}

	pinit.paddr = virt_to_phys(buf);
	pinit.size = psize[0];

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
		goto fail_mem;
	}
	if (ptbl_ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
		goto fail_mem;
	}

	kmemleak_not_leak(buf);

	return 0;

fail_mem:
	kfree(buf);
fail:
	return ret;
}
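
/*
 * Ask the secure environment to program the global configuration of the
 * SMMU identified by sec_id (IOMMU_SECURE_CFG).
 */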
int msm_iommu_sec_program_iommu(int sec_id)
{
	struct msm_scm_sec_cfg {
		unsigned int id;
		unsigned int spare;
	} cfg;
	int ret, scm_ret = 0;

	cfg.id = sec_id;

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
			&scm_ret, sizeof(scm_ret));
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return ret ? ret : -EINVAL;
	}

	return ret;
}
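
/*
 * Map a single physically contiguous region through TZ. A one-entry
 * physical address list is built on the stack and passed by physical
 * address; the caches covering it are cleaned before the SCM call and
 * invalidated afterwards.
 */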
static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, phys_addr_t pa, size_t len)
{
	struct msm_scm_map2_req map;
	void *flush_va;
	phys_addr_t flush_pa;
	int ret = 0;

	map.plist.list = virt_to_phys(&pa);
	map.plist.list_size = 1;
	map.plist.size = len;
	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;
	map.flags = IOMMU_TLBINVAL_FLAG;

	flush_va = &pa;
	flush_pa = virt_to_phys(&pa);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	clean_caches((unsigned long) flush_va, len, flush_pa);

	if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map), &ret,
			sizeof(ret)))
		return -EINVAL;
	if (ret)
		return -EINVAL;

	/* Invalidate cache since TZ touched this address range */
	invalidate_caches((unsigned long) flush_va, len, flush_pa);
	return 0;
}
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}
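
/*
 * Map a scatterlist through TZ. If the first entry already covers the
 * whole length this degenerates to a single-entry map; otherwise the
 * region is described to TZ as a list of 1 MB chunks, which assumes each
 * scatterlist entry is a multiple of SZ_1M.
 */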
static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, struct scatterlist *sg, size_t len)
{
	struct scatterlist *sgiter;
	struct msm_scm_map2_req map;
	unsigned int *pa_list = 0;
	unsigned int pa, cnt;
	void *flush_va;
	unsigned int offset = 0, chunk_offset = 0;
	int ret, scm_ret;

	map.info.id = iommu_drvdata->sec_id;
	map.info.ctx_id = ctx_drvdata->num;
	map.info.va = va;
	map.info.size = len;
	map.flags = IOMMU_TLBINVAL_FLAG;

	if (sg->length == len) {
		pa = get_phys_addr(sg);
		map.plist.list = virt_to_phys(&pa);
		map.plist.list_size = 1;
		map.plist.size = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter)))
			cnt += sgiter->length / SZ_1M;

		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		sgiter = sg;
		cnt = 0;
		pa = get_phys_addr(sgiter);
		while (offset < len) {
			pa += chunk_offset;
			pa_list[cnt] = pa;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = get_phys_addr(sgiter);
			}
		}

		map.plist.list = virt_to_phys(pa_list);
		map.plist.list_size = cnt;
		map.plist.size = SZ_1M;
		flush_va = pa_list;
	}

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	clean_caches((unsigned long) flush_va,
		sizeof(unsigned long) * map.plist.list_size,
		virt_to_phys(flush_va));

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map),
			&scm_ret, sizeof(scm_ret));
	kfree(pa_list);
	return ret;
}
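
/*
 * Remove a mapping through TZ (IOMMU_SECURE_UNMAP2), requesting a TLB
 * invalidation for the range.
 */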
static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
			struct msm_iommu_ctx_drvdata *ctx_drvdata,
			unsigned long va, size_t len)
{
	struct msm_scm_unmap2_req unmap;
	int ret, scm_ret;

	unmap.info.id = iommu_drvdata->sec_id;
	unmap.info.ctx_id = ctx_drvdata->num;
	unmap.info.va = va;
	unmap.info.size = len;
	unmap.flags = IOMMU_TLBINVAL_FLAG;

	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &unmap, sizeof(unmap),
			&scm_ret, sizeof(scm_ret));
	return ret;
}
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list_attached);
	domain->priv = priv;
	return 0;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;

	iommu_access_ops->iommu_lock_acquire(0);
	priv = domain->priv;
	domain->priv = NULL;

	kfree(priv);
	iommu_access_ops->iommu_lock_release(0);
}
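
/*
 * Attach a context bank device to a domain: power and clock the IOMMU on,
 * have TZ program the global configuration the first time a context is
 * attached, and track the context on the domain's attached list.
 */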
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire(0);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
	if (ret)
		goto fail;

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}

		ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);

		/* bfb settings are always programmed by HLOS */
		program_iommu_bfb_settings(iommu_drvdata->base,
					iommu_drvdata->bfb_settings);

		iommu_access_ops->iommu_clk_off(iommu_drvdata);
		if (ret) {
			iommu_access_ops->iommu_power_off(iommu_drvdata);
			goto fail;
		}
	}

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

	iommu_access_ops->iommu_lock_release(0);

	msm_iommu_attached(dev->parent);
	return ret;

fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				struct device *dev)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	msm_iommu_detached(dev->parent);

	iommu_access_ops->iommu_lock_acquire(0);
	if (!dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto fail;

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;

	iommu_access_ops->iommu_power_off(iommu_drvdata);
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
fail:
	iommu_access_ops->iommu_lock_release(0);
}
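
/*
 * Look up the context bank attached to @domain and return its driver data
 * together with the driver data of the parent IOMMU.
 */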
static int get_drvdata(struct iommu_domain *domain,
			struct msm_iommu_drvdata **iommu_drvdata,
			struct msm_iommu_ctx_drvdata **ctx_drvdata)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_ctx_drvdata *ctx;

	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
		if (ctx->attached_domain == domain)
			break;
	}

	if (ctx->attached_domain != domain)
		return -EINVAL;

	*ctx_drvdata = ctx;
	*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
	return 0;
}
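
/*
 * The iommu_ops map/unmap callbacks below are thin wrappers: they resolve
 * the attached context, switch the clocks on around the secure page-table
 * call, and hold the driver lock for the duration.
 */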
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
					va, pa, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			size_t len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = -ENODEV;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
					va, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			struct scatterlist *sg, unsigned int len,
			int prot)
{
	int ret;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
						va, sg, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
			unsigned int len)
{
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	iommu_access_ops->iommu_lock_acquire(0);

	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
	if (ret)
		goto fail;

	iommu_access_ops->iommu_clk_on(iommu_drvdata);
	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
	iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
	iommu_access_ops->iommu_lock_release(0);
	return 0;
}
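
/*
 * The secure page tables are owned and walked by TZ, so HLOS cannot
 * resolve IOVAs or expose the page-table base; these callbacks simply
 * return 0.
 */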
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					unsigned long va)
{
	return 0;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				unsigned long cap)
{
	return 0;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
static int __init msm_iommu_sec_init(void)
{
	int ret;

	ret = bus_register(&msm_iommu_sec_bus_type);
	if (ret)
		goto fail;

	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
	ret = msm_iommu_sec_ptbl_init();
fail:
	return ret;
}

subsys_initcall(msm_iommu_sec_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU Secure Driver");