msm_iommu.c

/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"    \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;
        struct device *dev;
        spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}

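/*
 * Clock helpers: "smmu_pclk" is mandatory, the optional "iommu_clk" is
 * only toggled when present. Callers bracket register accesses with
 * these helpers.
 */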
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

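/*
 * Bring the IOMMU and all of its context banks into a known, disabled
 * state. Called once from probe before the hardware is sanity-checked.
 */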
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}

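/*
 * TLB maintenance callbacks for io-pgtable. Every attached IOMMU is
 * walked and the invalidation is issued per context bank: the whole
 * context at once (TLBIALL) or page by page with the context's ASID
 * folded into the address (TLBIVA).
 */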
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the tlb sync operation is implicitly
         * taken care when the iommu client does a writel before
         * kick starting the other master.
         */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};

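/*
 * Context banks are handed out from a bitmap; the test_and_set_bit()
 * retry loop makes the allocation safe against concurrent callers.
 */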
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

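/*
 * __reset_context() clears every per-context register.
 * __program_context() then loads the ARMv7 short-descriptor page table
 * configuration produced by io-pgtable (TTBRs, TTBCR, PRRR/NMRR) and
 * enables translation and context fault reporting for the bank.
 */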
static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

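/*
 * Only unmanaged domains are supported; the aperture spans the whole
 * 32-bit IOVA space.
 */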
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        struct iommu_group *group;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (iommu)
                iommu_device_link(&iommu->iommu, dev);
        else
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (iommu)
                iommu_device_unlink(&iommu->iommu, dev);

        iommu_group_remove_device(dev);
}

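/*
 * Attaching a device walks the global IOMMU list, matches the master
 * by of_node, allocates a context bank for each master, programs its
 * MIDs and page table, and links the IOMMU onto the domain.
 */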
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        msm_iommu_domain_config(priv);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}

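/*
 * Translate an IOVA by asking the hardware: issue a V2P probe on the
 * first attached context bank and read the result back from PAR,
 * distinguishing supersection results from page-sized ones.
 */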
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR = %08x PAR = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
               (fsr & 0x02) ? "TF " : "",
               (fsr & 0x04) ? "AFF " : "",
               (fsr & 0x08) ? "APF " : "",
               (fsr & 0x10) ? "TLBMF " : "",
               (fsr & 0x20) ? "HTWDEEF " : "",
               (fsr & 0x40) ? "HTWSEEF " : "",
               (fsr & 0x80) ? "MHF " : "",
               (fsr & 0x10000) ? "SL " : "",
               (fsr & 0x40000000) ? "SS " : "",
               (fsr & 0x80000000) ? "MULTI " : "");
        pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0 = %08x TTBR1 = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR = %08x ACTLR = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

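/*
 * Device-tree glue: qcom_iommu_of_xlate() resolves the IOMMU instance
 * referenced by the client's phandle and insert_iommu_master() records
 * the client's stream IDs (MIDs), ignoring duplicates.
 */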
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev->archdata.iommu = master;
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
                                 sid);
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
                if (iommu->dev->of_node == spec->np)
                        break;

        if (!iommu || iommu->dev->of_node != spec->np) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

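/*
 * Context fault interrupt handler: scan every context bank, dump the
 * fault registers of any bank with a pending FSR and then clear it.
 */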
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};

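/*
 * Probe: map the register space, prepare the clocks, reset the
 * hardware, sanity-check translation with a V2P round trip on context
 * 0, request the fault IRQ and register with the IOMMU core.
 */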
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENODEV;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        /* Stash the device data so msm_iommu_remove() can retrieve it. */
        platform_set_drvdata(pdev, iommu);

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe = msm_iommu_probe,
        .remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
        int ret;

        ret = platform_driver_register(&msm_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register IOMMU driver\n");

        return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
        platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

static int __init msm_iommu_init(void)
{
        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
        return 0;
}

static int __init msm_iommu_of_setup(struct device_node *np)
{
        msm_iommu_init();
        return 0;
}

IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");