mtk_iommu.c

/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)

#define REG_MMU_IVRP_PADDR			0x114
#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TLB_MISS_FAULT			BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134

#define REG_MMU_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_MSK			0xfffff000
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU_INVLD_PA			0x140
#define REG_MMU_INT_ID				0x150
#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128

struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static struct iommu_ops mtk_iommu_ops;

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

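/*
 * TLB maintenance callbacks for the io-pgtable framework. A full flush
 * invalidates the whole TLB; a ranged flush posts the start/end iova and
 * must be followed by a tlb_sync that polls REG_MMU_CPE_DONE for
 * completion.
 */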
static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);

	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
}

static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
					tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(cookie);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};

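/*
 * Fault handler: decode the faulting iova, physical address, larb/port
 * and access type from the hardware registers, report the fault, then
 * clear the interrupt and flush the TLB so stale entries are dropped.
 */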
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
	fault_port = F_MMU0_INT_ID_PORT_ID(regval);

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

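/*
 * Walk all larb/port pairs of a client device and update the cached SMI
 * mmu enable bits; the SMI larb driver applies them to the larb hardware.
 */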
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_iommu_client_priv *head, *cur, *next;
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;

	head = dev->archdata.iommu;
	list_for_each_entry_safe(cur, next, &head->client, client) {
		larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
		portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

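/*
 * Called when the first device attaches: allocate the ARM short-descriptor
 * (v7s) page table and program its base address into the M4U.
 */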
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	if (data->enable_4GB)
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

	writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
	       data->base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

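/*
 * Only IOMMU_DOMAIN_DMA is supported; a DMA cookie provides the iova
 * allocator for the domain.
 */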
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;
	int ret;

	if (!priv)
		return -ENODEV;

	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	} else if (data->m4u_dom != dom) {
		/* All the client devices should be in the same m4u domain */
		dev_err(dev, "can't attach to a different iommu domain\n");
		return -EPERM;
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;

	if (!priv)
		return;

	data = dev_get_drvdata(priv->m4udev);
	mtk_iommu_config(data, dev, false);
}

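/*
 * The map, unmap and iova_to_phys paths are thin wrappers around the
 * io-pgtable ops, serialized by the page table spinlock.
 */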
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->archdata.iommu) /* Not an iommu client device */
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_client_priv *head, *cur, *next;

	head = dev->archdata.iommu;
	if (!head)
		return;

	list_for_each_entry_safe(cur, next, &head->client, client) {
		list_del(&cur->client);
		kfree(cur);
	}
	kfree(head);
	dev->archdata.iommu = NULL;

	iommu_group_remove_device(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct mtk_iommu_client_priv *priv;

	priv = dev->archdata.iommu;
	if (!priv)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	}
	return data->m4u_group;
}

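/*
 * Translate one "iommus" phandle argument (a larb/port id) for a client
 * device: the first call allocates the per-device list head, later calls
 * append one entry per port.
 */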
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct mtk_iommu_client_priv *head, *priv, *next;
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->archdata.iommu) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOMEM;

		dev->archdata.iommu = head;
		INIT_LIST_HEAD(&head->client);
		head->m4udev = &m4updev->dev;
	} else {
		head = dev->archdata.iommu;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto err_free_mem;

	priv->mtk_m4u_id = args->args[0];
	list_add_tail(&priv->client, &head->client);

	return 0;

err_free_mem:
	list_for_each_entry_safe(priv, next, &head->client, client)
		kfree(priv);
	kfree(head);
	dev->archdata.iommu = NULL;
	return -ENOMEM;
}

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

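/*
 * One-time hardware setup: enable the bus clock, configure translation
 * fault protection and the protect-buffer address, unmask the fault
 * interrupts and install the fault handler.
 */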
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
		F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed to request irq %d\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

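/*
 * The M4U serves several SMI local arbiters (larbs). Each larb is added
 * as a component so the master binds only after all larbs have probed.
 */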
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;

	/* Protect memory: the HW accesses this region on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current dram is over 4GB */
	data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode))
			continue;

		plarbdev = of_find_device_by_node(larbnode);
		of_node_put(larbnode);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larbnode, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev)
				return -EPROBE_DEFER;
		}
		data->smi_imu.larb_imu[i].dev = &plarbdev->dev;

		component_match_add(dev, &match, compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	free_io_pgtable_ops(data->m4u_dom->iop);
	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

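/*
 * Register state is lost across system suspend: save the control
 * registers on suspend, and on resume restore them along with the page
 * table base and the protect-buffer address.
 */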
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
		       base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       base + REG_MMU_IVRP_PADDR);
	return 0;
}

const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt8173-m4u", },
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

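/*
 * Bootstrap via IOMMU_OF_DECLARE: create the M4U platform device and
 * register the driver early, so the IOMMU ops are attached to the node
 * before client devices are probed.
 */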
static int mtk_iommu_init_fn(struct device_node *np)
{
	int ret;
	struct platform_device *pdev;

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENOMEM;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		return ret;
	}

	of_iommu_set_ops(np, &mtk_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);