msm_iommu_dev-v1.c

/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
/*
 * Assumed to be needed for of_platform_populate() and devm_regulator_get()
 * below, unless the mach headers already pull these in indirectly.
 */
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>
#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/iommu_perfmon.h>
#include <mach/msm_bus.h>

static struct of_device_id msm_iommu_v1_ctx_match_table[];

#ifdef CONFIG_IOMMU_LPAE
static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
#else
static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
#endif
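
/*
 * Parse the BFB register/value settings for this IOMMU from the device tree.
 * The BFB_REG_NODE_NAME and BFB_DATA_NODE_NAME properties must either both be
 * present with matching lengths or both be absent; any other combination is
 * rejected with -EINVAL.  The parsed settings are stored in
 * drvdata->bfb_settings.
 */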
static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
					struct msm_iommu_drvdata *drvdata)
{
	struct msm_iommu_bfb_settings *bfb_settings;
	u32 nreg, nval;
	int ret;

	/*
	 * It is not valid for a device to have the BFB_REG_NODE_NAME
	 * property but not the BFB_DATA_NODE_NAME property, and vice versa.
	 */
	if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
		if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
				    &nval))
			return -EINVAL;
		return 0;
	}

	if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
		return -EINVAL;

	if (nreg >= sizeof(bfb_settings->regs))
		return -EINVAL;

	if (nval >= sizeof(bfb_settings->data))
		return -EINVAL;

	if (nval != nreg)
		return -EINVAL;

	bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
				    GFP_KERNEL);
	if (!bfb_settings)
		return -ENOMEM;

	ret = of_property_read_u32_array(pdev->dev.of_node,
					 BFB_REG_NODE_NAME,
					 bfb_settings->regs,
					 nreg / sizeof(*bfb_settings->regs));
	if (ret)
		return ret;

	ret = of_property_read_u32_array(pdev->dev.of_node,
					 BFB_DATA_NODE_NAME,
					 bfb_settings->data,
					 nval / sizeof(*bfb_settings->data));
	if (ret)
		return ret;

	bfb_settings->length = nreg / sizeof(*bfb_settings->regs);

	drvdata->bfb_settings = bfb_settings;

	return 0;
}
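
/*
 * Register a bus-bandwidth voting client for this IOMMU if the device tree
 * node carries "qcom,msm-bus,name" bus-scaling data.  Returns 0 when no bus
 * scaling is specified for the node.
 */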
static int __get_bus_vote_client(struct platform_device *pdev,
				 struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;
	struct msm_bus_scale_pdata *bs_table;
	const char *dummy;

	/* Check whether bus scaling has been specified for this node */
	ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
				      &dummy);
	if (ret)
		return 0;

	bs_table = msm_bus_cl_get_pdata(pdev);
	if (bs_table) {
		drvdata->bus_client = msm_bus_scale_register_client(bs_table);
		/*
		 * msm_bus_scale_register_client() returns a handle that is
		 * zero on failure; IS_ERR() on the address of the member can
		 * never be true, so check the handle itself.
		 */
		if (!drvdata->bus_client) {
			pr_err("%s(): Bus client register failed.\n",
			       __func__);
			ret = -EINVAL;
		}
	}

	return ret;
}

static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
{
	msm_bus_scale_unregister_client(drvdata->bus_client);
	drvdata->bus_client = 0;
}
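
/*
 * Secure-configuration helpers.  When CONFIG_IOMMU_NON_SECURE is set these
 * become no-ops that leave the secure id untouched and mark every context as
 * non-secure; otherwise they read "qcom,iommu-secure-id" and
 * "qcom,secure-context" from the device tree.
 */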
#ifdef CONFIG_IOMMU_NON_SECURE
static inline void get_secure_id(struct device_node *node,
				 struct msm_iommu_drvdata *drvdata)
{
}

static inline void get_secure_ctx(struct device_node *node,
				  struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	ctx_drvdata->secure_context = 0;
}
#else
static void get_secure_id(struct device_node *node,
			  struct msm_iommu_drvdata *drvdata)
{
	of_property_read_u32(node, "qcom,iommu-secure-id", &drvdata->sec_id);
}

static void get_secure_ctx(struct device_node *node,
			   struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	ctx_drvdata->secure_context =
			of_property_read_bool(node, "qcom,secure-context");
}
#endif
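
/*
 * Parse the IOMMU device tree node: register the bus vote client, read the
 * BFB settings, count the child context-bank nodes, map the optional clock
 * register space and populate the child context devices.  On failure the bus
 * vote client is released again.
 */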
static int msm_iommu_parse_dt(struct platform_device *pdev,
			      struct msm_iommu_drvdata *drvdata)
{
	struct device_node *child;
	int ret = 0;
	struct resource *r;

	drvdata->dev = &pdev->dev;

	ret = __get_bus_vote_client(pdev, drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
	if (ret)
		goto fail;

	for_each_child_of_node(pdev->dev.of_node, child)
		drvdata->ncb++;

	drvdata->asid = devm_kzalloc(&pdev->dev, drvdata->ncb * sizeof(int),
				     GFP_KERNEL);
	if (!drvdata->asid) {
		pr_err("Unable to get memory for asid array\n");
		ret = -ENOMEM;
		goto fail;
	}

	ret = of_property_read_string(pdev->dev.of_node, "label",
				      &drvdata->name);
	if (ret)
		goto fail;

	drvdata->sec_id = -1;
	get_secure_id(pdev->dev.of_node, drvdata);

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clk_base");
	if (r) {
		drvdata->clk_reg_virt = devm_ioremap(&pdev->dev, r->start,
						     resource_size(r));
		if (!drvdata->clk_reg_virt) {
			pr_err("Failed to map resource for iommu clk: %pr\n",
			       r);
			ret = -ENOMEM;
			goto fail;
		}
	}

	drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
						      "qcom,iommu-enable-halt");

	ret = of_platform_populate(pdev->dev.of_node,
				   msm_iommu_v1_ctx_match_table,
				   NULL, &pdev->dev);
	if (ret) {
		pr_err("Failed to create iommu context device\n");
		goto fail;
	}

	msm_iommu_add_drv(drvdata);

	return 0;

fail:
	__put_bus_vote_client(drvdata);
	return ret;
}
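
/*
 * Parse the performance-monitor properties for this IOMMU.  The monitor is
 * only set up when an event interrupt is present; otherwise evt_irq is set
 * to -1 and the (negative) platform_get_irq() result is returned so that the
 * caller treats the monitor as unavailable.
 */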
static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
				   struct iommu_pmon *pmon_info)
{
	int ret = 0;
	int irq = platform_get_irq(pdev, 0);
	unsigned int cls_prop_size;

	if (irq > 0) {
		pmon_info->iommu.evt_irq = platform_get_irq(pdev, 0);

		ret = of_property_read_u32(pdev->dev.of_node,
					   "qcom,iommu-pmu-ngroups",
					   &pmon_info->num_groups);
		if (ret) {
			pr_err("Error reading qcom,iommu-pmu-ngroups\n");
			goto fail;
		}

		ret = of_property_read_u32(pdev->dev.of_node,
					   "qcom,iommu-pmu-ncounters",
					   &pmon_info->num_counters);
		if (ret) {
			pr_err("Error reading qcom,iommu-pmu-ncounters\n");
			goto fail;
		}

		if (!of_get_property(pdev->dev.of_node,
				     "qcom,iommu-pmu-event-classes",
				     &cls_prop_size)) {
			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
			return -EINVAL;
		}

		pmon_info->event_cls_supported =
			devm_kzalloc(&pdev->dev, cls_prop_size, GFP_KERNEL);
		if (!pmon_info->event_cls_supported) {
			pr_err("Unable to get memory for event class array\n");
			return -ENOMEM;
		}

		pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);

		ret = of_property_read_u32_array(pdev->dev.of_node,
					"qcom,iommu-pmu-event-classes",
					pmon_info->event_cls_supported,
					pmon_info->nevent_cls_supported);
		if (ret) {
			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
			return ret;
		}
	} else {
		pmon_info->iommu.evt_irq = -1;
		ret = irq;
	}

fail:
	return ret;
}
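
/*
 * Probe an IOMMU instance: map its register space, acquire the optional
 * regulators and clocks, parse the device tree node and, when a performance
 * monitor is described, register it with the pmon framework.  A missing or
 * failing performance monitor is not fatal to the probe.
 */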
static int __devinit msm_iommu_probe(struct platform_device *pdev)
{
	struct iommu_pmon *pmon_info;
	struct msm_iommu_drvdata *drvdata;
	struct resource *r;
	int ret, needs_alt_core_clk;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
	if (!r)
		return -EINVAL;

	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!drvdata->base)
		return -ENOMEM;

	drvdata->phys_base = r->start;
	drvdata->glb_base = drvdata->base;

	if (of_get_property(pdev->dev.of_node, "vdd-supply", NULL)) {
		drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(drvdata->gdsc))
			return PTR_ERR(drvdata->gdsc);

		drvdata->alt_gdsc = devm_regulator_get(&pdev->dev,
						       "qcom,alt-vdd");
		if (IS_ERR(drvdata->alt_gdsc))
			drvdata->alt_gdsc = NULL;
	} else {
		pr_debug("Warning: No regulator specified for IOMMU\n");
	}

	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(drvdata->pclk))
		return PTR_ERR(drvdata->pclk);

	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(drvdata->clk))
		return PTR_ERR(drvdata->clk);

	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
						   "qcom,needs-alt-core-clk");
	if (needs_alt_core_clk) {
		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
		if (IS_ERR(drvdata->aclk))
			return PTR_ERR(drvdata->aclk);
	}

	if (clk_get_rate(drvdata->clk) == 0) {
		ret = clk_round_rate(drvdata->clk, 1000);
		clk_set_rate(drvdata->clk, ret);
	}

	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
		ret = clk_round_rate(drvdata->aclk, 1000);
		clk_set_rate(drvdata->aclk, ret);
	}

	ret = msm_iommu_parse_dt(pdev, drvdata);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "device %s mapped at %p, with %d ctx banks\n",
		 drvdata->name, drvdata->base, drvdata->ncb);

	platform_set_drvdata(pdev, drvdata);

	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
	if (pmon_info != NULL) {
		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
		if (ret) {
			msm_iommu_pm_free(&pdev->dev);
			pr_info("%s: pmon not available.\n", drvdata->name);
		} else {
			pmon_info->iommu.base = drvdata->base;
			pmon_info->iommu.ops = msm_get_iommu_access_ops();
			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
			pmon_info->iommu.iommu_name = drvdata->name;
			ret = msm_iommu_pm_iommu_register(pmon_info);
			if (ret) {
				pr_err("%s iommu register fail\n",
				       drvdata->name);
				msm_iommu_pm_free(&pdev->dev);
			} else {
				pr_debug("%s iommu registered for pmon\n",
					 pmon_info->iommu.iommu_name);
			}
		}
	}

	return 0;
}
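
/*
 * Tear down an IOMMU instance: unregister it from the performance-monitor
 * framework, release the bus vote client and drop it from the global driver
 * list.
 */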
static int __devexit msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_drvdata *drv = NULL;

	msm_iommu_pm_iommu_unregister(&pdev->dev);
	msm_iommu_pm_free(&pdev->dev);

	drv = platform_get_drvdata(pdev);
	if (drv) {
		__put_bus_vote_client(drv);
		msm_iommu_remove_drv(drv);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}
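
/*
 * Parse a context-bank node: request the secure or non-secure fault
 * interrupt, derive the context bank number from the register offset
 * relative to the parent IOMMU, and read the label and stream IDs.
 */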
static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
				  struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	struct resource *r, rp;
	int irq = 0, ret = 0;
	u32 nsid;

	get_secure_ctx(pdev->dev.of_node, ctx_drvdata);

	if (ctx_drvdata->secure_context) {
		irq = platform_get_irq(pdev, 1);
		if (irq > 0) {
			ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					msm_iommu_secure_fault_handler_v2,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irq", pdev);
			if (ret) {
				pr_err("Request IRQ %d failed with ret=%d\n",
				       irq, ret);
				return ret;
			}
		}
	} else {
		irq = platform_get_irq(pdev, 0);
		if (irq > 0) {
			ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					msm_iommu_fault_handler_v2,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_nonsecure_irq", pdev);
			if (ret) {
				pr_err("Request IRQ %d failed with ret=%d\n",
				       irq, ret);
				goto out;
			}
		}
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -EINVAL;
		goto out;
	}

	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
	if (ret)
		goto out;

	/* Calculate the context bank number using the base addresses. The
	 * first 8 pages belong to the global address space which is followed
	 * by the context banks, hence subtract by 8 to get the context bank
	 * number.
	 */
	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;

	if (of_property_read_string(pdev->dev.of_node, "label",
				    &ctx_drvdata->name))
		ctx_drvdata->name = dev_name(&pdev->dev);

	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
		ret = -EINVAL;
		goto out;
	}

	if (nsid >= sizeof(ctx_drvdata->sids)) {
		ret = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
				       ctx_drvdata->sids,
				       nsid / sizeof(*ctx_drvdata->sids))) {
		ret = -EINVAL;
		goto out;
	}

	ctx_drvdata->nsid = nsid;
	ctx_drvdata->asid = -1;

out:
	return ret;
}
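
/*
 * Probe a context-bank device.  The device must be a child of an IOMMU
 * instance; its drvdata is only set once the device tree node has been
 * parsed successfully.
 */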
static int __devinit msm_iommu_ctx_probe(struct platform_device *pdev)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
	int ret;

	if (!pdev->dev.parent)
		return -EINVAL;

	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
				   GFP_KERNEL);
	if (!ctx_drvdata)
		return -ENOMEM;

	ctx_drvdata->pdev = pdev;
	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);

	ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
	if (!ret) {
		platform_set_drvdata(pdev, ctx_drvdata);
		dev_info(&pdev->dev, "context %s using bank %d\n",
			 ctx_drvdata->name, ctx_drvdata->num);
	}

	return ret;
}

static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);
	return 0;
}
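
/*
 * Two platform drivers are registered: one for the IOMMU instances
 * ("qcom,msm-smmu-v1") and one for their child context banks
 * ("qcom,msm-smmu-v1-ctx").
 */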
static struct of_device_id msm_iommu_match_table[] = {
	{ .compatible = "qcom,msm-smmu-v1", },
	{}
};

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu_v1",
		.of_match_table = msm_iommu_match_table,
	},
	.probe = msm_iommu_probe,
	.remove = __devexit_p(msm_iommu_remove),
};

static struct of_device_id msm_iommu_v1_ctx_match_table[] = {
	{ .compatible = "qcom,msm-smmu-v1-ctx", },
	{}
};

static struct platform_driver msm_iommu_ctx_driver = {
	.driver = {
		.name = "msm_iommu_ctx_v1",
		.of_match_table = msm_iommu_v1_ctx_match_table,
	},
	.probe = msm_iommu_ctx_probe,
	.remove = __devexit_p(msm_iommu_ctx_remove),
};
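
/*
 * Register both platform drivers; subsys_initcall() runs this ahead of the
 * regular device initcalls.  The v1 access ops are only installed when the
 * SoC does not support the v0 IOMMU.
 */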
static int __init msm_iommu_driver_init(void)
{
	int ret;

	if (!msm_soc_version_supports_iommu_v0()) {
		msm_set_iommu_access_ops(&iommu_access_ops_v1);
		msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
	}

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0) {
		pr_err("Failed to register IOMMU driver\n");
		goto error;
	}

	ret = platform_driver_register(&msm_iommu_ctx_driver);
	if (ret != 0) {
		/*
		 * Don't leave the IOMMU driver registered without its
		 * context driver.
		 */
		platform_driver_unregister(&msm_iommu_driver);
		pr_err("Failed to register IOMMU context driver\n");
		goto error;
	}

error:
	return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
	platform_driver_unregister(&msm_iommu_ctx_driver);
	platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

MODULE_LICENSE("GPL v2");