/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <asm/sizes.h>
#include <mach/iommu_hw-v1.h>
#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu_perfmon.h>
#include <mach/msm_bus.h>

#include "msm_iommu_pagetable.h"

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_MUTEX(msm_iommu_lock);

struct dump_regs_tbl dump_regs_tbl[MAX_DUMP_REGS];
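
/*
 * Power on the IOMMU's GDSC power domain(s). The alternate GDSC, when
 * present, is only enabled after the primary one succeeds; a failure of
 * the alternate rolls the primary back so no partial power state is
 * left behind.
 */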
static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;

	if (drvdata->gdsc) {
		ret = regulator_enable(drvdata->gdsc);
		if (ret)
			goto fail;

		if (drvdata->alt_gdsc)
			ret = regulator_enable(drvdata->alt_gdsc);

		if (ret)
			regulator_disable(drvdata->gdsc);
	}
fail:
	return ret;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->alt_gdsc)
		regulator_disable(drvdata->alt_gdsc);

	if (drvdata->gdsc)
		regulator_disable(drvdata->gdsc);
}

static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
			       vote);
	}
	return ret;
}
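
/*
 * Enable the IOMMU clocks in dependency order: the interface clock
 * (pclk) first, then the core clock, then the optional AXI clock; on
 * any failure the clocks enabled so far are rolled back. Some targets
 * additionally expose a memory-mapped clock control register
 * (clk_reg_virt) whose low bit appears to gate the clock branch; it is
 * cleared here to ungate it.
 */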
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret) {
		clk_disable_unprepare(drvdata->pclk);
		goto fail;
	}

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
			goto fail;
		}
	}

	if (drvdata->clk_reg_virt) {
		unsigned int value;

		value = readl_relaxed(drvdata->clk_reg_virt);
		value &= ~0x1;
		writel_relaxed(value, drvdata->clk_reg_virt);
		/* Ensure clock is on before continuing */
		mb();
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static void _iommu_lock_acquire(unsigned int need_extra_lock)
{
	mutex_lock(&msm_iommu_lock);
}

static void _iommu_lock_release(unsigned int need_extra_lock)
{
	mutex_unlock(&msm_iommu_lock);
}
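
/*
 * Power, clock, bus-vote and locking callbacks handed to code outside
 * this file (e.g. the secure-environment and performance-monitor paths)
 * so that it can manage the IOMMU without reaching into driver
 * internals.
 */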
struct iommu_access_ops iommu_access_ops_v1 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};

#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK

#define VBIF_XIN_HALT_CTRL0 0x200
#define VBIF_XIN_HALT_CTRL1 0x204
#define VBIF_AXI_HALT_CTRL0 0x208
#define VBIF_AXI_HALT_CTRL1 0x20C

static void __halt_vbif_xin(void __iomem *vbif_base)
{
	pr_err("Halting VBIF_XIN\n");
	writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
}

static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
{
	unsigned int reg_val;

	reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
	pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
	reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
	pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
	reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
	pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
	reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
	pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
	reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
	pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
}

static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
{
	phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
			   - (phys_addr_t) 0x4000);
	void __iomem *base = ioremap(addr, 0x1000);
	int ret = 0;

	if (base) {
		__dump_vbif_state(drvdata->base, base);
		/* Halt using the remapped VBIF base, not the IOMMU base */
		__halt_vbif_xin(base);
		__dump_vbif_state(drvdata->base, base);
		iounmap(base);
	} else {
		pr_err("%s: Unable to ioremap\n", __func__);
		ret = -ENOMEM;
	}
	return ret;
}

static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
{
	int res;
	unsigned int val;
	void __iomem *base = drvdata->base;
	char const *name = drvdata->name;

	pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
	res = __check_vbif_state(drvdata);
	if (res)
		BUG();

	pr_err("Checking if IOMMU halt completed for %s\n", name);

	res = readl_tight_poll_timeout(
		GLB_REG(MICRO_MMU_CTRL, base), val,
		(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000);

	if (res) {
		pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
		       name);
	} else {
		pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
	}
	BUG();
}

static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
				 int ctx)
{
	int res;
	unsigned int val;
	void __iomem *base = drvdata->base;
	char const *name = drvdata->name;

	pr_err("Timed out waiting for TLB SYNC to complete for %s\n", name);
	res = __check_vbif_state(drvdata);
	if (res)
		BUG();

	pr_err("Checking if TLB sync completed for %s\n", name);

	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
				       (val & CB_TLBSTATUS_SACTIVE) == 0,
				       5000000);
	if (res) {
		pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
		       name);
	} else {
		pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
	}
	BUG();
}

#else

/*
 * For targets without a VBIF, or with the VBIF check disabled, just
 * crash immediately so that the issue is captured.
 */
static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
{
	BUG();
}

static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
				 int ctx)
{
	BUG();
}

#endif
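
/*
 * Request that the IOMMU's microcontroller halt (stop accepting new
 * translations) and spin until it reports idle, so that the global
 * register space can be reprogrammed safely. Clocks must already be on.
 */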
void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		unsigned int val;
		void __iomem *base = iommu_drvdata->base;
		int res;

		SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
		res = readl_tight_poll_timeout(
			GLB_REG(MICRO_MMU_CTRL, base), val,
			(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000);
		if (res)
			check_halt_state(iommu_drvdata);

		/* Ensure device is idle before continuing */
		mb();
	}
}

void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
{
	if (iommu_drvdata->halt_enabled) {
		/*
		 * Ensure transactions have completed before releasing
		 * the halt
		 */
		mb();

		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);

		/*
		 * Ensure write is complete before continuing to ensure
		 * we don't turn off clocks while transaction is still
		 * pending.
		 */
		mb();
	}
}
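
/*
 * Issue a TLBSYNC for the given context bank and poll TLBSTATUS until
 * the SACTIVE bit clears, i.e. until all outstanding TLB invalidations
 * have completed.
 */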
static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
{
	unsigned int val;
	int res;
	void __iomem *base = iommu_drvdata->base;

	SET_TLBSYNC(base, ctx, 0);
	/* No barrier needed due to read dependency */

	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
				       (val & CB_TLBSTATUS_SACTIVE) == 0,
				       5000000);
	if (res)
		check_tlb_sync_state(iommu_drvdata, ctx);
}
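
/*
 * Invalidate the TLB entry for a single virtual address (TLBIVA
 * combines the context's ASID with the VA) in every context bank
 * attached to this domain, then wait for the invalidation to complete.
 */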
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
		mb();
		__sync_tlb(iommu_drvdata, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
			     ctx_drvdata->asid);
		mb();
		__sync_tlb(iommu_drvdata, ctx_drvdata->num);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

/*
 * May only be called for non-secure iommus
 */
static void __reset_iommu(void __iomem *base)
{
	int i, smt_size;

	SET_ACR(base, 0);
	SET_CR2(base, 0);
	SET_GFAR(base, 0);
	SET_GFSRRESTORE(base, 0);
	SET_TLBIALLNSNH(base, 0);
	smt_size = GET_IDR0_NUMSMRG(base);

	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	mb();
}

#ifdef CONFIG_IOMMU_NON_SECURE
static void __reset_iommu_secure(void __iomem *base)
{
	SET_NSACR(base, 0);
	SET_NSCR2(base, 0);
	SET_NSGFAR(base, 0);
	SET_NSGFSRRESTORE(base, 0);
	mb();
}

static void __program_iommu_secure(void __iomem *base)
{
	SET_NSCR0_SMCFCFG(base, 1);
	SET_NSCR0_USFCFG(base, 1);
	SET_NSCR0_STALLD(base, 1);
	SET_NSCR0_GCFGFIE(base, 1);
	SET_NSCR0_GCFGFRE(base, 1);
	SET_NSCR0_GFIE(base, 1);
	SET_NSCR0_GFRE(base, 1);
	SET_NSCR0_CLIENTPD(base, 0);
}
#else
static inline void __reset_iommu_secure(void __iomem *base)
{
}

static inline void __program_iommu_secure(void __iomem *base)
{
}
#endif

/*
 * May only be called for non-secure iommus
 */
static void __program_iommu(void __iomem *base)
{
	__reset_iommu(base);
	__reset_iommu_secure(base);

	SET_CR0_SMCFCFG(base, 1);
	SET_CR0_USFCFG(base, 1);
	SET_CR0_STALLD(base, 1);
	SET_CR0_GCFGFIE(base, 1);
	SET_CR0_GCFGFRE(base, 1);
	SET_CR0_GFIE(base, 1);
	SET_CR0_GFRE(base, 1);
	SET_CR0_CLIENTPD(base, 0);

	__program_iommu_secure(base);

	mb(); /* Make sure writes complete before returning */
}

void program_iommu_bfb_settings(void __iomem *base,
			const struct msm_iommu_bfb_settings *bfb_settings)
{
	unsigned int i;

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
				       bfb_settings->data[i]);

	mb(); /* Make sure writes complete before returning */
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_ACTLR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_TLBIALL(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	mb();
}

static void __release_smg(void __iomem *base)
{
	int i, smt_size;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate all SMGs */
	for (i = 0; i < smt_size; i++)
		if (GET_SMR_VALID(base, i))
			SET_SMR_VALID(base, i, 0);
}
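
/*
 * Assign an ASID to a context bank. iommu_drvdata->asid[] acts as a
 * per-ASID reference count: slot i counts the context banks using ASID
 * i + 1. If another context bank already shares this domain's page
 * table, its ASID is reused (so a single TLBIASID covers them all);
 * otherwise the first free slot provides a fresh ASID.
 */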
static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	unsigned int found = 0;
	void __iomem *base = iommu_drvdata->base;
	unsigned int i;
	unsigned int ncb = iommu_drvdata->ncb;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;

	/* Find if this page table is used elsewhere, and re-use ASID */
	if (!list_empty(&priv->list_attached)) {
		tmp_drvdata = list_first_entry(&priv->list_attached,
					       struct msm_iommu_ctx_drvdata,
					       attached_elm);

		++iommu_drvdata->asid[tmp_drvdata->asid - 1];
		curr_ctx->asid = tmp_drvdata->asid;

		SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, curr_ctx->asid);
		found = 1;
	}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; ++i) {
			if (iommu_drvdata->asid[i] == 0) {
				++iommu_drvdata->asid[i];
				curr_ctx->asid = i + 1;

				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
						       curr_ctx->asid);
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
	}
}
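
/*
 * Program the stream-mapping (M2V) table for one context device: each
 * of the context's stream IDs (SIDs) is installed in a free Stream
 * Match Register (SMR), and the matching S2CR entry routes that stream
 * to the context bank as a non-secure translation context.
 */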
static int program_m2v_table(struct device *dev, void __iomem *base)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
	u32 *sids = ctx_drvdata->sids;
	unsigned int ctx = ctx_drvdata->num;
	int num = 0, i, smt_size;
	int len = ctx_drvdata->nsid;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Program the M2V tables for this context */
	for (i = 0; i < len / sizeof(*sids); i++) {
		for (; num < smt_size; num++)
			if (GET_SMR_VALID(base, num) == 0)
				break;
		BUG_ON(num >= smt_size);

		SET_SMR_VALID(base, num, 1);
		SET_SMR_MASK(base, num, 0);
		SET_SMR_ID(base, num, sids[i]);
		SET_S2CR_N(base, num, 0);
		SET_S2CR_CBNDX(base, num, ctx);
		SET_S2CR_MEMATTR(base, num, 0x0A);
		/* Set security bit override to be Non-secure */
		SET_S2CR_NSCFG(base, num, 3);
	}
	return 0;
}

static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
{
	device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
			      program_m2v_table);
}
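
/*
 * Set up one context bank for stage-1 translation: point TTBR0 at the
 * domain's first-level page table, enable fault interrupts and TEX
 * remapping, pick up the CPU's PRRR/NMRR attribute encodings, assign an
 * ASID and finally enable the MMU for the context.
 */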
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
			      struct msm_iommu_priv *priv, bool is_secure,
			      bool program_m2v)
{
	unsigned int prrr, nmrr;
	phys_addr_t pn;
	void __iomem *base = iommu_drvdata->base;
	unsigned int ctx = ctx_drvdata->num;
	phys_addr_t pgtable = __pa(priv->pt.fl_table);

	__reset_context(base, ctx);

	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
	SET_TTBCR(base, ctx, 0);
	SET_CB_TTBR0_ADDR(base, ctx, pn);

	/* Enable context fault interrupt */
	SET_CB_SCTLR_CFIE(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_CB_ACTLR_BPRCISH(base, ctx, 1);
	SET_CB_ACTLR_BPRCOSH(base, ctx, 1);
	SET_CB_ACTLR_BPRCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_CB_SCTLR_TRE(base, ctx, 1);

	/* Enable private ASID namespace */
	SET_CB_SCTLR_ASIDPNE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/*
	 * Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (priv->pt.redirect) {
		SET_CB_TTBR0_S(base, ctx, 1);
		SET_CB_TTBR0_NOS(base, ctx, 1);
		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
		SET_CB_TTBR0_IRGN0(base, ctx, 1);
		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
	}

	if (!is_secure) {
		if (program_m2v)
			program_all_m2v_tables(iommu_drvdata);

		SET_CBAR_N(base, ctx, 0);

		/* Stage 1 Context with Stage 2 bypass */
		SET_CBAR_TYPE(base, ctx, 1);

		/* Route page faults to the non-secure interrupt */
		SET_CBAR_IRPTNDX(base, ctx, 1);

		/* Set VMID to non-secure HLOS */
		SET_CBAR_VMID(base, ctx, 3);

		/* Bypass is treated as inner-shareable */
		SET_CBAR_BPSHCFG(base, ctx, 2);

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);
	}

	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Enable the MMU */
	SET_CB_SCTLR_M(base, ctx, 1);
	mb();
}

static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	INIT_LIST_HEAD(&priv->list_attached);
	if (msm_iommu_pagetable_alloc(&priv->pt))
		goto fail_nomem;

	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv)
		msm_iommu_pagetable_free(&priv->pt);

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}
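
/*
 * Attach a context device to a domain. The first attach on an IOMMU
 * powers it up, applies the bus vote and programs the global register
 * space (directly for non-secure IOMMUs, via the secure environment
 * otherwise); every attach then programs one context bank. Repeat
 * attaches of the same context are counted rather than reprogrammed.
 */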
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	int is_secure;
	bool set_m2v = false;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;
	if (ctx_drvdata->attach_count > 1)
		goto already_attached;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	is_secure = iommu_drvdata->sec_id != -1;

	ret = __enable_regulators(iommu_drvdata);
	if (ret)
		goto unlock;

	ret = apply_bus_vote(iommu_drvdata, 1);
	if (ret) {
		__disable_regulators(iommu_drvdata);
		goto unlock;
	}

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		apply_bus_vote(iommu_drvdata, 0);
		__disable_regulators(iommu_drvdata);
		goto unlock;
	}

	/* We can only do this once */
	if (!iommu_drvdata->ctx_attach_count) {
		if (!is_secure) {
			iommu_halt(iommu_drvdata);
			__program_iommu(iommu_drvdata->base);
			iommu_resume(iommu_drvdata);
		} else {
			ret = msm_iommu_sec_program_iommu(
				iommu_drvdata->sec_id);
			if (ret) {
				__disable_clocks(iommu_drvdata);
				apply_bus_vote(iommu_drvdata, 0);
				__disable_regulators(iommu_drvdata);
				goto unlock;
			}
		}
		program_iommu_bfb_settings(iommu_drvdata->base,
					   iommu_drvdata->bfb_settings);
		set_m2v = true;
	}

	iommu_halt(iommu_drvdata);
	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
	iommu_resume(iommu_drvdata);

	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;
	++iommu_drvdata->ctx_attach_count;

already_attached:
	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;

unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
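
/*
 * Detach a context device from its domain: invalidate the context's TLB
 * entries, release its ASID reference, reset the context bank and, on
 * the last detach for the IOMMU, tear down the stream mappings and drop
 * the bus vote and power.
 */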
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;
	int is_secure;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	is_secure = iommu_drvdata->sec_id != -1;

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);

	BUG_ON(iommu_drvdata->asid[ctx_drvdata->asid - 1] == 0);
	iommu_drvdata->asid[ctx_drvdata->asid - 1]--;
	ctx_drvdata->asid = -1;

	__reset_context(iommu_drvdata->base, ctx_drvdata->num);

	/*
	 * Only reset the M2V tables on the very last detach
	 */
	if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
		iommu_halt(iommu_drvdata);
		__release_smg(iommu_drvdata->base);
		iommu_resume(iommu_drvdata);
	}

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	__disable_regulators(iommu_drvdata);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
	--iommu_drvdata->ctx_attach_count;
unlock:
	mutex_unlock(&msm_iommu_lock);
}
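
/*
 * Map and unmap entry points for the generic IOMMU API. The page table
 * is updated under the driver mutex and the affected TLB entries are
 * then invalidated in every attached context bank.
 */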
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
	if (ret)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	int ret = -ENODEV;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv)
		goto fail;

	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
	if (ret < 0)
		goto fail;

	ret = __flush_iotlb_va(domain, va);
fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}
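
/*
 * For illustration, clients normally reach these callbacks through the
 * generic IOMMU API rather than calling them directly. Assuming an
 * already-allocated 'domain' and the IOMMU context device 'ctx_dev'
 * (both placeholder names), a typical sequence is:
 *
 *	if (iommu_attach_device(domain, ctx_dev))
 *		return -ENODEV;
 *	if (iommu_map(domain, iova, paddr, SZ_4K,
 *		      IOMMU_READ | IOMMU_WRITE))
 *		goto detach;
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 */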

static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	int ret;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
	if (ret)
		goto fail;

	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
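
/*
 * Translate an IOVA to a physical address by asking the hardware: write
 * the address to the ATS1PR register, poll ATSR until the table walk
 * finishes, and decode the result from PAR. Returns 0 if the
 * translation faulted.
 */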
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	u64 par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret) {
		ret = 0;	/* 0 indicates translation failed */
		goto fail;
	}

	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	mb();
	while (GET_CB_ATSR_ACTIVE(base, ctx))
		cpu_relax();

	par = GET_PAR(base, ctx);
	__disable_clocks(iommu_drvdata);

	if (par & CB_PAR_F) {
		unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;

		pr_err("IOMMU translation fault!\n");
		pr_err("name = %s\n", iommu_drvdata->name);
		pr_err("context = %s (%d)\n", ctx_drvdata->name,
		       ctx_drvdata->num);
		pr_err("Interesting registers:\n");
		pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
		       (par & CB_PAR_F) ? "F " : "",
		       (par & CB_PAR_TF) ? "TF " : "",
		       (par & CB_PAR_AFF) ? "AFF " : "",
		       (par & CB_PAR_PF) ? "PF " : "",
		       (par & CB_PAR_EF) ? "EF " : "",
		       (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
		       (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
		       (par & CB_PAR_ATOT) ? "ATOT " : "",
		       level,
		       (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
		ret = 0;
	} else {
		/* We are dealing with a supersection */
		if (par & CB_PAR_SS)
			ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
		else	/* Upper 20 bits from PAR, lower 12 from VA */
			ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
	}

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

void print_ctx_regs(struct msm_iommu_context_reg regs[])
{
	uint32_t fsr = regs[DUMP_REG_FSR].val;
	u64 ttbr;

	pr_err("FAR = %016llx\n",
	       COMBINE_DUMP_REG(
			regs[DUMP_REG_FAR1].val,
			regs[DUMP_REG_FAR0].val));
	pr_err("PAR = %016llx\n",
	       COMBINE_DUMP_REG(
			regs[DUMP_REG_PAR1].val,
			regs[DUMP_REG_PAR0].val));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "PF " : "",
	       (fsr & 0x10) ? "EF " : "",
	       (fsr & 0x20) ? "TLBMCF " : "",
	       (fsr & 0x40) ? "TLBLKF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");
	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);

	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
				regs[DUMP_REG_TTBR0_0].val);
	if (regs[DUMP_REG_TTBR0_1].valid)
		pr_err("TTBR0 = %016llx\n", ttbr);
	else
		pr_err("TTBR0 = %016llx (32b)\n", ttbr);

	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
				regs[DUMP_REG_TTBR1_0].val);
	if (regs[DUMP_REG_TTBR1_1].valid)
		pr_err("TTBR1 = %016llx\n", ttbr);
	else
		pr_err("TTBR1 = %016llx (32b)\n", ttbr);

	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
	pr_err("PRRR = %08x NMRR = %08x\n",
	       regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
}

static void __print_ctx_regs(void __iomem *base, int ctx, unsigned int fsr)
{
	struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
	unsigned int i;

	for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
		regs[i].val = GET_CTX_REG(dump_regs_tbl[i].key, base, ctx);
		regs[i].valid = 1;
	}
	print_ctx_regs(regs);
}
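
/*
 * Context-bank fault handler (it takes a mutex, so it must run as a
 * threaded IRQ). If no context is attached the IOMMU may be powered
 * off, so the fault registers cannot be read and the interrupt is
 * simply acknowledged. Otherwise the fault is reported to any handler
 * registered via report_iommu_fault(), dumped if unhandled, and the
 * fault status register is cleared unless the handler asked for the
 * stalled transaction to be kept (-EBUSY).
 */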
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct msm_iommu_drvdata *drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int fsr;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!pdev);

	drvdata = dev_get_drvdata(pdev->dev.parent);
	BUG_ON(!drvdata);

	ctx_drvdata = dev_get_drvdata(&pdev->dev);
	BUG_ON(!ctx_drvdata);

	if (!drvdata->ctx_attach_count) {
		pr_err("Unexpected IOMMU page fault!\n");
		pr_err("name = %s\n", drvdata->name);
		pr_err("Power is OFF. Unable to read page fault information\n");
		/*
		 * We cannot determine which context bank caused the issue so
		 * we just return handled here to ensure IRQ handler code is
		 * happy
		 */
		ret = IRQ_HANDLED;
		goto fail;
	}

	ret = __enable_clocks(drvdata);
	if (ret) {
		ret = IRQ_NONE;
		goto fail;
	}

	fsr = GET_FSR(drvdata->base, ctx_drvdata->num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(drvdata->base,
							 ctx_drvdata->num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name,
			       ctx_drvdata->num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
		}

		if (ret != -EBUSY)
			SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}

static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;

	return __pa(priv->pt.fl_table);
}

#define DUMP_REG_INIT(dump_reg, cb_reg, mbp)			\
	do {							\
		dump_regs_tbl[dump_reg].key = cb_reg;		\
		dump_regs_tbl[dump_reg].name = #cb_reg;		\
		dump_regs_tbl[dump_reg].must_be_present = mbp;	\
	} while (0)

static void msm_iommu_build_dump_regs_table(void)
{
	DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1);
	DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1);
	DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1);
	DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1);
	DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1);
	DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0);
	DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1);
	DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1);
	DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1);
	DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init msm_iommu_init(void)
{
	msm_iommu_pagetable_init();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	msm_iommu_build_dump_regs_table();

	return 0;
}
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU v2 Driver");