msm_iommu-v0.c

/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_perfmon.h>
#include <mach/iommu_hw-v0.h>
#include <mach/msm_iommu_priv.h>
#include <mach/iommu.h>
#include <mach/msm_smem.h>
#include <mach/msm_bus.h>
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len);

static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_MUTEX(msm_iommu_lock);
/**
 * Remote spinlock implementation based on Peterson's algorithm to be used
 * to synchronize IOMMU config port access between CPU and GPU.
 * This implements Process 0 of the spinlock algorithm. The GPU implements
 * Process 1. The flag and turn variables are stored in shared memory so the
 * GPU can access them.
 */
struct msm_iommu_remote_lock {
	int initialized;
	struct remote_iommu_petersons_spinlock *lock;
};

static struct msm_iommu_remote_lock msm_iommu_remote_lock;

#ifdef CONFIG_MSM_IOMMU_SYNC
static void _msm_iommu_remote_spin_lock_init(void)
{
	msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
	memset(msm_iommu_remote_lock.lock, 0,
	       sizeof(*msm_iommu_remote_lock.lock));
}

void msm_iommu_remote_p0_spin_lock(unsigned int need_lock)
{
	if (!need_lock)
		return;

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
	msm_iommu_remote_lock.lock->turn = 1;

	smp_mb();

	while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
	       msm_iommu_remote_lock.lock->turn == 1)
		cpu_relax();
}

void msm_iommu_remote_p0_spin_unlock(unsigned int need_lock)
{
	if (!need_lock)
		return;

	smp_mb();

	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
}
#endif
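
/*
 * For reference, a minimal sketch of the Process 1 (GPU) side of the same
 * Peterson's lock. This is illustrative only; the real GPU implementation
 * lives outside this driver and its environment is an assumption here:
 *
 *	lock->flag[PROC_GPU] = 1;
 *	lock->turn = 0;
 *	(memory barrier)
 *	while (lock->flag[PROC_APPS] == 1 && lock->turn == 0)
 *		;	// spin until the APPS side releases or yields its turn
 *	... access the shared IOMMU config port ...
 *	(memory barrier)
 *	lock->flag[PROC_GPU] = 0;
 */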
inline void msm_iommu_mutex_lock(void)
{
	mutex_lock(&msm_iommu_lock);
}

inline void msm_iommu_mutex_unlock(void)
{
	mutex_unlock(&msm_iommu_lock);
}

void *msm_iommu_lock_initialize(void)
{
	mutex_lock(&msm_iommu_lock);
	if (!msm_iommu_remote_lock.initialized) {
		msm_iommu_remote_lock_init();
		msm_iommu_remote_lock.initialized = 1;
	}
	mutex_unlock(&msm_iommu_lock);
	return msm_iommu_remote_lock.lock;
}
static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
{
	int ret = 0;

	if (drvdata->bus_client) {
		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
							  vote);
		if (ret)
			pr_err("%s: Failed to vote for bus: %d\n", __func__,
			       vote);
	}
	return ret;
}
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_prepare_enable(drvdata->clk);
		if (ret)
			clk_disable_unprepare(drvdata->pclk);
	}

	if (ret)
		goto fail;

	if (drvdata->aclk) {
		ret = clk_prepare_enable(drvdata->aclk);
		if (ret) {
			clk_disable_unprepare(drvdata->clk);
			clk_disable_unprepare(drvdata->pclk);
		}
	}

fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->aclk)
		clk_disable_unprepare(drvdata->aclk);
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}

static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
	return 0;
}

static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
{
	/* No need to do anything. IOMMUv0 is always on. */
}
static void *_iommu_lock_initialize(void)
{
	return msm_iommu_lock_initialize();
}

static void _iommu_lock_acquire(unsigned int need_extra_lock)
{
	msm_iommu_mutex_lock();
	msm_iommu_remote_spin_lock(need_extra_lock);
}

static void _iommu_lock_release(unsigned int need_extra_lock)
{
	msm_iommu_remote_spin_unlock(need_extra_lock);
	msm_iommu_mutex_unlock();
}

struct iommu_access_ops iommu_access_ops_v0 = {
	.iommu_power_on = __enable_regulators,
	.iommu_power_off = __disable_regulators,
	.iommu_bus_vote = apply_bus_vote,
	.iommu_clk_on = __enable_clocks,
	.iommu_clk_off = __disable_clocks,
	.iommu_lock_initialize = _iommu_lock_initialize,
	.iommu_lock_acquire = _iommu_lock_acquire,
	.iommu_lock_release = _iommu_lock_release,
};
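
/*
 * TLB maintenance helpers. __flush_iotlb_va() invalidates a single VA
 * (tagged with the context's ASID) in every IOMMU instance attached to the
 * domain, while __flush_iotlb() invalidates all entries for the context's
 * ASID. Clocks must be enabled around the register writes, and the remote
 * spinlock guards the config port on targets that share it with the GPU.
 */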
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();

		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();

		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
{
	SET_BPRCOSH(glb_base, ctx, 0);
	SET_BPRCISH(glb_base, ctx, 0);
	SET_BPRCNSH(glb_base, ctx, 0);
	SET_BPSHCFG(glb_base, ctx, 0);
	SET_BPMTCFG(glb_base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();
}
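
/*
 * Program a context bank for a new page table: reset it, point TTBR0/TTBR1
 * at the first-level table, enable TEX remap and fault reporting, optionally
 * mark page-table walks as inner-cacheable (when "redirect" is set), reuse
 * or pick a free ASID, and finally enable translation for the context.
 */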
static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
			      int ctx, int ncb, phys_addr_t pgtable,
			      int redirect, int ttbr_split)
{
	void __iomem *base = iommu_drvdata->base;
	void __iomem *glb_base = iommu_drvdata->glb_base;
	unsigned int prrr, nmrr;
	int i, j, found;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	__reset_context(base, glb_base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx,
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
}
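
/*
 * Domain setup. Each domain owns an ARM short-descriptor page table: a 16KB
 * first-level table (NUM_FL_PTE entries) allocated here, plus second-level
 * tables allocated on demand in make_second_level(). When the tables are not
 * marked L2-cacheable ("redirect"), every PTE update is followed by a cache
 * clean (clean_pte) so the IOMMU's hardware table walker sees it.
 */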
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;
	INIT_LIST_HEAD(&priv->list_attached);

	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							      get_order(SZ_16K));
	if (!priv->pt.fl_table)
		goto fail_nomem;

#ifdef CONFIG_IOMMU_PGTABLES_L2
	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
#endif

	memset(priv->pt.fl_table, 0, SZ_16K);
	domain->priv = priv;

	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
		  priv->pt.redirect);

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pt.fl_table;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
		priv->pt.fl_table = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}
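
/*
 * Attach/detach. Attaching a device binds its context bank to the domain's
 * page table (reference counted via attach_count so nested attaches are
 * no-ops); detaching invalidates the context's ASID and resets the bank.
 */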
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto unlock;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);

	if (!iommu_drvdata || !ctx_drvdata) {
		ret = -EINVAL;
		goto unlock;
	}

	++ctx_drvdata->attach_count;

	if (ctx_drvdata->attach_count > 1)
		goto unlock;

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto unlock;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto unlock;
		}

	ret = apply_bus_vote(iommu_drvdata, 1);
	if (ret)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	__program_context(iommu_drvdata, ctx_drvdata->num, iommu_drvdata->ncb,
			  __pa(priv->pt.fl_table), priv->pt.redirect,
			  iommu_drvdata->ttbr_split);
	__disable_clocks(iommu_drvdata);

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ctx_drvdata->attached_domain = domain;

	mutex_unlock(&msm_iommu_lock);

	msm_iommu_attached(dev->parent);
	return ret;
unlock:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	msm_iommu_detached(dev->parent);

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	if (!priv || !dev)
		goto unlock;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	if (!iommu_drvdata || !ctx_drvdata)
		goto unlock;

	--ctx_drvdata->attach_count;
	BUG_ON(ctx_drvdata->attach_count < 0);

	if (ctx_drvdata->attach_count > 0)
		goto unlock;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto unlock;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					 ctx_drvdata->num));

	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
			ctx_drvdata->num);

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

	__disable_clocks(iommu_drvdata);

	apply_bus_vote(iommu_drvdata, 0);

	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
unlock:
	mutex_unlock(&msm_iommu_lock);
}
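
/*
 * Translate IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags into ARM
 * short-descriptor attribute bits. Section/supersection mappings use the
 * FL_* bits, small/large pages the SL_* bits; the TEX/C/B encoding comes
 * from the TEX-remap class selected at init time (or from pgprot_kernel for
 * cached mappings). Returns 0 if no valid encoding exists.
 */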
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}
static unsigned long *make_second_level(struct msm_iommu_priv *priv,
					unsigned long *fl_pte)
{
	unsigned long *sl;

	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
						get_order(SZ_4K));
	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
fail:
	return sl;
}
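
/*
 * PTE writers for the four supported sizes. Per the ARM short-descriptor
 * format, 64KB large pages and 16MB supersections are encoded by replicating
 * the same descriptor across 16 consecutive entries; each helper fails with
 * -EBUSY if any target entry is already in use.
 */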
static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			| SL_SHARED | SL_TYPE_LARGE | pgprot;
fail:
	return ret;
}

static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}

static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;

	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}
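
/*
 * Map a single physically contiguous region of exactly one supported size.
 * 1MB/16MB requests go straight into the first-level table; 4KB/64KB
 * requests allocate a second-level table if needed. The TLB entry for the
 * VA is invalidated after the PTEs are written and cleaned.
 */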
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zd\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);
	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {
		if (*fl_pte == 0) {
			if (make_second_level(priv, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			      size_t len)
{
	struct msm_iommu_priv *priv;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv)
		goto fail;

	fl_table = priv->pt.fl_table;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zd\n", len);
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;

		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;

		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
		}
	}

	ret = __flush_iotlb_va(domain, va);

fail:
	mutex_unlock(&msm_iommu_lock);

	/* the IOMMU API requires us to return how many bytes were unmapped */
	len = ret ? 0 : len;
	return len;
}
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}
static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
					       va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
				       va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}
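
/*
 * Map a scatterlist into a contiguous VA range. For each position the
 * largest block size whose alignment and remaining chunk length allow it
 * (16MB, 1MB, 64KB, then 4KB) is chosen, so a well-aligned buffer costs far
 * fewer TLB entries. check_range() verifies the VA range is unmapped first;
 * on failure everything mapped so far is torn down again.
 */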
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int start_va = va;
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	struct msm_iommu_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}

	ret = check_range(fl_table, va, len);
	if (ret)
		goto fail;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16,
					  priv->pt.redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */
			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->pt.redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	if (ret && offset > 0)
		msm_iommu_unmap_range(domain, start_va, offset);
	return ret;
}
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_iommu_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pt.fl_table;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  priv->pt.redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1,
					  priv->pt.redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
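
/*
 * Resolve an IOVA by asking the hardware: a V2P (virtual-to-physical)
 * translation request is issued on the first attached context and the
 * physical address is read back from the PAR register, rather than walking
 * the page tables in software.
 */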
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_iommu_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);

	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	mb();
	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);

	__disable_clocks(iommu_drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void __print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");
	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
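
/*
 * Context fault interrupt handler. Because faulting transactions are
 * configured to stall (CFCFG), the fault is reported to any handler
 * registered on the attached domain; the fault status is then cleared, and
 * unless that handler returned -EBUSY the stalled transaction is resumed.
 */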
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
	struct msm_iommu_drvdata *drvdata;
	void __iomem *base;
	unsigned int fsr, num;
	int ret;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(!ctx_drvdata);

	drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
	BUG_ON(!drvdata);

	base = drvdata->base;
	num = ctx_drvdata->num;

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	msm_iommu_remote_spin_lock(drvdata->needs_rem_spinlock);

	fsr = GET_FSR(base, num);
	if (fsr) {
		if (!ctx_drvdata->attached_domain) {
			pr_err("Bad domain in interrupt handler\n");
			ret = -ENOSYS;
		} else
			ret = report_iommu_fault(ctx_drvdata->attached_domain,
						 &ctx_drvdata->pdev->dev,
						 GET_FAR(base, num), 0);

		if (ret == -ENOSYS) {
			pr_err("Unexpected IOMMU page fault!\n");
			pr_err("name    = %s\n", drvdata->name);
			pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
			pr_err("Interesting registers:\n");
			__print_ctx_regs(base, num);
		}

		SET_FSR(base, num, fsr);
		/*
		 * Only resume fetches if the registered fault handler
		 * allows it
		 */
		if (ret != -EBUSY)
			SET_RESUME(base, num, 1);

		ret = IRQ_HANDLED;
	} else
		ret = IRQ_NONE;

	msm_iommu_remote_spin_unlock(drvdata->needs_rem_spinlock);

	__disable_clocks(drvdata);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_iommu_priv *priv = domain->priv;
	return __pa(priv->pt.fl_table);
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
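
/*
 * For orientation, a rough sketch of how a client drives these ops through
 * the generic IOMMU API (illustrative only; the exact domain-allocation
 * helper and flags differ in this tree, and ctx_dev/iova/phys below are
 * made-up placeholders):
 *
 *	domain = iommu_domain_alloc(...);		// -> domain_init()
 *	iommu_attach_device(domain, ctx_dev);		// -> attach_dev()
 *	iommu_map(domain, iova, phys, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);		// -> map()
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);		// -> unmap()
 *	iommu_detach_device(domain, ctx_dev);		// -> detach_dev()
 *	iommu_domain_free(domain);			// -> domain_destroy()
 */

/*
 * TEX-remap setup. get_tex_class() scans the CPU's PRRR/NMRR registers for
 * the remap class whose memory type and inner/outer cache policy match the
 * requested attributes; the resulting class indices feed the TEX/C/B bits
 * used by __get_pgprot() above.
 */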
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
static int __init msm_iommu_init(void)
{
	if (!msm_soc_version_supports_iommu_v0())
		return -ENODEV;

	msm_iommu_lock_initialize();

	setup_iommu_tex_classes();
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");