kgsl_iommu.c

  1. /* Copyright (c) 2011-2014,2016,2020, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/types.h>
  14. #include <linux/delay.h>
  15. #include <linux/device.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/genalloc.h>
  18. #include <linux/slab.h>
  19. #include <linux/iommu.h>
  20. #include <linux/msm_kgsl.h>
  21. #include <mach/socinfo.h>
  22. #include <mach/msm_iomap.h>
  23. #include <mach/board.h>
  24. #include <mach/iommu_domains.h>
  25. #include <stddef.h>
  26. #include "kgsl.h"
  27. #include "kgsl_device.h"
  28. #include "kgsl_mmu.h"
  29. #include "kgsl_sharedmem.h"
  30. #include "kgsl_iommu.h"
  31. #include "adreno_pm4types.h"
  32. #include "adreno.h"
  33. #include "kgsl_trace.h"
  34. #include "z180.h"
  35. #include "kgsl_cffdump.h"
  36. static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
  37. { 0, 0 }, /* GLOBAL_BASE */
  38. { 0x0, 1 }, /* SCTLR */
  39. { 0x10, 1 }, /* TTBR0 */
  40. { 0x14, 1 }, /* TTBR1 */
  41. { 0x20, 1 }, /* FSR */
  42. { 0x800, 1 }, /* TLBIALL */
  43. { 0x820, 1 }, /* RESUME */
  44. { 0x03C, 1 }, /* TLBLKCR */
  45. { 0x818, 1 }, /* V2PUR */
  46. { 0x2C, 1 }, /* FSYNR0 */
  47. { 0x30, 1 }, /* FSYNR1 */
  48. { 0, 0 }, /* TLBSYNC, not in v0 */
  49. { 0, 0 }, /* TLBSTATUS, not in v0 */
  50. { 0, 0 } /* IMPLDEF_MICRO_MMU_CRTL, not in v0 */
  51. };
  52. static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
  53. { 0, 0 }, /* GLOBAL_BASE */
  54. { 0x0, 1 }, /* SCTLR */
  55. { 0x20, 1 }, /* TTBR0 */
  56. { 0x28, 1 }, /* TTBR1 */
  57. { 0x58, 1 }, /* FSR */
  58. { 0x618, 1 }, /* TLBIALL */
  59. { 0x008, 1 }, /* RESUME */
  60. { 0, 0 }, /* TLBLKCR not in V1 */
  61. { 0, 0 }, /* V2PUR not in V1 */
  62. { 0x68, 1 }, /* FSYNR0 */
  63. { 0x6C, 1 }, /* FSYNR1 */
  64. { 0x7F0, 1 }, /* TLBSYNC */
  65. { 0x7F4, 1 }, /* TLBSTATUS */
  66. { 0x2000, 0 } /* IMPLDEF_MICRO_MMU_CRTL */
  67. };
  68. static struct iommu_access_ops *iommu_access_ops;
  69. static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
  70. uint32_t flags);
  71. static phys_addr_t
  72. kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu);
  73. static void _iommu_lock(struct kgsl_iommu const *iommu)
  74. {
  75. if (iommu_access_ops && iommu_access_ops->iommu_lock_acquire)
  76. iommu_access_ops->iommu_lock_acquire(
  77. iommu->sync_lock_initialized);
  78. }
  79. static void _iommu_unlock(struct kgsl_iommu const *iommu)
  80. {
  81. if (iommu_access_ops && iommu_access_ops->iommu_lock_release)
  82. iommu_access_ops->iommu_lock_release(
  83. iommu->sync_lock_initialized);
  84. }
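/*
 * Holds the GPU-visible addresses of the Peterson's-lock variables
 * (flag[PROC_APPS], flag[PROC_GPU] and turn) shared between the CPU and
 * the GPU. The addresses are filled in by kgsl_iommu_start_sync_lock()
 * and consumed by the sync lock/unlock command builders below.
 */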
  85. struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
  86. /*
  87. * One page allocation for a guard region to protect against over-zealous
  88. * GPU pre-fetch
  89. */
  90. static struct page *kgsl_guard_page;
  91. static int get_iommu_unit(struct device *dev, struct kgsl_mmu **mmu_out,
  92. struct kgsl_iommu_unit **iommu_unit_out)
  93. {
  94. int i, j, k;
  95. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  96. struct kgsl_mmu *mmu;
  97. struct kgsl_iommu *iommu;
  98. if (kgsl_driver.devp[i] == NULL)
  99. continue;
  100. mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
  101. if (mmu == NULL || mmu->priv == NULL)
  102. continue;
  103. iommu = mmu->priv;
  104. for (j = 0; j < iommu->unit_count; j++) {
  105. struct kgsl_iommu_unit *iommu_unit =
  106. &iommu->iommu_units[j];
  107. for (k = 0; k < iommu_unit->dev_count; k++) {
  108. if (iommu_unit->dev[k].dev == dev) {
  109. *mmu_out = mmu;
  110. *iommu_unit_out = iommu_unit;
  111. return 0;
  112. }
  113. }
  114. }
  115. }
  116. return -EINVAL;
  117. }
  118. static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
  119. struct device *dev)
  120. {
  121. int k;
  122. for (k = 0; unit && k < unit->dev_count; k++) {
  123. if (unit->dev[k].dev == dev)
  124. return &(unit->dev[k]);
  125. }
  126. return NULL;
  127. }
  128. /* These functions help find the nearest allocated memory entries on either side
  129. * of a faulting address. If we know the nearby allocations, we can make
  130. * a better determination of what should have been located in the
  131. * faulting region
  132. */
  133. /*
  134. * A local structure to make it easy to store the interesting bits for the
  135. * memory entries on either side of the faulting address
  136. */
  137. struct _mem_entry {
  138. unsigned int gpuaddr;
  139. unsigned int size;
  140. unsigned int flags;
  141. unsigned int priv;
  142. pid_t pid;
  143. };
  144. /*
  145. * Find the closest allocated memory block with a smaller GPU address than the
  146. * given address
  147. */
  148. static void _prev_entry(struct kgsl_process_private *priv,
  149. unsigned int faultaddr, struct _mem_entry *ret)
  150. {
  151. struct rb_node *node;
  152. struct kgsl_mem_entry *entry;
  153. for (node = rb_first(&priv->mem_rb); node; ) {
  154. entry = rb_entry(node, struct kgsl_mem_entry, node);
  155. if (entry->memdesc.gpuaddr > faultaddr)
  156. break;
  157. /*
  158. * If this is closer to the faulting address, then copy
  159. * the entry
  160. */
  161. if (entry->memdesc.gpuaddr > ret->gpuaddr) {
  162. ret->gpuaddr = entry->memdesc.gpuaddr;
  163. ret->size = entry->memdesc.size;
  164. ret->flags = entry->memdesc.flags;
  165. ret->priv = entry->memdesc.priv;
  166. ret->pid = pid_nr(priv->pid);
  167. }
  168. node = rb_next(&entry->node);
  169. }
  170. }
  171. /*
  172. * Find the closest allocated memory block with a greater starting GPU address
  173. * than the given address
  174. */
  175. static void _next_entry(struct kgsl_process_private *priv,
  176. unsigned int faultaddr, struct _mem_entry *ret)
  177. {
  178. struct rb_node *node;
  179. struct kgsl_mem_entry *entry;
  180. for (node = rb_last(&priv->mem_rb); node; ) {
  181. entry = rb_entry(node, struct kgsl_mem_entry, node);
  182. if (entry->memdesc.gpuaddr < faultaddr)
  183. break;
  184. /*
  185. * If this is closer to the faulting address, then copy
  186. * the entry
  187. */
  188. if (entry->memdesc.gpuaddr < ret->gpuaddr) {
  189. ret->gpuaddr = entry->memdesc.gpuaddr;
  190. ret->size = entry->memdesc.size;
  191. ret->flags = entry->memdesc.flags;
  192. ret->priv = entry->memdesc.priv;
  193. ret->pid = pid_nr(priv->pid);
  194. }
  195. node = rb_prev(&entry->node);
  196. }
  197. }
  198. static void _find_mem_entries(struct kgsl_mmu *mmu, unsigned int faultaddr,
  199. unsigned int ptbase, struct _mem_entry *preventry,
  200. struct _mem_entry *nextentry)
  201. {
  202. struct kgsl_process_private *private;
  203. int id = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
  204. memset(preventry, 0, sizeof(*preventry));
  205. memset(nextentry, 0, sizeof(*nextentry));
  206. /* Set the maximum possible size as an initial value */
  207. nextentry->gpuaddr = 0xFFFFFFFF;
  208. mutex_lock(&kgsl_driver.process_mutex);
  209. list_for_each_entry(private, &kgsl_driver.process_list, list) {
  210. if (private->pagetable && (private->pagetable->name != id))
  211. continue;
  212. spin_lock(&private->mem_lock);
  213. _prev_entry(private, faultaddr, preventry);
  214. _next_entry(private, faultaddr, nextentry);
  215. spin_unlock(&private->mem_lock);
  216. }
  217. mutex_unlock(&kgsl_driver.process_mutex);
  218. }
  219. static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
  220. {
  221. char name[32];
  222. memset(name, 0, sizeof(name));
  223. kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags);
  224. KGSL_LOG_DUMP(device,
  225. "[%8.8X - %8.8X] %s (pid = %d) (%s)\n",
  226. entry->gpuaddr,
  227. entry->gpuaddr + entry->size,
  228. entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
  229. entry->pid, name);
  230. }
  231. static void _check_if_freed(struct kgsl_iommu_device *iommu_dev,
  232. unsigned long addr, unsigned int pid)
  233. {
  234. unsigned long gpuaddr = addr;
  235. unsigned long size = 0;
  236. unsigned int flags = 0;
  237. char name[32];
  238. memset(name, 0, sizeof(name));
  239. if (kgsl_memfree_find_entry(pid, &gpuaddr, &size, &flags)) {
  240. kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
  241. KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- premature free ----\n");
  242. KGSL_LOG_DUMP(iommu_dev->kgsldev,
  243. "[%8.8lX-%8.8lX] (%s) was already freed by pid %d\n",
  244. gpuaddr, gpuaddr + size, name, pid);
  245. }
  246. }
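/*
 * kgsl_iommu_fault_handler - IOMMU pagefault handler registered with the
 * IOMMU driver via iommu_set_fault_handler(). It marks the fault in the
 * current context, logs the faulting address, FSR/FSYNR values and any
 * nearby or already-freed allocations, and returns -EBUSY to stall the
 * faulting transaction when the GPUHALT fault policy is enabled.
 */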
  247. static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
  248. struct device *dev, unsigned long addr, int flags, void *token)
  249. {
  250. int ret = 0;
  251. struct kgsl_mmu *mmu;
  252. struct kgsl_iommu *iommu;
  253. struct kgsl_iommu_unit *iommu_unit;
  254. struct kgsl_iommu_device *iommu_dev;
  255. unsigned int ptbase, fsr;
  256. unsigned int pid;
  257. struct _mem_entry prev, next;
  258. unsigned int fsynr0, fsynr1;
  259. int write;
  260. struct kgsl_device *device;
  261. struct adreno_device *adreno_dev;
  262. unsigned int no_page_fault_log = 0;
  263. unsigned int curr_context_id = 0;
  264. unsigned int curr_global_ts = 0;
  265. struct kgsl_context *context;
  266. ret = get_iommu_unit(dev, &mmu, &iommu_unit);
  267. if (ret)
  268. goto done;
  269. device = mmu->device;
  270. adreno_dev = ADRENO_DEVICE(device);
  271. if (atomic_read(&mmu->fault)) {
  272. if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
  273. ret = -EBUSY;
  274. goto done;
  275. }
  276. iommu_dev = get_iommu_device(iommu_unit, dev);
  277. if (!iommu_dev) {
  278. KGSL_CORE_ERR("Invalid IOMMU device %pK\n", dev);
  279. ret = -ENOSYS;
  280. goto done;
  281. }
  282. iommu = mmu->priv;
  283. /*
  284. * Set the fault bits before any printks so that if the fault handler
  285. * runs it will know it is dealing with a pagefault
  286. */
  287. kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
  288. KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
  289. context = kgsl_context_get(device, curr_context_id);
  290. if (context != NULL) {
  291. kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
  292. KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
  293. eoptimestamp));
  294. /* save pagefault timestamp for GFT */
  295. set_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
  296. context->pagefault_ts = curr_global_ts;
  297. kgsl_context_put(context);
  298. context = NULL;
  299. }
  300. atomic_set(&mmu->fault, 1);
  301. iommu_dev->fault = 1;
  302. if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
  303. adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
  304. /* turn off GPU IRQ so we don't get faults from it too */
  305. kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
  306. adreno_dispatcher_schedule(device);
  307. }
  308. ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
  309. iommu_dev->ctx_id, TTBR0);
  310. fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
  311. iommu_dev->ctx_id, FSR);
  312. fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
  313. iommu_dev->ctx_id, FSYNR0);
  314. fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
  315. iommu_dev->ctx_id, FSYNR1);
  316. if (msm_soc_version_supports_iommu_v0())
  317. write = ((fsynr1 & (KGSL_IOMMU_FSYNR1_AWRITE_MASK <<
  318. KGSL_IOMMU_FSYNR1_AWRITE_SHIFT)) ? 1 : 0);
  319. else
  320. write = ((fsynr0 & (KGSL_IOMMU_V1_FSYNR0_WNR_MASK <<
  321. KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT)) ? 1 : 0);
  322. pid = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
  323. if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
  324. no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
  325. if (!no_page_fault_log) {
  326. KGSL_MEM_CRIT(iommu_dev->kgsldev,
  327. "GPU PAGE FAULT: addr = %lX pid = %d\n", addr, pid);
  328. KGSL_MEM_CRIT(iommu_dev->kgsldev,
  329. "context = %d FSR = %X FSYNR0 = %X FSYNR1 = %X(%s fault)\n",
  330. iommu_dev->ctx_id, fsr, fsynr0, fsynr1,
  331. write ? "write" : "read");
  332. _check_if_freed(iommu_dev, addr, pid);
  333. KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
  334. _find_mem_entries(mmu, addr, ptbase, &prev, &next);
  335. if (prev.gpuaddr)
  336. _print_entry(iommu_dev->kgsldev, &prev);
  337. else
  338. KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
  339. KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
  340. if (next.gpuaddr != 0xFFFFFFFF)
  341. _print_entry(iommu_dev->kgsldev, &next);
  342. else
  343. KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
  344. }
  345. trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
  346. kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
  347. write ? "write" : "read");
  348. /*
  349. * We do not want the h/w to resume fetching data from an iommu unit
  350. * that has faulted; this is better for debugging as it will stall
  351. * the GPU and trigger a snapshot. Return -EBUSY to stall the
  352. * transaction.
  353. */
  354. if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
  355. ret = -EBUSY;
  356. done:
  357. return ret;
  358. }
  359. /*
  360. * kgsl_iommu_disable_clk - Disable iommu clocks
  361. * @mmu - Pointer to mmu structure
  362. * @ctx_id - The context bank whose clocks are to be turned off
  363. *
  364. * Disables the iommu clocks of the given context bank. Return - void
  365. */
  366. static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
  367. {
  368. struct kgsl_iommu *iommu = mmu->priv;
  369. struct msm_iommu_drvdata *iommu_drvdata;
  370. int i, j;
  371. for (i = 0; i < iommu->unit_count; i++) {
  372. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
  373. for (j = 0; j < iommu_unit->dev_count; j++) {
  374. if (ctx_id != iommu_unit->dev[j].ctx_id)
  375. continue;
  376. atomic_dec(&iommu_unit->dev[j].clk_enable_count);
  377. BUG_ON(
  378. atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
  379. /*
  380. * the clock calls have a refcount so call them on every
  381. * enable/disable call
  382. */
  383. iommu_drvdata = dev_get_drvdata(
  384. iommu_unit->dev[j].dev->parent);
  385. if (iommu_drvdata->aclk)
  386. clk_disable_unprepare(iommu_drvdata->aclk);
  387. if (iommu_drvdata->clk)
  388. clk_disable_unprepare(iommu_drvdata->clk);
  389. clk_disable_unprepare(iommu_drvdata->pclk);
  390. }
  391. }
  392. }
  393. /*
  394. * kgsl_iommu_clk_disable_event() - Disable IOMMU clocks after timestamp
  395. * @device: The kgsl device pointer
  396. * @context: Pointer to the context that fired the event
  397. * @data: Pointer to the private data for the event
  398. * @type: Result of the callback (retired or cancelled)
  399. *
  400. * An event function that is executed when
  401. * the required timestamp is reached. It disables the IOMMU clocks if
  402. * the timestamp on which the clocks can be disabled has expired.
  403. *
  404. * Return - void
  405. */
  406. static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
  407. struct kgsl_context *context, void *data, int type)
  408. {
  409. struct kgsl_iommu_disable_clk_param *param = data;
  410. kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
  411. /* Free param we are done using it */
  412. kfree(param);
  413. }
  414. /*
  415. * kgsl_iommu_disable_clk_on_ts - Sets up an event to disable IOMMU clocks
  416. * @mmu - The kgsl MMU pointer
  417. * @ts - Timestamp on which the clocks should be disabled
  418. * @ctx_id - Context id of the IOMMU context for which clocks are to be
  419. * turned off
  420. *
  421. * Creates an event to disable the IOMMU clocks at the given timestamp.
  422. * If the event already exists, the timestamp at which the clocks will be
  423. * disabled is updated with the passed-in ts if it is greater than the
  424. * current value at which the clocks will be disabled.
  425. *
  426. * The clocks themselves are turned off from the event callback,
  427. * kgsl_iommu_clk_disable_event().
  428. * Return - void
  429. */
  430. static void
  431. kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
  432. unsigned int ts, int ctx_id)
  433. {
  434. struct kgsl_iommu_disable_clk_param *param;
  435. param = kzalloc(sizeof(*param), GFP_KERNEL);
  436. if (!param) {
  437. KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*param));
  438. return;
  439. }
  440. param->mmu = mmu;
  441. param->ctx_id = ctx_id;
  442. param->ts = ts;
  443. if (kgsl_add_event(mmu->device, &mmu->device->iommu_events,
  444. ts, kgsl_iommu_clk_disable_event, param)) {
  445. KGSL_DRV_ERR(mmu->device,
  446. "Failed to add IOMMU disable clk event\n");
  447. kfree(param);
  448. }
  449. }
  450. /*
  451. * kgsl_iommu_enable_clk - Enable iommu clocks
  452. * @mmu - Pointer to mmu structure
  453. * @ctx_id - The context bank whose clocks are to be turned on
  454. *
  455. * Enables iommu clocks of a given context
  456. * Return: 0 on success else error code
  457. */
  458. static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
  459. int ctx_id)
  460. {
  461. int ret = 0;
  462. int i, j;
  463. struct kgsl_iommu *iommu = mmu->priv;
  464. struct msm_iommu_drvdata *iommu_drvdata;
  465. for (i = 0; i < iommu->unit_count; i++) {
  466. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
  467. for (j = 0; j < iommu_unit->dev_count; j++) {
  468. if (ctx_id != iommu_unit->dev[j].ctx_id)
  469. continue;
  470. iommu_drvdata =
  471. dev_get_drvdata(iommu_unit->dev[j].dev->parent);
  472. ret = clk_prepare_enable(iommu_drvdata->pclk);
  473. if (ret)
  474. goto done;
  475. if (iommu_drvdata->clk) {
  476. ret = clk_prepare_enable(iommu_drvdata->clk);
  477. if (ret) {
  478. clk_disable_unprepare(
  479. iommu_drvdata->pclk);
  480. goto done;
  481. }
  482. }
  483. if (iommu_drvdata->aclk) {
  484. ret = clk_prepare_enable(iommu_drvdata->aclk);
  485. if (ret) {
  486. if (iommu_drvdata->clk)
  487. clk_disable_unprepare(
  488. iommu_drvdata->clk);
  489. clk_disable_unprepare(
  490. iommu_drvdata->pclk);
  491. goto done;
  492. }
  493. }
  494. atomic_inc(&iommu_unit->dev[j].clk_enable_count);
  495. }
  496. }
  497. done:
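/*
 * Error path: unwind by disabling the clocks that were already enabled
 * earlier in this call, walking the units and their contexts in reverse,
 * so that the per-device clk_enable_count stays balanced.
 */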
  498. if (ret) {
  499. struct kgsl_iommu_unit *iommu_unit;
  500. if (iommu->unit_count == i)
  501. i--;
  502. iommu_unit = &iommu->iommu_units[i];
  503. do {
  504. for (j--; j >= 0; j--)
  505. kgsl_iommu_disable_clk(mmu, ctx_id);
  506. i--;
  507. if (i >= 0) {
  508. iommu_unit = &iommu->iommu_units[i];
  509. j = iommu_unit->dev_count;
  510. }
  511. } while (i >= 0);
  512. }
  513. return ret;
  514. }
  515. /*
  516. * kgsl_iommu_pt_equal - Check if pagetables are equal
  517. * @mmu - Pointer to mmu structure
  518. * @pt - Pointer to pagetable
  519. * @pt_base - Address of a pagetable that the IOMMU register is
  520. * programmed with
  521. *
  522. * Checks whether the pt_base is equal to the base address of
  523. * the pagetable which is contained in the pt structure
  524. * Return - Non-zero if the pagetable addresses are equal else 0
  525. */
  526. static int kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
  527. struct kgsl_pagetable *pt,
  528. phys_addr_t pt_base)
  529. {
  530. struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
  531. phys_addr_t domain_ptbase;
  532. if (iommu_pt == NULL)
  533. return 0;
  534. domain_ptbase = iommu_get_pt_base_addr(iommu_pt->domain)
  535. & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  536. pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  537. return (domain_ptbase == pt_base);
  538. }
  539. /*
  540. * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
  541. * @pt - Pointer to the pagetable which is to be freed
  542. *
  543. * Return - void
  544. */
  545. static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
  546. {
  547. struct kgsl_iommu_pt *iommu_pt = pt->priv;
  548. if (iommu_pt->domain)
  549. msm_unregister_domain(iommu_pt->domain);
  550. kfree(iommu_pt);
  551. iommu_pt = NULL;
  552. }
  553. /*
  554. * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
  555. *
  556. * Allocate memory to hold a pagetable and allocate the IOMMU
  557. * domain which is the actual IOMMU pagetable
  558. * Return - pointer to the new pagetable or NULL on failure
  559. */
  560. void *kgsl_iommu_create_pagetable(void)
  561. {
  562. int domain_num;
  563. struct kgsl_iommu_pt *iommu_pt;
  564. struct msm_iova_partition kgsl_partition = {
  565. .start = 0,
  566. .size = 0xFFFFFFFF,
  567. };
  568. struct msm_iova_layout kgsl_layout = {
  569. .partitions = &kgsl_partition,
  570. .npartitions = 1,
  571. .client_name = "kgsl",
  572. .domain_flags = 0,
  573. };
  574. iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
  575. if (!iommu_pt) {
  576. KGSL_CORE_ERR("kzalloc(%d) failed\n",
  577. sizeof(struct kgsl_iommu_pt));
  578. return NULL;
  579. }
  580. /* L2 redirect is not stable on IOMMU v1 */
  581. if (msm_soc_version_supports_iommu_v0())
  582. kgsl_layout.domain_flags = MSM_IOMMU_DOMAIN_PT_CACHEABLE;
  583. domain_num = msm_register_domain(&kgsl_layout);
  584. if (domain_num >= 0) {
  585. iommu_pt->domain = msm_get_iommu_domain(domain_num);
  586. if (iommu_pt->domain) {
  587. iommu_set_fault_handler(iommu_pt->domain,
  588. kgsl_iommu_fault_handler, NULL);
  589. return iommu_pt;
  590. }
  591. }
  592. KGSL_CORE_ERR("Failed to create iommu domain\n");
  593. kfree(iommu_pt);
  594. return NULL;
  595. }
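/*
 * The kgsl_iommu_pt returned above ends up as kgsl_pagetable->priv; the
 * rest of this file reads it back from pt->priv (see
 * kgsl_iommu_destroy_pagetable() and kgsl_iommu_pt_equal()).
 */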
  596. /*
  597. * kgsl_detach_pagetable_iommu_domain - Detach the IOMMU units from a
  598. * pagetable
  599. * @mmu - Pointer to the device mmu structure
  600. *
  601. * Detach each IOMMU unit from the domain that is contained in the
  602. * hwpagetable of the given mmu (the default pagetable, or the private
  603. * bank table for the priv context when one exists). After detaching,
  604. * the IOMMU unit is not usable because its PTBR will no longer be
  605. * programmed.
  606. * Return - void
  607. */
  608. static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
  609. {
  610. struct kgsl_iommu_pt *iommu_pt;
  611. struct kgsl_iommu *iommu = mmu->priv;
  612. int i, j;
  613. for (i = 0; i < iommu->unit_count; i++) {
  614. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
  615. iommu_pt = mmu->defaultpagetable->priv;
  616. for (j = 0; j < iommu_unit->dev_count; j++) {
  617. /*
  618. * If there is a 2nd default pagetable then priv domain
  619. * is attached with this pagetable
  620. */
  621. if (mmu->priv_bank_table &&
  622. (KGSL_IOMMU_CONTEXT_PRIV == j))
  623. iommu_pt = mmu->priv_bank_table->priv;
  624. if (iommu_unit->dev[j].attached) {
  625. iommu_detach_device(iommu_pt->domain,
  626. iommu_unit->dev[j].dev);
  627. iommu_unit->dev[j].attached = false;
  628. KGSL_MEM_INFO(mmu->device, "iommu %pK detached "
  629. "from user dev of MMU: %pK\n",
  630. iommu_pt->domain, mmu);
  631. }
  632. }
  633. }
  634. }
  635. /*
  636. * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU units to a
  637. * pagetable, i.e. set the IOMMU's PTBR to the pagetable address and
  638. * set up the other IOMMU registers for the device so that it becomes
  639. * active
  640. * @mmu - Pointer to the device mmu structure
  641. *
  642. * Attach each IOMMU unit to the domain that is contained in the
  643. * hwpagetable of the given mmu (the default pagetable, or the
  644. * private bank table for the priv context when one exists).
  645. *
  646. * Return - 0 on success else error code
  647. */
  648. static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
  649. {
  650. struct kgsl_iommu_pt *iommu_pt;
  651. struct kgsl_iommu *iommu = mmu->priv;
  652. int i, j, ret = 0;
  653. /*
  654. * Loop through all the iommu devices under all iommu units and
  655. * attach the domain
  656. */
  657. for (i = 0; i < iommu->unit_count; i++) {
  658. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
  659. iommu_pt = mmu->defaultpagetable->priv;
  660. for (j = 0; j < iommu_unit->dev_count; j++) {
  661. /*
  662. * If there is a 2nd default pagetable then priv domain
  663. * is attached to this pagetable
  664. */
  665. if (mmu->priv_bank_table &&
  666. (KGSL_IOMMU_CONTEXT_PRIV == j))
  667. iommu_pt = mmu->priv_bank_table->priv;
  668. if (!iommu_unit->dev[j].attached) {
  669. ret = iommu_attach_device(iommu_pt->domain,
  670. iommu_unit->dev[j].dev);
  671. if (ret) {
  672. KGSL_MEM_ERR(mmu->device,
  673. "Failed to attach device, err %d\n",
  674. ret);
  675. goto done;
  676. }
  677. iommu_unit->dev[j].attached = true;
  678. KGSL_MEM_INFO(mmu->device,
  679. "iommu pt %pK attached to dev %pK, ctx_id %d\n",
  680. iommu_pt->domain, iommu_unit->dev[j].dev,
  681. iommu_unit->dev[j].ctx_id);
  682. }
  683. }
  684. }
  685. done:
  686. return ret;
  687. }
  688. /*
  689. * _get_iommu_ctxs - Get device pointers to IOMMU contexts
  690. * @mmu - Pointer to mmu device
  691. * @data - Pointer to the platform data containing information about
  692. * iommu devices for one iommu unit
  693. * @unit_id - The IOMMU unit number. This is not a specific ID but just
  694. * a serial number. The serial numbers are treated as IDs of the
  695. * IOMMU units
  696. *
  697. * Return - 0 on success else error code
  698. */
  699. static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
  700. struct kgsl_device_iommu_data *data, unsigned int unit_id)
  701. {
  702. struct kgsl_iommu *iommu = mmu->priv;
  703. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
  704. int i, j;
  705. int found_ctx;
  706. int ret = 0;
  707. for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
  708. found_ctx = 0;
  709. for (i = 0; i < data->iommu_ctx_count; i++) {
  710. if (j == data->iommu_ctxs[i].ctx_id) {
  711. found_ctx = 1;
  712. break;
  713. }
  714. }
  715. if (!found_ctx)
  716. break;
  717. if (!data->iommu_ctxs[i].iommu_ctx_name) {
  718. KGSL_CORE_ERR("Context name invalid\n");
  719. ret = -EINVAL;
  720. goto done;
  721. }
  722. atomic_set(
  723. &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
  724. 0);
  725. iommu_unit->dev[iommu_unit->dev_count].dev =
  726. msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
  727. if (NULL == iommu_unit->dev[iommu_unit->dev_count].dev)
  728. ret = -EINVAL;
  729. if (IS_ERR(iommu_unit->dev[iommu_unit->dev_count].dev)) {
  730. ret = PTR_ERR(
  731. iommu_unit->dev[iommu_unit->dev_count].dev);
  732. iommu_unit->dev[iommu_unit->dev_count].dev = NULL;
  733. }
  734. if (ret)
  735. goto done;
  736. iommu_unit->dev[iommu_unit->dev_count].ctx_id =
  737. data->iommu_ctxs[i].ctx_id;
  738. iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
  739. KGSL_DRV_INFO(mmu->device,
  740. "Obtained dev handle %pK for iommu context %s\n",
  741. iommu_unit->dev[iommu_unit->dev_count].dev,
  742. data->iommu_ctxs[i].iommu_ctx_name);
  743. iommu_unit->dev_count++;
  744. }
  745. done:
  746. if (!iommu_unit->dev_count && !ret)
  747. ret = -EINVAL;
  748. if (ret) {
  749. /*
  750. * If at least the first context is initialized on v1
  751. * then we can continue
  752. */
  753. if (!msm_soc_version_supports_iommu_v0() &&
  754. iommu_unit->dev_count)
  755. ret = 0;
  756. else
  757. KGSL_CORE_ERR(
  758. "Failed to initialize iommu contexts, err: %d\n", ret);
  759. }
  760. return ret;
  761. }
  762. /*
  763. * kgsl_iommu_start_sync_lock - Initialize some variables during MMU start up
  764. * for GPU CPU synchronization
  765. * @mmu - Pointer to mmu device
  766. *
  767. * Return - 0 on success else error code
  768. */
  769. static int kgsl_iommu_start_sync_lock(struct kgsl_mmu *mmu)
  770. {
  771. struct kgsl_iommu *iommu = mmu->priv;
  772. uint32_t lock_gpu_addr = 0;
  773. if (KGSL_DEVICE_3D0 != mmu->device->id ||
  774. !msm_soc_version_supports_iommu_v0() ||
  775. !kgsl_mmu_is_perprocess(mmu) ||
  776. iommu->sync_lock_vars)
  777. return 0;
  778. if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
  779. KGSL_DRV_ERR(mmu->device,
  780. "The GPU microcode does not support IOMMUv1 sync opcodes\n");
  781. return -ENXIO;
  782. }
  783. /* Store Lock variables GPU address */
  784. lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr +
  785. iommu->sync_lock_offset);
  786. kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
  787. (offsetof(struct remote_iommu_petersons_spinlock,
  788. flag[PROC_APPS])));
  789. kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
  790. (offsetof(struct remote_iommu_petersons_spinlock,
  791. flag[PROC_GPU])));
  792. kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
  793. (offsetof(struct remote_iommu_petersons_spinlock, turn)));
  794. iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;
  795. return 0;
  796. }
  797. #ifdef CONFIG_MSM_IOMMU_GPU_SYNC
  798. /*
  799. * kgsl_iommu_init_sync_lock - Init Sync Lock between GPU and CPU
  800. * @mmu - Pointer to mmu device
  801. *
  802. * Return - 0 on success else error code
  803. */
  804. static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
  805. {
  806. struct kgsl_iommu *iommu = mmu->priv;
  807. int status = 0;
  808. uint32_t lock_phy_addr = 0;
  809. uint32_t page_offset = 0;
  810. if (!msm_soc_version_supports_iommu_v0() ||
  811. !kgsl_mmu_is_perprocess(mmu))
  812. return status;
  813. /*
  814. * For 2D devices cpu side sync lock is required. For 3D device,
  815. * since we only have a single 3D core and we always ensure that
  816. * 3D core is idle while writing to IOMMU register using CPU this
  817. * lock is not required
  818. */
  819. if (KGSL_DEVICE_2D0 == mmu->device->id ||
  820. KGSL_DEVICE_2D1 == mmu->device->id) {
  821. return status;
  822. }
  823. /* Return if already initialized */
  824. if (iommu->sync_lock_initialized)
  825. return status;
  826. iommu_access_ops = msm_get_iommu_access_ops();
  827. if (iommu_access_ops && iommu_access_ops->iommu_lock_initialize) {
  828. lock_phy_addr = (uint32_t)
  829. iommu_access_ops->iommu_lock_initialize();
  830. if (!lock_phy_addr) {
  831. iommu_access_ops = NULL;
  832. return status;
  833. }
  834. lock_phy_addr = lock_phy_addr - (uint32_t)MSM_SHARED_RAM_BASE +
  835. (uint32_t)msm_shared_ram_phys;
  836. }
  837. /* Align the physical address to PAGE boundary and store the offset */
  838. page_offset = (lock_phy_addr & (PAGE_SIZE - 1));
  839. lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1));
  840. iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr;
  841. iommu->sync_lock_offset = page_offset;
  842. iommu->sync_lock_desc.size =
  843. PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars));
  844. status = memdesc_sg_phys(&iommu->sync_lock_desc,
  845. iommu->sync_lock_desc.physaddr,
  846. iommu->sync_lock_desc.size);
  847. if (status) {
  848. iommu_access_ops = NULL;
  849. return status;
  850. }
  851. /* Flag Sync Lock is Initialized */
  852. iommu->sync_lock_initialized = 1;
  853. return status;
  854. }
  855. #else
  856. static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
  857. {
  858. return 0;
  859. }
  860. #endif
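/*
 * The two helpers below build the GPU command stream side of the
 * Peterson's lock. Roughly, the acquire sequence amounts to:
 *
 *   flag[PROC_GPU] = 1;
 *   turn = 0;                          (defer to the CPU)
 *   while (flag[PROC_APPS] && turn == 0)
 *       ;                              (spin, via CP_TEST_TWO_MEMS)
 *
 * and the release sequence clears flag[PROC_GPU] again.
 */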
  861. /*
  862. * kgsl_iommu_sync_lock - Acquire Sync Lock between GPU and CPU
  863. * @mmu - Pointer to mmu device
  864. * @cmds - Pointer to array of commands
  865. *
  866. * Return - int - number of commands.
  867. */
  868. inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu,
  869. unsigned int *cmds)
  870. {
  871. struct kgsl_device *device = mmu->device;
  872. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  873. struct kgsl_iommu *iommu = mmu->device->mmu.priv;
  874. struct remote_iommu_petersons_spinlock *lock_vars =
  875. iommu->sync_lock_vars;
  876. unsigned int *start = cmds;
  877. if (!iommu->sync_lock_initialized)
  878. return 0;
  879. *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
  880. *cmds++ = lock_vars->flag[PROC_GPU];
  881. *cmds++ = 1;
  882. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  883. *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
  884. /* MEM SPACE = memory, FUNCTION = equals */
  885. *cmds++ = 0x13;
  886. *cmds++ = lock_vars->flag[PROC_GPU];
  887. *cmds++ = 0x1;
  888. *cmds++ = 0x1;
  889. *cmds++ = 0x1;
  890. /* WAIT_REG_MEM turns back on protected mode - push it off */
  891. *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
  892. *cmds++ = 0;
  893. *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
  894. *cmds++ = lock_vars->turn;
  895. *cmds++ = 0;
  896. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  897. *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
  898. /* MEM SPACE = memory, FUNCTION = equals */
  899. *cmds++ = 0x13;
  900. *cmds++ = lock_vars->flag[PROC_GPU];
  901. *cmds++ = 0x1;
  902. *cmds++ = 0x1;
  903. *cmds++ = 0x1;
  904. /* WAIT_REG_MEM turns back on protected mode - push it off */
  905. *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
  906. *cmds++ = 0;
  907. *cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3);
  908. *cmds++ = lock_vars->flag[PROC_APPS];
  909. *cmds++ = lock_vars->turn;
  910. *cmds++ = 0;
  911. /* TEST_TWO_MEMS turns back on protected mode - push it off */
  912. *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
  913. *cmds++ = 0;
  914. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  915. return cmds - start;
  916. }
  917. /*
  918. * kgsl_iommu_sync_unlock - Release Sync Lock between GPU and CPU
  919. * @mmu - Pointer to mmu device
  920. * @cmds - Pointer to array of commands
  921. *
  922. * Return - int - number of commands.
  923. */
  924. inline unsigned int kgsl_iommu_sync_unlock(struct kgsl_mmu *mmu,
  925. unsigned int *cmds)
  926. {
  927. struct kgsl_device *device = mmu->device;
  928. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  929. struct kgsl_iommu *iommu = mmu->device->mmu.priv;
  930. struct remote_iommu_petersons_spinlock *lock_vars =
  931. iommu->sync_lock_vars;
  932. unsigned int *start = cmds;
  933. if (!iommu->sync_lock_initialized)
  934. return 0;
  935. *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
  936. *cmds++ = lock_vars->flag[PROC_GPU];
  937. *cmds++ = 0;
  938. *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
  939. /* MEM SPACE = memory, FUNCTION = equals */
  940. *cmds++ = 0x13;
  941. *cmds++ = lock_vars->flag[PROC_GPU];
  942. *cmds++ = 0x0;
  943. *cmds++ = 0x1;
  944. *cmds++ = 0x1;
  945. /* WAIT_REG_MEM turns back on protected mode - push it off */
  946. *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
  947. *cmds++ = 0;
  948. cmds += adreno_add_idle_cmds(adreno_dev, cmds);
  949. return cmds - start;
  950. }
  951. /*
  952. * kgsl_get_iommu_ctxt - Get device pointers to IOMMU contexts
  953. * @mmu - Pointer to mmu device
  954. *
  955. * Get the device pointers for the IOMMU user and priv contexts of the
  956. * kgsl device
  957. * Return - 0 on success else error code
  958. */
  959. static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu)
  960. {
  961. struct platform_device *pdev =
  962. container_of(mmu->device->parentdev, struct platform_device,
  963. dev);
  964. struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
  965. struct kgsl_iommu *iommu = mmu->device->mmu.priv;
  966. int i, ret = 0;
  967. /* Go through the IOMMU data and get all the context devices */
  968. if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) {
  969. KGSL_CORE_ERR("Too many IOMMU units defined\n");
  970. ret = -EINVAL;
  971. goto done;
  972. }
  973. for (i = 0; i < pdata_dev->iommu_count; i++) {
  974. ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i);
  975. if (ret)
  976. break;
  977. }
  978. iommu->unit_count = pdata_dev->iommu_count;
  979. done:
  980. return ret;
  981. }
  982. /*
  983. * kgsl_set_register_map - Map the IOMMU registers in the memory descriptors
  984. * of the respective iommu units
  985. * @mmu - Pointer to mmu structure
  986. *
  987. * Return - 0 on success else error code
  988. */
  989. static int kgsl_set_register_map(struct kgsl_mmu *mmu)
  990. {
  991. struct platform_device *pdev =
  992. container_of(mmu->device->parentdev, struct platform_device,
  993. dev);
  994. struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
  995. struct kgsl_iommu *iommu = mmu->device->mmu.priv;
  996. struct kgsl_iommu_unit *iommu_unit;
  997. int i = 0, ret = 0;
  998. for (; i < pdata_dev->iommu_count; i++) {
  999. struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i];
  1000. iommu_unit = &iommu->iommu_units[i];
  1001. /* set up the IOMMU register map for the given IOMMU unit */
  1002. if (!data.physstart || !data.physend) {
  1003. KGSL_CORE_ERR("The register range for IOMMU unit not"
  1004. " specified\n");
  1005. ret = -EINVAL;
  1006. goto err;
  1007. }
  1008. iommu_unit->reg_map.hostptr = ioremap(data.physstart,
  1009. data.physend - data.physstart + 1);
  1010. if (!iommu_unit->reg_map.hostptr) {
  1011. KGSL_CORE_ERR("Failed to map SMMU register address "
  1012. "space from %x to %x\n", data.physstart,
  1013. data.physend - data.physstart + 1);
  1014. ret = -ENOMEM;
  1015. i--;
  1016. goto err;
  1017. }
  1018. iommu_unit->reg_map.size = data.physend - data.physstart + 1;
  1019. iommu_unit->reg_map.physaddr = data.physstart;
  1020. ret = memdesc_sg_phys(&iommu_unit->reg_map, data.physstart,
  1021. iommu_unit->reg_map.size);
  1022. if (ret)
  1023. goto err;
  1024. if (!msm_soc_version_supports_iommu_v0())
  1025. iommu_unit->iommu_halt_enable = 1;
  1026. iommu_unit->ahb_base = data.physstart - mmu->device->reg_phys;
  1027. }
  1028. iommu->unit_count = pdata_dev->iommu_count;
  1029. return ret;
  1030. err:
  1031. /* Unmap any mapped IOMMU regions */
  1032. for (; i >= 0; i--) {
  1033. iommu_unit = &iommu->iommu_units[i];
  1034. iounmap(iommu_unit->reg_map.hostptr);
  1035. iommu_unit->reg_map.size = 0;
  1036. iommu_unit->reg_map.physaddr = 0;
  1037. }
  1038. return ret;
  1039. }
  1040. /*
  1041. * kgsl_iommu_get_pt_base_addr - Get the address of the pagetable that the
  1042. * IOMMU ttbr0 register is programmed with
  1043. * @mmu - Pointer to mmu
  1044. * @pt - kgsl pagetable pointer that contains the IOMMU domain pointer
  1045. *
  1046. * Return - actual pagetable address that the ttbr0 register is programmed
  1047. * with
  1048. */
  1049. static phys_addr_t kgsl_iommu_get_pt_base_addr(struct kgsl_mmu *mmu,
  1050. struct kgsl_pagetable *pt)
  1051. {
  1052. struct kgsl_iommu_pt *iommu_pt = pt->priv;
  1053. return iommu_get_pt_base_addr(iommu_pt->domain) &
  1054. KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
  1055. }
  1056. /*
  1057. * kgsl_iommu_get_default_ttbr0 - Return the ttbr0 value programmed by
  1058. * the iommu driver
  1059. * @mmu - Pointer to mmu structure
  1060. * @unit_id - The IOMMU unit whose default ttbr0 value is to be returned
  1061. * @ctx_id - The context bank whose default ttbr0 value is to be
  1062. * returned
  1063. * Return - the ttbr0 value programmed by the iommu driver
  1064. */
  1065. static phys_addr_t kgsl_iommu_get_default_ttbr0(struct kgsl_mmu *mmu,
  1066. unsigned int unit_id,
  1067. enum kgsl_iommu_context_id ctx_id)
  1068. {
  1069. struct kgsl_iommu *iommu = mmu->priv;
  1070. int i, j;
  1071. for (i = 0; i < iommu->unit_count; i++) {
  1072. struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
  1073. for (j = 0; j < iommu_unit->dev_count; j++)
  1074. if (unit_id == i &&
  1075. ctx_id == iommu_unit->dev[j].ctx_id)
  1076. return iommu_unit->dev[j].default_ttbr0;
  1077. }
  1078. return 0;
  1079. }
  1080. static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
  1081. struct kgsl_pagetable *pagetable,
  1082. unsigned int context_id)
  1083. {
  1084. int ret = 0;
  1085. if (mmu->flags & KGSL_FLAGS_STARTED) {
  1086. /* if the page table is not current, then set up the mmu to use the
  1087. * newly specified page table
  1088. */
  1089. if (mmu->hwpagetable != pagetable) {
  1090. unsigned int flags = 0;
  1091. mmu->hwpagetable = pagetable;
  1092. flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
  1093. mmu->device->id) |
  1094. KGSL_MMUFLAGS_TLBFLUSH;
  1095. ret = kgsl_setstate(mmu, context_id,
  1096. KGSL_MMUFLAGS_PTUPDATE | flags);
  1097. }
  1098. }
  1099. return ret;
  1100. }
  1101. /*
  1102. * kgsl_iommu_setup_regs - map iommu registers into a pagetable
  1103. * @mmu: Pointer to mmu structure
  1104. * @pt: the pagetable
  1105. *
  1106. * To do pagetable switches from the GPU command stream, the IOMMU
  1107. * registers need to be mapped into the GPU's pagetable. This function
  1108. * is used differently on different targets. On 8960, the registers
  1109. * are mapped into every pagetable during kgsl_setup_pt(). On
  1110. * all other targets, the registers are mapped only into the second
  1111. * context bank.
  1112. *
  1113. * Return - 0 on success else error code
  1114. */
  1115. static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu,
  1116. struct kgsl_pagetable *pt)
  1117. {
  1118. int status;
  1119. int i = 0;
  1120. struct kgsl_iommu *iommu = mmu->priv;
  1121. if (!msm_soc_version_supports_iommu_v0())
  1122. return 0;
  1123. for (i = 0; i < iommu->unit_count; i++) {
  1124. status = kgsl_mmu_map_global(pt,
  1125. &(iommu->iommu_units[i].reg_map));
  1126. if (status)
  1127. goto err;
  1128. }
  1129. /* Map Lock variables to GPU pagetable */
  1130. if (iommu->sync_lock_initialized) {
  1131. status = kgsl_mmu_map_global(pt, &iommu->sync_lock_desc);
  1132. if (status)
  1133. goto err;
  1134. }
  1135. return 0;
  1136. err:
  1137. for (i--; i >= 0; i--)
  1138. kgsl_mmu_unmap(pt,
  1139. &(iommu->iommu_units[i].reg_map));
  1140. return status;
  1141. }
  1142. /*
  1143. * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable
  1144. * @mmu: Pointer to mmu structure
  1145. * @pt: the pagetable
  1146. *
  1147. * Removes mappings created by kgsl_iommu_setup_regs().
  1148. *
  1149. * Return - void
  1150. */
  1151. static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu,
  1152. struct kgsl_pagetable *pt)
  1153. {
  1154. struct kgsl_iommu *iommu = mmu->priv;
  1155. int i;
  1156. for (i = 0; i < iommu->unit_count; i++)
  1157. kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map));
  1158. if (iommu->sync_lock_desc.gpuaddr)
  1159. kgsl_mmu_unmap(pt, &iommu->sync_lock_desc);
  1160. }
  1161. /*
  1162. * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
  1163. * @mmu - Pointer to mmu structure
  1164. * @iommu_unit - The iommu unit for which base address is requested
  1165. * @ctx_id - The context ID of the IOMMU ctx
  1166. * @reg - The register for which address is required
  1167. *
  1168. * Return - The address of register which can be used in type0 packet
  1169. */
  1170. static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
  1171. int iommu_unit, int ctx_id,
  1172. enum kgsl_iommu_reg_map reg)
  1173. {
  1174. struct kgsl_iommu *iommu = mmu->priv;
  1175. if (iommu->iommu_reg_list[reg].ctx_reg)
  1176. return iommu->iommu_units[iommu_unit].ahb_base +
  1177. iommu->iommu_reg_list[reg].reg_offset +
  1178. (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
  1179. else
  1180. return iommu->iommu_units[iommu_unit].ahb_base +
  1181. iommu->iommu_reg_list[reg].reg_offset;
  1182. }
  1183. static int kgsl_iommu_init(struct kgsl_mmu *mmu)
  1184. {
  1185. /*
  1186. * initialize device mmu
  1187. *
  1188. * call this with the global lock held
  1189. */
  1190. int status = 0;
  1191. struct kgsl_iommu *iommu;
  1192. atomic_set(&mmu->fault, 0);
  1193. iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
  1194. if (!iommu) {
  1195. KGSL_CORE_ERR("kzalloc(%d) failed\n",
  1196. sizeof(struct kgsl_iommu));
  1197. return -ENOMEM;
  1198. }
  1199. mmu->priv = iommu;
  1200. status = kgsl_get_iommu_ctxt(mmu);
  1201. if (status)
  1202. goto done;
  1203. status = kgsl_set_register_map(mmu);
  1204. if (status)
  1205. goto done;
  1206. /*
  1207. * IOMMU-v1 requires hardware halt support to do in stream
  1208. * pagetable switching. This check assumes that if there are
  1209. * multiple units, they will be matching hardware.
  1210. */
  1211. mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT &&
  1212. (msm_soc_version_supports_iommu_v0() ||
  1213. iommu->iommu_units[0].iommu_halt_enable);
  1214. /*
  1215. * For IOMMU per-process pagetables, the allocatable range
  1216. * and the kernel global range must both be outside
  1217. * the userspace address range. There is a 1Mb gap
  1218. * between these address ranges to make overrun
  1219. * detection easier.
  1220. * For the shared pagetable case use 2GB, because
  1221. * mirroring the CPU address space is not possible and
  1222. * we're better off with the extra room.
  1223. */
  1224. if (mmu->pt_per_process) {
  1225. #ifndef CONFIG_MSM_KGSL_CFF_DUMP
  1226. mmu->pt_base = PAGE_OFFSET;
  1227. mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE
  1228. - kgsl_mmu_get_base_addr(mmu) - SZ_1M;
  1229. mmu->use_cpu_map = true;
  1230. #else
  1231. mmu->pt_base = KGSL_PAGETABLE_BASE;
  1232. mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE +
  1233. KGSL_IOMMU_GLOBAL_MEM_SIZE -
  1234. KGSL_PAGETABLE_BASE;
  1235. mmu->use_cpu_map = false;
  1236. #endif
  1237. } else {
  1238. mmu->pt_base = KGSL_PAGETABLE_BASE;
  1239. #ifndef CONFIG_MSM_KGSL_CFF_DUMP
  1240. mmu->pt_size = SZ_2G;
  1241. #else
  1242. mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE +
  1243. KGSL_IOMMU_GLOBAL_MEM_SIZE -
  1244. KGSL_PAGETABLE_BASE;
  1245. #endif
  1246. mmu->use_cpu_map = false;
  1247. }
  1248. status = kgsl_iommu_init_sync_lock(mmu);
  1249. if (status)
  1250. goto done;
  1251. iommu->iommu_reg_list = kgsl_iommuv0_reg;
  1252. iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
  1253. if (msm_soc_version_supports_iommu_v0()) {
  1254. iommu->iommu_reg_list = kgsl_iommuv0_reg;
  1255. iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
  1256. } else {
  1257. iommu->iommu_reg_list = kgsl_iommuv1_reg;
  1258. iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1;
  1259. }
  1260. /* A nop is required in an indirect buffer when switching
  1261. * pagetables in-stream */
  1262. kgsl_sharedmem_writel(mmu->device, &mmu->setstate_memory,
  1263. KGSL_IOMMU_SETSTATE_NOP_OFFSET,
  1264. cp_nop_packet(1));
  1265. if (cpu_is_msm8960()) {
  1266. /*
  1267. * 8960 doesn't have a second context bank, so the IOMMU
  1268. * registers must be mapped into every pagetable.
  1269. */
  1270. iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs;
  1271. iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs;
  1272. }
  1273. if (kgsl_guard_page == NULL) {
  1274. kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
  1275. __GFP_HIGHMEM);
  1276. if (kgsl_guard_page == NULL) {
  1277. status = -ENOMEM;
  1278. goto done;
  1279. }
  1280. }
  1281. dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
  1282. __func__);
  1283. done:
  1284. if (status) {
  1285. kfree(iommu);
  1286. mmu->priv = NULL;
  1287. }
  1288. return status;
  1289. }
  1290. /*
  1291. * kgsl_iommu_setup_defaultpagetable - Set up the initial defaultpagetable
  1292. * for iommu. This function is only called once during the first start;
  1293. * successive starts do not call this function.
  1294. * @mmu - Pointer to mmu structure
  1295. *
  1296. * Create the initial defaultpagetable and setup the iommu mappings to it
  1297. * Return - 0 on success else error code
  1298. */
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
	int status = 0;

	/* If chip is not 8960 then we use the 2nd context bank for pagetable
	 * switching on the 3D side for which a separate table is allocated */
	if (msm_soc_version_supports_iommu_v0()) {
		mmu->priv_bank_table =
			kgsl_mmu_getpagetable(mmu,
					KGSL_MMU_PRIV_BANK_TABLE_NAME);
		if (mmu->priv_bank_table == NULL) {
			status = -ENOMEM;
			goto err;
		}
		status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table);
		if (status)
			goto err;
	}
	mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL) {
		status = -ENOMEM;
		goto err;
	}
	return status;
err:
	if (mmu->priv_bank_table) {
		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
		mmu->priv_bank_table = NULL;
	}
	if (mmu->defaultpagetable) {
		kgsl_mmu_putpagetable(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}
	return status;
}
/*
 * kgsl_iommu_lock_rb_in_tlb - Allocates tlb entries and locks the
 * virtual-to-physical address translation of the ringbuffer for the 3D
 * device into the tlb.
 * @mmu - Pointer to mmu structure
 *
 * Return - void
 */
static void kgsl_iommu_lock_rb_in_tlb(struct kgsl_mmu *mmu)
{
	struct kgsl_device *device = mmu->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb;
	struct kgsl_iommu *iommu = mmu->priv;
	unsigned int num_tlb_entries;
	unsigned int tlblkcr = 0;
	unsigned int v2pxx = 0;
	unsigned int vaddr = 0;
	int i, j, k, l;

	if (!iommu->sync_lock_initialized)
		return;

	rb = &adreno_dev->ringbuffer;
	num_tlb_entries = rb->buffer_desc.size / PAGE_SIZE;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			tlblkcr = 0;
			if (cpu_is_msm8960())
				tlblkcr |= ((num_tlb_entries &
					KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
					KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
			else
				tlblkcr |= (((num_tlb_entries *
					iommu_unit->dev_count) &
					KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
					KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
			/* Do not invalidate locked entries on tlbiall flush */
			tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK)
				<< KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT);
			tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK)
				<< KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT);
			tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK)
				<< KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT);
			/* Enable tlb locking */
			tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_LKE_MASK)
				<< KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
			KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
					iommu_unit->dev[j].ctx_id,
					TLBLKCR, tlblkcr);
		}
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/* skip locking entries for private bank on 8960 */
			if (cpu_is_msm8960() && KGSL_IOMMU_CONTEXT_PRIV == j)
				continue;
			/* Lock the ringbuffer virtual address into tlb */
			vaddr = rb->buffer_desc.gpuaddr;
			for (k = 0; k < num_tlb_entries; k++) {
				v2pxx = 0;
				v2pxx |= (((k + j * num_tlb_entries) &
					KGSL_IOMMU_V2PXX_INDEX_MASK)
					<< KGSL_IOMMU_V2PXX_INDEX_SHIFT);
				v2pxx |= vaddr & (KGSL_IOMMU_V2PXX_VA_MASK <<
						KGSL_IOMMU_V2PXX_VA_SHIFT);
				KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
						iommu_unit->dev[j].ctx_id,
						V2PUR, v2pxx);
				mb();
				vaddr += PAGE_SIZE;
				for (l = 0; l < iommu_unit->dev_count; l++) {
					tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[l].ctx_id,
						TLBLKCR);
					mb();
					tlblkcr &=
					~(KGSL_IOMMU_TLBLKCR_VICTIM_MASK
					<< KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
					tlblkcr |= (((k + 1 +
					(j * num_tlb_entries)) &
					KGSL_IOMMU_TLBLKCR_VICTIM_MASK) <<
					KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[l].ctx_id,
						TLBLKCR, tlblkcr);
				}
			}
		}
		for (j = 0; j < iommu_unit->dev_count; j++) {
			tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
						iommu_unit->dev[j].ctx_id,
						TLBLKCR);
			mb();
			/* Disable tlb locking */
			tlblkcr &= ~(KGSL_IOMMU_TLBLKCR_LKE_MASK
				<< KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
			KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
				iommu_unit->dev[j].ctx_id, TLBLKCR, tlblkcr);
		}
	}
}
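
/*
 * kgsl_iommu_start - Start the IOMMU-backed MMU for a device
 * @mmu - Pointer to mmu structure
 *
 * On first start create the default pagetable, then attach the IOMMU
 * domains, cache the default TTBR0 value of each context bank and lock
 * the ringbuffer translations into the tlb when the sync lock is in use.
 * Return - 0 on success else error code
 */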
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
	int status;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;
	int sctlr_val = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (mmu->defaultpagetable == NULL) {
		status = kgsl_iommu_setup_defaultpagetable(mmu);
		if (status)
			return -ENOMEM;
	}
	status = kgsl_iommu_start_sync_lock(mmu);
	if (status)
		return status;

	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
	 * a225, hence we still keep the MMU active on 8960 */
	if (cpu_is_msm8960() && KGSL_DEVICE_3D0 == mmu->device->id) {
		struct kgsl_mh *mh = &(mmu->device->mh);
		BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 &&
			mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr);
		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	}

	mmu->hwpagetable = mmu->defaultpagetable;

	status = kgsl_attach_pagetable_iommu_domain(mmu);
	if (status) {
		mmu->hwpagetable = NULL;
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (status) {
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
	 * that value should not change when we change pagetables, so while
	 * changing pagetables we can use this lsb value of the pagetable w/o
	 * having to read it again
	 */
	_iommu_lock(iommu);
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/*
			 * For IOMMU V1 do not halt IOMMU on pagefault if
			 * FT pagefault policy is set accordingly
			 */
			if ((!msm_soc_version_supports_iommu_v0()) &&
				(!(adreno_dev->ft_pf_policy &
				   KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))) {
				sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						SCTLR);
				sctlr_val |= (0x1 <<
						KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
				KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						SCTLR, sctlr_val);
			}
			if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
				iommu_unit->dev[j].default_ttbr0 =
					KGSL_IOMMU_GET_CTX_REG_LL(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						TTBR0);
			} else {
				iommu_unit->dev[j].default_ttbr0 =
					KGSL_IOMMU_GET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						TTBR0);
			}
		}
	}
	kgsl_iommu_lock_rb_in_tlb(mmu);
	_iommu_unlock(iommu);

	/* For complete CFF */
	kgsl_cffdump_setmem(mmu->device, mmu->setstate_memory.gpuaddr +
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_nop_packet(1), sizeof(unsigned int));

	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	mmu->flags |= KGSL_FLAGS_STARTED;

done:
	return status;
}
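
/*
 * kgsl_iommu_flush_tlb_pt_current - Flush the tlb if the given pagetable is
 * the one currently programmed in the user context bank
 * @pt - Pointer to the pagetable that was just modified
 *
 * Return - void
 */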
static void kgsl_iommu_flush_tlb_pt_current(struct kgsl_pagetable *pt)
{
	int lock_taken = 0;
	struct kgsl_device *device = pt->mmu->device;
	struct kgsl_iommu *iommu = pt->mmu->priv;

	/*
	 * Check to see if the current thread already holds the device mutex.
	 * If it does not, then take the device mutex which is required for
	 * flushing the tlb
	 */
	if (!kgsl_mutex_lock(&device->mutex, &device->mutex_owner))
		lock_taken = 1;

	/*
	 * Flush the tlb only if the iommu device is attached and the pagetable
	 * hasn't been switched yet
	 */
	if (kgsl_mmu_is_perprocess(pt->mmu) &&
		iommu->iommu_units[0].dev[KGSL_IOMMU_CONTEXT_USER].attached &&
		kgsl_iommu_pt_equal(pt->mmu, pt,
		kgsl_iommu_get_current_ptbase(pt->mmu)))
		kgsl_iommu_default_setstate(pt->mmu, KGSL_MMUFLAGS_TLBFLUSH);

	if (lock_taken)
		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
}
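
/*
 * kgsl_iommu_unmap - Remove a memdesc mapping (and its guard page, if any)
 * from the given pagetable and flush the tlb when required
 * @pt - Pointer to the pagetable to unmap from
 * @memdesc - Memory descriptor whose gpuaddr and size describe the mapping
 * @tlb_flags - Not used by this implementation
 *
 * Return - 0 on success else error code
 */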
static int
kgsl_iommu_unmap(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	int ret = 0;
	unsigned int range = memdesc->size;
	struct kgsl_iommu_pt *iommu_pt = pt->priv;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	if (kgsl_memdesc_has_guard_page(memdesc))
		range += PAGE_SIZE;

	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
	if (ret) {
		KGSL_CORE_ERR("iommu_unmap_range(%pK, %x, %d) failed "
			"with err: %d\n", iommu_pt->domain, gpuaddr,
			range, ret);
		return ret;
	}

	kgsl_iommu_flush_tlb_pt_current(pt);

	return ret;
}
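
/*
 * kgsl_iommu_map - Map a memdesc scatterlist into the given pagetable at
 * memdesc->gpuaddr, appending a non-writable guard page when the memdesc
 * requests one
 * @pt - Pointer to the pagetable to map into
 * @memdesc - Memory descriptor to map
 * @protflags - IOMMU protection flags for the mapping
 * @tlb_flags - Not used by this implementation
 *
 * Return - 0 on success else error code
 */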
static int
kgsl_iommu_map(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags,
			unsigned int *tlb_flags)
{
	int ret;
	unsigned int iommu_virt_addr;
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	int size = memdesc->size;

	BUG_ON(NULL == iommu_pt);

	iommu_virt_addr = memdesc->gpuaddr;

	ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
				size, protflags);
	if (ret) {
		KGSL_CORE_ERR("iommu_map_range(%pK, %x, %pK, %d, %x) err: %d\n",
			iommu_pt->domain, iommu_virt_addr, memdesc->sg, size,
			protflags, ret);
		return ret;
	}
	if (kgsl_memdesc_has_guard_page(memdesc)) {
		ret = iommu_map(iommu_pt->domain, iommu_virt_addr + size,
				page_to_phys(kgsl_guard_page), PAGE_SIZE,
				protflags & ~IOMMU_WRITE);
		if (ret) {
			KGSL_CORE_ERR("iommu_map(%pK, %x, guard, %x) err: %d\n",
				iommu_pt->domain, iommu_virt_addr + size,
				protflags & ~IOMMU_WRITE,
				ret);
			/* cleanup the partial mapping */
			iommu_unmap_range(iommu_pt->domain, iommu_virt_addr,
					size);
		}
	}

	/*
	 * IOMMU V1 BFBs pre-fetch data beyond what is being used by the core.
	 * This can include both allocated pages and un-allocated pages.
	 * If an un-allocated page is cached, and later used (if it has been
	 * newly dynamically allocated by SW) the SMMU HW should automatically
	 * re-fetch the pages from memory (rather than using the cached
	 * un-allocated page). This logic is known as the re-fetch logic.
	 * In current chips we suspect this re-fetch logic is broken,
	 * it can result in bad translations which can either cause downstream
	 * bus errors, or upstream cores being hung (because of garbage data
	 * being read) -> causing TLB sync stuck issues. As a result SW must
	 * implement the invalidate+map.
	 */
	if (!msm_soc_version_supports_iommu_v0())
		kgsl_iommu_flush_tlb_pt_current(pt);

	return ret;
}
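
/*
 * kgsl_iommu_pagefault_resume - Clear and resume any faulted context banks
 * @mmu - Pointer to mmu structure
 *
 * For every context bank that recorded a fault, write the RESUME and FSR
 * registers to resume the stalled translation and clear the fault status,
 * then clear the mmu fault flag.
 * Return - void
 */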
void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	if (atomic_read(&mmu->fault)) {
		for (i = 0; i < iommu->unit_count; i++) {
			struct kgsl_iommu_unit *iommu_unit =
				&iommu->iommu_units[i];
			for (j = 0; j < iommu_unit->dev_count; j++) {
				if (iommu_unit->dev[j].fault) {
					kgsl_iommu_enable_clk(mmu, j);
					_iommu_lock(iommu);
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						RESUME, 1);
					KGSL_IOMMU_SET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						FSR, 0);
					kgsl_iommu_disable_clk(mmu, j);
					_iommu_unlock(iommu);
					iommu_unit->dev[j].fault = 0;
				}
			}
		}
		atomic_set(&mmu->fault, 0);
	}
}
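
/*
 * kgsl_iommu_stop - Stop the IOMMU-backed MMU for a device
 * @mmu - Pointer to mmu structure
 *
 * Detach the IOMMU domains, clear any pending pagefault state and cancel
 * events queued against the mmu device.
 * Return - void
 */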
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
	/*
	 *  stop device mmu
	 *
	 *  call this with the global lock held
	 */
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* detach iommu attachment */
		kgsl_detach_pagetable_iommu_domain(mmu);
		mmu->hwpagetable = NULL;

		mmu->flags &= ~KGSL_FLAGS_STARTED;

		kgsl_iommu_pagefault_resume(mmu);
	}

	/* switch off MMU clocks and cancel any events it has queued */
	kgsl_cancel_events(mmu->device, &mmu->device->iommu_events);
}
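
/*
 * kgsl_iommu_close - Release all resources held by the IOMMU-backed MMU
 * @mmu - Pointer to mmu structure
 *
 * Put the private bank and default pagetables, unmap the IOMMU register
 * spaces, free the GPU-CPU sync lock structures and the guard page.
 * Return - 0
 */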
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int i;

	if (mmu->priv_bank_table != NULL) {
		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
	}

	if (mmu->defaultpagetable != NULL)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;

		if (reg_map->hostptr)
			iounmap(reg_map->hostptr);
		kgsl_sg_free(reg_map->sg, reg_map->sglen);
		reg_map->priv &= ~KGSL_MEMDESC_GLOBAL;
	}
	/* clear IOMMU GPU CPU sync structures */
	kgsl_sg_free(iommu->sync_lock_desc.sg, iommu->sync_lock_desc.sglen);
	memset(&iommu->sync_lock_desc, 0, sizeof(iommu->sync_lock_desc));
	iommu->sync_lock_vars = NULL;

	kfree(iommu);

	if (kgsl_guard_page != NULL) {
		__free_page(kgsl_guard_page);
		kgsl_guard_page = NULL;
	}

	return 0;
}
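
/*
 * kgsl_iommu_get_current_ptbase - Return the physical pagetable base that is
 * currently programmed in TTBR0 of the user context bank
 * @mmu - Pointer to mmu structure
 *
 * Return - The masked TTBR0 address, or 0 when called from interrupt context
 * where the iommu clocks cannot be toggled
 */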
static phys_addr_t
kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	phys_addr_t pt_base;
	struct kgsl_iommu *iommu = mmu->priv;

	/* We cannot enable or disable the clocks in interrupt context, this
	   function is called from interrupt context if there is an axi error */
	if (in_interrupt())
		return 0;
	/* Return the current pt base by reading IOMMU pt_base register */
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	pt_base = KGSL_IOMMU_GET_CTX_REG(iommu,
				(&iommu->iommu_units[0]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether the pagetable has to change or the tlb is
 * to be flushed or both
 *
 * Based on flags, set the new pagetable for the IOMMU unit or flush its tlb
 * or do both by doing direct register writes to the IOMMU registers through
 * the cpu
 * Return - 0 on success else error code
 */
static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	int ret = 0;
	phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu,
						mmu->hwpagetable);
	phys_addr_t pt_val;

	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return ret;
	}

	/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v0()) {
		ret = kgsl_idle(mmu->device);
		if (ret)
			return ret;
	}

	/* Acquire GPU-CPU sync Lock here */
	_iommu_lock(iommu);

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v0()) {
			ret = kgsl_idle(mmu->device);
			if (ret)
				goto unlock;
		}
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_default_ttbr0(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);

			pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
			pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
			pt_val |= pt_base;
			if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
				KGSL_IOMMU_SET_CTX_REG_LL(iommu,
					(&iommu->iommu_units[i]),
					KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
			} else {
				KGSL_IOMMU_SET_CTX_REG(iommu,
					(&iommu->iommu_units[i]),
					KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
			}

			mb();
			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
				(&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		unsigned long wait_for_flush;
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
			mb();
			/*
			 * Wait for flush to complete by polling the flush
			 * status bit of TLBSTATUS register for not more than
			 * 2 s. After 2s just exit, at that point the SMMU h/w
			 * may be stuck and will eventually cause GPU to hang
			 * or bring the system down.
			 */
			if (!msm_soc_version_supports_iommu_v0()) {
				wait_for_flush = jiffies +
						msecs_to_jiffies(2000);
				KGSL_IOMMU_SET_CTX_REG(iommu,
					(&iommu->iommu_units[i]),
					KGSL_IOMMU_CONTEXT_USER, TLBSYNC, 0);
				while (KGSL_IOMMU_GET_CTX_REG(iommu,
					(&iommu->iommu_units[i]),
					KGSL_IOMMU_CONTEXT_USER, TLBSTATUS) &
					(KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
					if (time_after(jiffies,
						wait_for_flush)) {
						KGSL_DRV_ERR(mmu->device,
						"Wait limit reached for IOMMU tlb flush\n");
						break;
					}
					cpu_relax();
				}
			}
		}
	}

unlock:
	/* Release GPU-CPU sync Lock here */
	_iommu_unlock(iommu);

	/* Disable smmu clock */
	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	return ret;
}
/*
 * kgsl_iommu_get_reg_gpuaddr - Returns the gpu address of an IOMMU register
 * @mmu - Pointer to mmu structure
 * @iommu_unit - The iommu unit for which base address is requested
 * @ctx_id - The context ID of the IOMMU ctx
 * @reg - The register for which address is required
 *
 * Return - The gpu address of register which can be used in type3 packet
 */
static unsigned int kgsl_iommu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
					int iommu_unit, int ctx_id, int reg)
{
	struct kgsl_iommu *iommu = mmu->priv;

	if (KGSL_IOMMU_GLOBAL_BASE == reg)
		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr;

	if (iommu->iommu_reg_list[reg].ctx_reg)
		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
			iommu->iommu_reg_list[reg].reg_offset +
			(ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
	else
		return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
			iommu->iommu_reg_list[reg].reg_offset;
}
/*
 * kgsl_iommu_hw_halt_supported - Returns whether IOMMU halt command is
 * supported
 * @mmu - Pointer to mmu structure
 * @iommu_unit - The iommu unit for which the property is requested
 */
static int kgsl_iommu_hw_halt_supported(struct kgsl_mmu *mmu, int iommu_unit)
{
	struct kgsl_iommu *iommu = mmu->priv;
	return iommu->iommu_units[iommu_unit].iommu_halt_enable;
}
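
/*
 * kgsl_iommu_get_num_iommu_units - Returns the number of IOMMU units attached
 * to this mmu
 * @mmu - Pointer to mmu structure
 */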
static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->priv;
	return iommu->unit_count;
}
/*
 * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
 * @mmu: Pointer to mmu structure
 * @pf_policy: The pagefault policy to set
 *
 * Check if the new policy indicated by pf_policy is the same as the current
 * policy. If it is, return; otherwise apply the new policy.
 */
static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
				unsigned int pf_policy)
{
	int i, j;
	struct kgsl_iommu *iommu = mmu->priv;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);
	int ret = 0;
	unsigned int sctlr_val;

	if ((adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ==
		(pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))
		return ret;
	if (msm_soc_version_supports_iommu_v0())
		return ret;

	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return ret;
	}
	ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (ret) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
		return ret;
	}

	/* Need to idle device before changing options */
	ret = mmu->device->ftbl->idle(mmu->device);
	if (ret) {
		kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
		return ret;
	}

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
					iommu_unit,
					iommu_unit->dev[j].ctx_id,
					SCTLR);
			if (pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
				sctlr_val &= ~(0x1 <<
					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
			else
				sctlr_val |= (0x1 <<
					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
			KGSL_IOMMU_SET_CTX_REG(iommu,
					iommu_unit,
					iommu_unit->dev[j].ctx_id,
					SCTLR, sctlr_val);
		}
	}
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
	return ret;
}
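
/*
 * Operation tables exported to the core kgsl mmu layer: iommu_ops covers
 * device-level operations, iommu_pt_ops covers per-pagetable map/unmap and
 * pagetable lifetime operations.
 */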
struct kgsl_mmu_ops iommu_ops = {
	.mmu_init = kgsl_iommu_init,
	.mmu_close = kgsl_iommu_close,
	.mmu_start = kgsl_iommu_start,
	.mmu_stop = kgsl_iommu_stop,
	.mmu_setstate = kgsl_iommu_setstate,
	.mmu_device_setstate = kgsl_iommu_default_setstate,
	.mmu_pagefault = NULL,
	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
	.mmu_enable_clk = kgsl_iommu_enable_clk,
	.mmu_disable_clk = kgsl_iommu_disable_clk,
	.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
	.mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
	.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
	.mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
	.mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
	.mmu_pt_equal = kgsl_iommu_pt_equal,
	.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
	.mmu_hw_halt_supported = kgsl_iommu_hw_halt_supported,
	/* These callbacks will be set on some chipsets */
	.mmu_setup_pt = NULL,
	.mmu_cleanup_pt = NULL,
	.mmu_sync_lock = kgsl_iommu_sync_lock,
	.mmu_sync_unlock = kgsl_iommu_sync_unlock,
	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
	.mmu_map = kgsl_iommu_map,
	.mmu_unmap = kgsl_iommu_unmap,
	.mmu_create_pagetable = kgsl_iommu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
};
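
/*
 * Illustrative sketch (not part of this file): the core kgsl mmu layer is
 * expected to dispatch through the tables above rather than calling the
 * iommu backend directly. The helper name below and the exact kgsl_mmu
 * fields it touches are assumptions for illustration only.
 *
 *	static int example_mmu_start(struct kgsl_mmu *mmu)
 *	{
 *		// Dispatch through the backend ops table when it is set.
 *		if (mmu->mmu_ops && mmu->mmu_ops->mmu_start)
 *			return mmu->mmu_ops->mmu_start(mmu);
 *		return 0;
 *	}
 */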