kgsl_gpummu.c

/* Copyright (c) 2011,2013-2015,2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_gpummu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"
#include "adreno.h"

#define KGSL_PAGETABLE_SIZE \
        ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
        KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
                          struct kobj_attribute *attr,
                          char *buf)
{
        struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
                kgsl_driver.ptpool;
        return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
                      struct kobj_attribute *attr,
                      char *buf)
{
        struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
                kgsl_driver.ptpool;
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
                         struct kobj_attribute *attr,
                         char *buf)
{
        struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
                kgsl_driver.ptpool;
        return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
                         struct kobj_attribute *attr,
                         char *buf)
{
        struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
                kgsl_driver.ptpool;
        return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
        .attr = { .name = "ptpool_entries", .mode = 0444 },
        .show = sysfs_show_ptpool_entries,
        .store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
        .attr = { .name = "ptpool_min", .mode = 0444 },
        .show = sysfs_show_ptpool_min,
        .store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
        .attr = { .name = "ptpool_chunks", .mode = 0444 },
        .show = sysfs_show_ptpool_chunks,
        .store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
        .attr = { .name = "ptpool_ptsize", .mode = 0444 },
        .show = sysfs_show_ptpool_ptsize,
        .store = NULL,
};

static struct attribute *ptpool_attrs[] = {
        &attr_ptpool_entries.attr,
        &attr_ptpool_min.attr,
        &attr_ptpool_chunks.attr,
        &attr_ptpool_ptsize.attr,
        NULL,
};

static struct attribute_group ptpool_attr_group = {
        .attrs = ptpool_attrs,
};
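
/*
 * _kgsl_ptpool_add_entries() - allocate one chunk of coherent memory large
 * enough for 'count' pagetables, carve it into entries tracked by a bitmap
 * and add it to the pool.  Called with the pool lock held.
 */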
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
        struct kgsl_ptpool_chunk *chunk;
        size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

        BUG_ON(count == 0);

        if (get_order(size) >= MAX_ORDER) {
                KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
                return -EINVAL;
        }

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
                return -ENOMEM;
        }

        chunk->size = size;
        chunk->count = count;
        chunk->dynamic = dynamic;

        chunk->data = dma_alloc_coherent(NULL, size,
                                         &chunk->phys, GFP_KERNEL);
        if (chunk->data == NULL) {
                KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
                goto err;
        }

        chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
        if (chunk->bitmap == NULL) {
                KGSL_CORE_ERR("kzalloc(%d) failed\n",
                        BITS_TO_LONGS(count) * 4);
                goto err_dma;
        }

        list_add_tail(&chunk->list, &pool->list);

        pool->chunks++;
        pool->entries += count;

        if (!dynamic)
                pool->static_entries += count;

        return 0;

err_dma:
        dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
        kfree(chunk);
        return -ENOMEM;
}
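
/*
 * _kgsl_ptpool_get_entry() - find the first free entry in the pool's chunks,
 * mark it used in the chunk bitmap and return its virtual address.  The
 * physical address is returned through 'physaddr'.  Called with the pool
 * lock held.
 */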
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, phys_addr_t *physaddr)
{
        struct kgsl_ptpool_chunk *chunk;

        list_for_each_entry(chunk, &pool->list, list) {
                int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

                if (bit >= chunk->count)
                        continue;

                set_bit(bit, chunk->bitmap);
                *physaddr = chunk->phys + (bit * pool->ptsize);

                return chunk->data + (bit * pool->ptsize);
        }

        return NULL;
}
/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */
static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
        int ret = 0;

        BUG_ON(count == 0);

        mutex_lock(&pool->lock);

        /* Only 4MB can be allocated in one chunk, so larger allocations
           need to be split into multiple sections */

        while (count) {
                int entries = ((count * pool->ptsize) > SZ_4M) ?
                        SZ_4M / pool->ptsize : count;

                /* Add the entries as static, i.e. they don't ever stand
                   a chance of being removed */

                ret = _kgsl_ptpool_add_entries(pool, entries, 0);
                if (ret)
                        break;

                count -= entries;
        }

        mutex_unlock(&pool->lock);
        return ret;
}
/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */
static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
                               phys_addr_t *physaddr)
{
        void *addr = NULL;
        int ret;

        mutex_lock(&pool->lock);
        addr = _kgsl_ptpool_get_entry(pool, physaddr);
        if (addr)
                goto done;

        /* Add a chunk for 1 more pagetable and mark it as dynamic */
        ret = _kgsl_ptpool_add_entries(pool, 1, 1);
        if (ret)
                goto done;

        addr = _kgsl_ptpool_get_entry(pool, physaddr);

done:
        mutex_unlock(&pool->lock);
        return addr;
}
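
/*
 * _kgsl_ptpool_rm_chunk() - unlink a chunk from the pool and release its
 * coherent memory, bitmap and bookkeeping structure.  Called with the pool
 * lock held.
 */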
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
        list_del(&chunk->list);

        if (chunk->data)
                dma_free_coherent(NULL, chunk->size, chunk->data,
                        chunk->phys);
        kfree(chunk->bitmap);
        kfree(chunk);
}
/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: A pointer to the virtual address to free
 *
 * Free a pagetable allocated from the pool
 */
static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
        struct kgsl_ptpool_chunk *chunk, *tmp;

        if (pool == NULL || addr == NULL)
                return;

        mutex_lock(&pool->lock);
        list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
                if (addr >= chunk->data &&
                    addr < chunk->data + chunk->size) {
                        int bit = ((unsigned long) (addr - chunk->data)) /
                                pool->ptsize;

                        clear_bit(bit, chunk->bitmap);
                        memset(addr, 0, pool->ptsize);

                        if (chunk->dynamic &&
                            bitmap_empty(chunk->bitmap, chunk->count))
                                _kgsl_ptpool_rm_chunk(chunk);

                        break;
                }
        }

        mutex_unlock(&pool->lock);
}
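
/*
 * kgsl_gpummu_ptpool_destroy() - free every chunk in the pool and then the
 * pool structure itself.  Counterpart to kgsl_gpummu_ptpool_init().
 */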
void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
        struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
        struct kgsl_ptpool_chunk *chunk, *tmp;

        if (pool == NULL)
                return;

        mutex_lock(&pool->lock);
        list_for_each_entry_safe(chunk, tmp, &pool->list, list)
                _kgsl_ptpool_rm_chunk(chunk);
        mutex_unlock(&pool->lock);

        kfree(pool);
}
/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Allocate and initialize a pagetable pool and populate it with an
 * initial chunk of entries.  Returns the new pool, or NULL on failure.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
        int ptsize = KGSL_PAGETABLE_SIZE;
        struct kgsl_ptpool *pool;
        int ret = 0;

        pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
        if (!pool) {
                KGSL_CORE_ERR("Failed to allocate memory for ptpool\n");
                return NULL;
        }

        pool->ptsize = ptsize;
        mutex_init(&pool->lock);
        INIT_LIST_HEAD(&pool->list);

        if (entries) {
                ret = kgsl_ptpool_add(pool, entries);
                if (ret)
                        goto err_ptpool_remove;
        }

        ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
        if (ret) {
                KGSL_CORE_ERR("sysfs_create_group failed for ptpool statistics: %d\n",
                        ret);
                goto err_ptpool_remove;
        }

        return (void *)pool;

err_ptpool_remove:
        kgsl_gpummu_ptpool_destroy(pool);
        return NULL;
}
int kgsl_gpummu_pt_equal(struct kgsl_mmu *mmu,
                         struct kgsl_pagetable *pt,
                         phys_addr_t pt_base)
{
        struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;

        return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(struct kgsl_pagetable *pt)
{
        struct kgsl_gpummu_pt *gpummu_pt = pt->priv;

        kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
                gpummu_pt->base.hostptr);

        atomic_sub(KGSL_PAGETABLE_SIZE, &kgsl_driver.stats.coherent);

        kfree(gpummu_pt->tlbflushfilter.base);

        kfree(gpummu_pt);
}
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
        return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
        uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

        BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);

        baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
        uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

        BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);

        return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}
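
/*
 * kgsl_gpummu_pagefault() - MH MMU page fault handler.  Reads the faulting
 * address and the active pagetable base from the hardware registers, then
 * logs (unless suppressed by the one-log-per-page policy) and traces the
 * fault.
 */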
static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
        unsigned int reg;
        unsigned int ptbase;
        struct kgsl_device *device;
        struct adreno_device *adreno_dev;
        unsigned int no_page_fault_log = 0;

        device = mmu->device;
        adreno_dev = ADRENO_DEVICE(device);

        kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
        kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

        if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
                no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, reg);

        if (!no_page_fault_log)
                KGSL_MEM_CRIT(mmu->device,
                        "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
                        reg & ~(PAGE_SIZE - 1),
                        kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
                        reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);

        trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
                        kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
                        reg & 0x02 ? "WRITE" : "READ");
}
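
/*
 * kgsl_gpummu_create_pagetable() - allocate a GPU pagetable: the
 * per-pagetable bookkeeping structure, the TLB flush filter bitmap and the
 * pagetable memory itself, which comes from the global ptpool.
 */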
static void *kgsl_gpummu_create_pagetable(void)
{
        struct kgsl_gpummu_pt *gpummu_pt;

        gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
                        GFP_KERNEL);
        if (!gpummu_pt)
                return NULL;

        gpummu_pt->last_superpte = 0;

        gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
                                (PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
        gpummu_pt->tlbflushfilter.base = (unsigned int *)
                        kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
        if (!gpummu_pt->tlbflushfilter.base) {
                KGSL_CORE_ERR("kzalloc(%d) failed\n",
                        gpummu_pt->tlbflushfilter.size);
                goto err_free_gpummu;
        }
        GSL_TLBFLUSH_FILTER_RESET();

        gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
                                        kgsl_driver.ptpool,
                                        &gpummu_pt->base.physaddr);
        if (gpummu_pt->base.hostptr == NULL)
                goto err_flushfilter;

        /* Do a check before truncating phys_addr_t to unsigned 32 */
        if (sizeof(phys_addr_t) > sizeof(unsigned int)) {
                WARN_ONCE(1, "Cannot use LPAE with gpummu\n");
                goto err_flushfilter;
        }
        gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
        gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

        /* ptpool allocations are from coherent memory, so update the
           device statistics accordingly */

        KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, &kgsl_driver.stats.coherent,
                       &kgsl_driver.stats.coherent_max);

        return (void *)gpummu_pt;

err_flushfilter:
        kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
        kfree(gpummu_pt);

        return NULL;
}
static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
                                        uint32_t flags)
{
        struct kgsl_gpummu_pt *gpummu_pt;

        if (!kgsl_mmu_enabled())
                return 0;

        if (flags & KGSL_MMUFLAGS_PTUPDATE) {
                int ret = kgsl_idle(mmu->device);
                if (ret)
                        return ret;
                gpummu_pt = mmu->hwpagetable->priv;
                kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
                        gpummu_pt->base.gpuaddr);
        }

        if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
                /* Invalidate all and tc */
                kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
        }

        return 0;
}
static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
                                struct kgsl_pagetable *pagetable,
                                unsigned int context_id)
{
        int ret = 0;

        if (mmu->flags & KGSL_FLAGS_STARTED) {
                /* If the hardware pagetable is not the one requested,
                 * switch the MMU over to the new pagetable.
                 */
                if (mmu->hwpagetable != pagetable) {
                        mmu->hwpagetable = pagetable;
                        /* Since we do a TLB flush the tlb_flags should
                         * be cleared by calling kgsl_mmu_pt_get_flags
                         */
                        kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

                        /* call device specific set page table */
                        ret = kgsl_setstate(mmu, context_id,
                                KGSL_MMUFLAGS_TLBFLUSH |
                                KGSL_MMUFLAGS_PTUPDATE);
                }
        }

        return ret;
}
static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
        /*
         * initialize device mmu
         *
         * call this with the global lock held
         */
        int status = 0;

        mmu->pt_base = KGSL_PAGETABLE_BASE;
        mmu->pt_size = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
        mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT;
        mmu->use_cpu_map = false;

        /* sub-client MMU lookups require address translation */
        if ((mmu->config & ~0x1) > 0) {
                /* make sure virtual address range is a multiple of 64KB */
                if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
                        KGSL_CORE_ERR("Invalid pagetable size requested for GPUMMU: %x\n",
                                CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
                        return -EINVAL;
                }
        }

        dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
                __func__);
        return status;
}
static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
        /*
         * initialize device mmu
         *
         * call this with the global lock held
         */

        struct kgsl_device *device = mmu->device;
        struct kgsl_gpummu_pt *gpummu_pt;
        int ret;

        if (mmu->flags & KGSL_FLAGS_STARTED)
                return 0;

        /* MMU not enabled */
        if ((mmu->config & 0x1) == 0)
                return 0;

        /* setup MMU and sub-client behavior */
        kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

        /* enable axi interrupts */
        kgsl_regwrite(device, MH_INTERRUPT_MASK,
                        GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

        kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
                           mmu->setstate_memory.size);

        /* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
         * to complete transactions in case of an MMU fault. Note that
         * we'll leave the bottom 32 bytes of the setstate_memory for other
         * purposes (e.g. use it when dummy read cycles are needed
         * for other blocks) */
        kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
                        mmu->setstate_memory.physaddr + 32);

        if (mmu->defaultpagetable == NULL)
                mmu->defaultpagetable =
                        kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);

        /* Return error if the default pagetable doesn't exist */
        if (mmu->defaultpagetable == NULL)
                return -ENOMEM;

        mmu->hwpagetable = mmu->defaultpagetable;
        gpummu_pt = mmu->hwpagetable->priv;

        kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
                      gpummu_pt->base.gpuaddr);
        kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
                      (KGSL_PAGETABLE_BASE |
                      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));

        ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
        if (!ret)
                mmu->flags |= KGSL_FLAGS_STARTED;

        return ret;
}
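
/*
 * kgsl_gpummu_unmap() - mark every PTE covering the memdesc as dirty
 * (i.e. no longer mapped) and flag the affected superpte ranges in the
 * TLB flush filter.
 */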
static int
kgsl_gpummu_unmap(struct kgsl_pagetable *pt,
                struct kgsl_memdesc *memdesc,
                unsigned int *tlb_flags)
{
        unsigned int numpages;
        unsigned int pte, ptefirst, ptelast, superpte;
        unsigned int range = memdesc->size;
        struct kgsl_gpummu_pt *gpummu_pt = pt->priv;

        /* All GPU addresses as assigned are page aligned, but some
           functions perturb the gpuaddr with an offset, so apply the
           mask here to make sure we have the right address */

        unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

        numpages = (range >> PAGE_SHIFT);
        if (range & (PAGE_SIZE - 1))
                numpages++;

        ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
        ptelast = ptefirst + numpages;

        superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
        GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
        for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
                /* check if PTE exists */
                if (!kgsl_pt_map_get(gpummu_pt, pte))
                        KGSL_CORE_ERR("pt entry %x is already "
                                "unmapped for pagetable %pK\n", pte, gpummu_pt);
#endif
                kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
                superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
                if (pte == superpte)
                        GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
                                GSL_PT_SUPER_PTE);
        }

        /* Post all writes to the pagetable */
        wmb();

        return 0;
}
#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
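
/*
 * kgsl_gpummu_map() - write a PTE for every page in the memdesc's
 * scatter-gather list.  A TLB flush is requested if the mapping does not
 * start and end on superpte boundaries, or if any superpte written is
 * already marked dirty in the flush filter.
 */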
static int
kgsl_gpummu_map(struct kgsl_pagetable *pt,
                struct kgsl_memdesc *memdesc,
                unsigned int protflags,
                unsigned int *tlb_flags)
{
        unsigned int pte;
        struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
        struct scatterlist *s;
        int flushtlb = 0;
        int i;

        pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

        /* Flush the TLB if the first PTE isn't at the superpte boundary */
        if (pte & (GSL_PT_SUPER_PTE - 1))
                flushtlb = 1;

        for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
                unsigned int paddr = kgsl_get_sg_pa(s);
                unsigned int j;

                /* Each sg entry might be multiple pages long */
                for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
                        if (SUPERPTE_IS_DIRTY(pte))
                                flushtlb = 1;
                        kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
                }
        }

        /* Flush the TLB if the last PTE isn't at the superpte boundary */
        if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
                flushtlb = 1;

        wmb();

        if (flushtlb) {
                /* set all devices as needing flushing */
                *tlb_flags = UINT_MAX;
                GSL_TLBFLUSH_FILTER_RESET();
        }

        return 0;
}
static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
        mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
        /*
         * close device mmu
         *
         * call this with the global lock held
         */
        if (mmu->setstate_memory.gpuaddr)
                kgsl_sharedmem_free(&mmu->setstate_memory);

        if (mmu->defaultpagetable)
                kgsl_mmu_putpagetable(mmu->defaultpagetable);

        return 0;
}
static phys_addr_t
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
        unsigned int ptbase;

        kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
        return ptbase;
}

static phys_addr_t
kgsl_gpummu_get_pt_base_addr(struct kgsl_mmu *mmu,
                             struct kgsl_pagetable *pt)
{
        struct kgsl_gpummu_pt *gpummu_pt = pt->priv;

        return gpummu_pt->base.gpuaddr;
}

static int kgsl_gpummu_get_num_iommu_units(struct kgsl_mmu *mmu)
{
        return 1;
}
struct kgsl_mmu_ops gpummu_ops = {
        .mmu_init = kgsl_gpummu_init,
        .mmu_close = kgsl_gpummu_close,
        .mmu_start = kgsl_gpummu_start,
        .mmu_stop = kgsl_gpummu_stop,
        .mmu_setstate = kgsl_gpummu_setstate,
        .mmu_device_setstate = kgsl_gpummu_default_setstate,
        .mmu_pagefault = kgsl_gpummu_pagefault,
        .mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
        .mmu_pt_equal = kgsl_gpummu_pt_equal,
        .mmu_get_pt_base_addr = kgsl_gpummu_get_pt_base_addr,
        .mmu_enable_clk = NULL,
        .mmu_disable_clk_on_ts = NULL,
        .mmu_get_default_ttbr0 = NULL,
        .mmu_get_reg_gpuaddr = NULL,
        .mmu_get_reg_ahbaddr = NULL,
        .mmu_get_num_iommu_units = kgsl_gpummu_get_num_iommu_units,
        .mmu_hw_halt_supported = NULL,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
        .mmu_map = kgsl_gpummu_map,
        .mmu_unmap = kgsl_gpummu_unmap,
        .mmu_create_pagetable = kgsl_gpummu_create_pagetable,
        .mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
};