kgsl_mmu.c

/* Copyright (c) 2002,2007-2014,2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/export.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_gpummu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"

static enum kgsl_mmutype kgsl_mmu_type;

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
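/*
 * kgsl_cleanup_pt() - run each device's pagetable cleanup hook for @pt,
 * then the MMU-specific cleanup on the 3D device. Mirrors kgsl_setup_pt()
 * below.
 */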
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	struct kgsl_device *device;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	/* Only the 3d device needs mmu specific pt entries */
	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
	if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
		device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);

	return 0;
}
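/*
 * kgsl_setup_pt() - call each device's setup_pt hook for @pt, plus the
 * MMU-specific setup on the 3D device. On failure, unwinds by running
 * cleanup_pt on the devices already visited.
 */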
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;
	struct kgsl_device *device;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	/* Only the 3d device needs mmu specific pt entries */
	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
	if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
		status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
		if (status) {
			i = KGSL_DEVICE_MAX - 1;
			goto error_pt;
		}
	}
	return status;

error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];

		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
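/*
 * kgsl_destroy_pagetable() - kref release callback. Unlinks the pagetable
 * from kgsl_driver.pagetable_list, removes its sysfs nodes, runs the
 * cleanup hooks, destroys its gen_pool allocators and frees it.
 */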
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable);

	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}
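/*
 * kgsl_get_pagetable() - look up a pagetable by name under ptlock and take
 * a reference. kref_get_unless_zero() skips pagetables already on their
 * way to destruction. Returns NULL if no live pagetable matches.
 */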
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
			ret = pt;
			break;
		}
	}
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	return ret;
}
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned int ptname;

	if (!kobj)
		return NULL;

	if (kstrtou32(kobj->name, 0, &ptname))
		return NULL;

	return kgsl_get_pagetable(ptname);
}
static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.entries);

		ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.mapped);

		ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
				kgsl_mmu_get_ptsize(pt->mmu));
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.max_mapped);

		ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.max_entries);

		ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}
	return ret;
}
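/*
 * kgsl_mmu_get_ptname_from_ptbase() - map a hardware pagetable base address
 * back to a pagetable name by walking pagetable_list. Returns -1 when no
 * pagetable matches, or KGSL_MMU_GLOBAL_PT when the MMU cannot compare
 * pagetable bases at all.
 */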
int
kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, phys_addr_t pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
		return KGSL_MMU_GLOBAL_PT;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
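/*
 * kgsl_mmu_log_fault_addr() - deduplicate pagefault logging. Returns 1 if
 * the faulting page is the one already recorded for the matching pagetable
 * (the caller should skip logging); otherwise records the page and
 * returns 0.
 */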
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base,
			unsigned int addr)
{
	struct kgsl_pagetable *pt;
	unsigned int ret = 0;

	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
		return 0;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
				ret = 1;
				break;
			} else {
				pt->fault_addr = (addr & ~(PAGE_SIZE-1));
				ret = 0;
				break;
			}
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
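/*
 * kgsl_mmu_init() - allocate the shared setstate page and bind the mmu_ops
 * matching the configured MMU type (GPUMMU, IOMMU, or none), then run the
 * type-specific init. The setstate page is freed again on failure.
 */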
int kgsl_mmu_init(struct kgsl_device *device)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
	if (status)
		return status;

	/* Mark the setstate memory as read only */
	mmu->setstate_memory.flags |= KGSL_MEMFLAGS_GPUREADONLY;

	kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is NOMMU\n",
			 __func__);
		goto done;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	status = mmu->mmu_ops->mmu_init(mmu);

done:
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);
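/*
 * kgsl_mmu_start() - enable address translation. With no MMU configured,
 * the MH MMU config register is cleared and the global mappings are set
 * up once via kgsl_setup_pt(NULL); otherwise the type-specific start hook
 * runs.
 */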
int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		kgsl_regwrite(device, MH_MMU_CONFIG, 0);
		/* Setup gpuaddr of global mappings */
		if (!mmu->setstate_memory.gpuaddr)
			kgsl_setup_pt(NULL);
		return 0;
	} else {
		return mmu->mmu_ops->mmu_start(mmu);
	}
}
EXPORT_SYMBOL(kgsl_mmu_start);
static void mh_axi_error(struct kgsl_device *device, const char *type)
{
	unsigned int reg, gpu_err, phys_err;
	phys_addr_t pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);

	KGSL_MEM_CRIT(device,
		      "axi %s error: %08x pt %pa gpu %08x phys %08x\n",
		      type, reg, &pt_base, gpu_err, phys_err);
}
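/*
 * kgsl_mh_intrcallback() - MH interrupt handler: report AXI read/write
 * errors, dispatch MMU pagefaults to the mmu_ops handler, and ack the
 * handled bits via MH_INTERRUPT_CLEAR.
 */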
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		mh_axi_error(device, "read");
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		mh_axi_error(device, "write");
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
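/*
 * kgsl_mmu_createpagetableobject() - allocate and initialize a new
 * pagetable: set up its VA gen_pool (plus the shared kgsl_pool for IOMMU
 * global pagetables), create the hardware pagetable through pt_ops, run
 * the per-device setup hooks, then publish it on pagetable_list and in
 * sysfs.
 */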
static struct kgsl_pagetable *
kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
			       unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;
	unsigned int ptsize;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);
	spin_lock_init(&pagetable->lock);

	ptsize = kgsl_mmu_get_ptsize(mmu);
	pagetable->mmu = mmu;
	pagetable->name = name;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
	pagetable->fault_addr = 0xFFFFFFFF;

	atomic_set(&pagetable->stats.entries, 0);
	atomic_set(&pagetable->stats.mapped, 0);
	atomic_set(&pagetable->stats.max_mapped, 0);
	atomic_set(&pagetable->stats.max_entries, 0);

	/*
	 * Create a separate kgsl pool for IOMMU; global mappings can be
	 * mapped just once from this pool of the default pagetable.
	 */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
	    ((KGSL_MMU_GLOBAL_PT == name) ||
	     (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
		pagetable->kgsl_pool = gen_pool_create(ilog2(SZ_8K), -1);
		if (pagetable->kgsl_pool == NULL) {
			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
				      ilog2(SZ_8K));
			goto err_alloc;
		}
		if (gen_pool_add(pagetable->kgsl_pool,
				 KGSL_IOMMU_GLOBAL_MEM_BASE,
				 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
			KGSL_CORE_ERR("gen_pool_add failed\n");
			goto err_kgsl_pool;
		}
	}

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_kgsl_pool;
	}

	if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(mmu),
			 ptsize, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		pagetable->pt_ops = &gpummu_pt_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		pagetable->pt_ops = &iommu_pt_ops;

	pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
	if (!pagetable->priv)
		goto err_pool;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_mmu_create;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_mmu_create:
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_kgsl_pool:
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
err_alloc:
	kfree(pagetable);

	return NULL;
}
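/*
 * kgsl_mmu_getpagetable() - return the pagetable named @name with a
 * reference held, creating it on first use. Without per-process
 * pagetables every caller shares KGSL_MMU_GLOBAL_PT; with no MMU at all,
 * a dummy (-1) pointer is returned. Drop the reference with
 * kgsl_mmu_putpagetable() when done, e.g.:
 *
 *	pt = kgsl_mmu_getpagetable(mmu, name);
 *	...
 *	kgsl_mmu_putpagetable(pt);
 */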
struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
					     unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

	if (!kgsl_mmu_is_perprocess(mmu))
		name = KGSL_MMU_GLOBAL_PT;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(mmu, name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
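/*
 * kgsl_setstate() - request a TLB flush and/or pagetable switch for the
 * given context. A no-op when nothing is requested (except on a2xx, which
 * always takes the call) or when no MMU is in use.
 */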
int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
		  uint32_t flags)
{
	struct kgsl_device *device = mmu->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
	    && !adreno_is_a2xx(adreno_dev))
		return 0;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return 0;
	else if (device->ftbl->setstate)
		return device->ftbl->setstate(device, context_id, flags);
	else if (mmu->mmu_ops->mmu_device_setstate)
		return mmu->mmu_ops->mmu_device_setstate(mmu, flags);

	return 0;
}
EXPORT_SYMBOL(kgsl_setstate);
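/*
 * kgsl_mh_start() - program the memory hub: MMU forced off, MPU range
 * taken from the platform data, and arbiter/client interface configs
 * written when provided.
 */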
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;

	/* force mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
		      mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			      mh->mh_intf_cfg1);
	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			      mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
EXPORT_SYMBOL(kgsl_mh_start);
/**
 * kgsl_mmu_get_gpuaddr - Assign a memdesc a gpuaddr from the gen pool
 * @pagetable - pagetable whose pool is to be used
 * @memdesc - memdesc to which gpuaddr is assigned
 *
 * returns - 0 on success else error code
 */
int
kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		     struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool = NULL;
	int size;
	int page_align = ilog2(PAGE_SIZE);

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		if (memdesc->sglen == 1) {
			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
			if (!memdesc->gpuaddr)
				memdesc->gpuaddr = sg_phys(memdesc->sg);
			if (!memdesc->gpuaddr) {
				KGSL_CORE_ERR("Unable to get a valid physical address for memdesc\n");
				return -EINVAL;
			}
			return 0;
		} else {
			KGSL_CORE_ERR("Memory is not contiguous (sglen = %d)\n",
				      memdesc->sglen);
			return -EINVAL;
		}
	}

	/* Add space for the guard page when allocating the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += PAGE_SIZE;

	pool = pagetable->pool;

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
		/*
		 * Allocate aligned virtual addresses for iommu. This allows
		 * more efficient pagetable entries if the physical memory
		 * is also aligned. Don't do this for GPUMMU, because
		 * the address space is so small.
		 */
		if (kgsl_memdesc_get_align(memdesc) > 0)
			page_align = kgsl_memdesc_get_align(memdesc);
		if (kgsl_memdesc_is_global(memdesc)) {
			/*
			 * Only the default pagetable has a kgsl_pool, and
			 * it is responsible for creating the mapping for
			 * each global buffer. The mapping will be reused
			 * in all other pagetables and it must already exist
			 * when we're creating other pagetables which do not
			 * have a kgsl_pool.
			 */
			pool = pagetable->kgsl_pool;
			if (pool == NULL && memdesc->gpuaddr == 0) {
				KGSL_CORE_ERR(
					"No address for global mapping into pt %d\n",
					pagetable->name);
				return -EINVAL;
			}
		} else if (kgsl_memdesc_use_cpu_map(memdesc)) {
			if (memdesc->gpuaddr == 0)
				return -EINVAL;
			pool = NULL;
		}
	}

	if (pool) {
		memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
							  page_align);
		if (memdesc->gpuaddr == 0) {
			unsigned int entries =
				atomic_read(&pagetable->stats.entries);
			unsigned int mapped =
				atomic_read(&pagetable->stats.mapped);

			KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
				      size,
				      (pool == pagetable->kgsl_pool) ?
				      "kgsl_pool" : "general_pool");
			KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				      pagetable->name, mapped, entries);
			return -ENOMEM;
		}
	}
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);
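/**
 * kgsl_mmu_map - Map a memdesc into a pagetable at its assigned gpuaddr
 * @pagetable - pagetable to map into
 * @memdesc - memdesc to map; its gpuaddr must already be assigned
 *
 * returns - 0 on success else error code
 */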
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     struct kgsl_memdesc *memdesc)
{
	int ret = 0;
	int size;
	unsigned int protflags = kgsl_memdesc_protflags(memdesc);

	if (!memdesc->gpuaddr)
		return -EINVAL;
	/* Only global mappings should be mapped multiple times */
	if (!kgsl_memdesc_is_global(memdesc) &&
	    (KGSL_MEMDESC_MAPPED & memdesc->priv))
		return -EINVAL;

	/* Add space for the guard page when allocating the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += PAGE_SIZE;

	ret = pagetable->pt_ops->mmu_map(pagetable, memdesc, protflags,
					 &pagetable->tlb_flags);
	if (ret == 0) {
		KGSL_STATS_ADD(size, &pagetable->stats.mapped,
			       &pagetable->stats.max_mapped);
		KGSL_STATS_ADD(1, &pagetable->stats.entries,
			       &pagetable->stats.max_entries);

		memdesc->priv |= KGSL_MEMDESC_MAPPED;
	}

	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
/**
 * kgsl_mmu_put_gpuaddr - Free a gpuaddr back to the memory pool
 * @pagetable - pagetable whose pool the gpuaddr is freed to
 * @memdesc - memdesc whose gpuaddr is freed
 *
 * returns - 0 on success else error code
 */
int
kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
		     struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
		goto done;

	/* Add space for the guard page when freeing the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += PAGE_SIZE;

	pool = pagetable->pool;

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
		if (kgsl_memdesc_is_global(memdesc))
			pool = pagetable->kgsl_pool;
		else if (kgsl_memdesc_use_cpu_map(memdesc))
			pool = NULL;
	}
	if (pool)
		gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
done:
	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
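/**
 * kgsl_mmu_unmap - Remove a memdesc's mapping from a pagetable
 * @pagetable - pagetable the memdesc is currently mapped in
 * @memdesc - memdesc to unmap
 *
 * Clears the recorded fault address if it fell inside the unmapped range
 * and updates the pagetable statistics. The gpuaddr itself is released
 * separately with kgsl_mmu_put_gpuaddr().
 */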
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	       struct kgsl_memdesc *memdesc)
{
	int size;
	unsigned int start_addr = 0;
	unsigned int end_addr = 0;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0 ||
	    !(KGSL_MEMDESC_MAPPED & memdesc->priv))
		return -EINVAL;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
		return 0;

	/* Add space for the guard page when freeing the mmu VA. */
	size = memdesc->size;
	if (kgsl_memdesc_has_guard_page(memdesc))
		size += PAGE_SIZE;

	start_addr = memdesc->gpuaddr;
	end_addr = (memdesc->gpuaddr + size);

	pagetable->pt_ops->mmu_unmap(pagetable, memdesc,
				     &pagetable->tlb_flags);

	/* If buffer is unmapped 0 fault addr */
	if ((pagetable->fault_addr >= start_addr) &&
	    (pagetable->fault_addr < end_addr))
		pagetable->fault_addr = 0;

	/* Remove the statistics */
	atomic_dec(&pagetable->stats.entries);
	atomic_sub(size, &pagetable->stats.mapped);

	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->priv &= ~KGSL_MEMDESC_MAPPED;

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
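/**
 * kgsl_mmu_map_global - Map a global buffer into a pagetable
 * @pagetable - pagetable to map into
 * @memdesc - memdesc describing the global buffer
 *
 * Marks the memdesc global, assigns a gpuaddr and maps it, then verifies
 * that the buffer landed at the same gpu address it holds in every other
 * pagetable; a mismatch is an error and the mapping is rolled back.
 */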
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	/* Not all global mappings are needed for all MMU types */
	if (!memdesc->size)
		return 0;

	gpuaddr = memdesc->gpuaddr;
	memdesc->priv |= KGSL_MEMDESC_GLOBAL;

	result = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
	if (result)
		goto error;
	result = kgsl_mmu_map(pagetable, memdesc);
	if (result)
		goto error_put_gpuaddr;

	/* Global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %pK addr mismatch phys %pa gpu 0x%0x 0x%08x",
			      pagetable, &memdesc->physaddr, gpuaddr,
			      memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;

error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error_put_gpuaddr:
	kgsl_mmu_put_gpuaddr(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);
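/*
 * kgsl_mmu_close() - tear down the MMU on device close: free the setstate
 * page and, when an MMU is in use, run the type-specific close hook.
 */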
int kgsl_mmu_close(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_sharedmem_free(&mmu->setstate_memory);
	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
		return 0;
	else
		return mmu->mmu_ops->mmu_close(mmu);
}
EXPORT_SYMBOL(kgsl_mmu_close);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
			  enum kgsl_deviceid id)
{
	unsigned int result = 0;

	if (pt == NULL)
		return 0;

	spin_lock(&pt->lock);
	if (pt->tlb_flags & (1 << id)) {
		result = KGSL_MMUFLAGS_TLBFLUSH;
		pt->tlb_flags &= ~(1 << id);
	}
	spin_unlock(&pt->lock);
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
void kgsl_mmu_ptpool_destroy(void *ptpool)
{
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		kgsl_gpummu_ptpool_destroy(ptpool);
	ptpool = 0;
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);

void *kgsl_mmu_ptpool_init(int entries)
{
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		return kgsl_gpummu_ptpool_init(entries);
	else
		return (void *)(-1);
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_init);

int kgsl_mmu_enabled(void)
{
	if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
		return 1;
	else
		return 0;
}
EXPORT_SYMBOL(kgsl_mmu_enabled);

enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on 8960 and earlier, none on 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_present(&platform_bus_type))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	/* An explicit mmutype string overrides the probed default */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_present(&platform_bus_type) && mmutype &&
	    !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
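/*
 * kgsl_mmu_gpuaddr_in_range() - check whether @gpuaddr falls inside the
 * pagetable's valid VA range. Everything is "in range" with no MMU; with
 * a per-process IOMMU pagetable, any nonzero address below TASK_SIZE also
 * counts, since such pagetables can use CPU-assigned (mmap) addresses.
 */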
int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr)
{
	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return 1;
	if (gpuaddr >= kgsl_mmu_get_base_addr(pt->mmu) &&
	    gpuaddr < kgsl_mmu_get_base_addr(pt->mmu) +
	    kgsl_mmu_get_ptsize(pt->mmu))
		return 1;
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
	    && kgsl_mmu_is_perprocess(pt->mmu))
		return (gpuaddr > 0 && gpuaddr < TASK_SIZE);
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);