etnaviv_iommu.c 6.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249
  1. /*
  2. * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License version 2 as published by
  6. * the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program. If not, see <http://www.gnu.org/licenses/>.
  15. */
  16. #include <linux/iommu.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/sizes.h>
  19. #include <linux/slab.h>
  20. #include <linux/dma-mapping.h>
  21. #include <linux/bitops.h>
  22. #include "etnaviv_gpu.h"
  23. #include "etnaviv_mmu.h"
  24. #include "etnaviv_iommu.h"
  25. #include "state_hi.xml.h"
  26. #define PT_SIZE SZ_2M
  27. #define PT_ENTRIES (PT_SIZE / sizeof(u32))
  28. #define GPU_MEM_START 0x80000000
/* CPU mapping and bus address of the single-level GPU page table. */
struct etnaviv_iommu_domain_pgtable {
	u32 *pgtable;		/* kernel virtual address of the table */
	dma_addr_t paddr;	/* DMA address programmed into the MC MMU registers */
};
/*
 * etnaviv IOMMUv1 domain: embeds the generic iommu_domain and carries the
 * backing page table plus a scratch "bad" page.  Every unmapped page table
 * entry points at the scratch page, so stray GPU accesses hit a known
 * poison pattern instead of arbitrary memory.
 */
struct etnaviv_iommu_domain {
	struct iommu_domain domain;	/* embedded; recovered via container_of() */
	struct device *dev;		/* device used for the bad-page DMA allocation */
	void *bad_page_cpu;		/* CPU address of the scratch page */
	dma_addr_t bad_page_dma;	/* DMA address of the scratch page */
	struct etnaviv_iommu_domain_pgtable pgtable;
	spinlock_t map_lock;		/* serializes page table entry updates */
};
/* Upcast from the embedded iommu_domain to the etnaviv wrapper. */
static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommu_domain, domain);
}
/*
 * Allocate a DMA-coherent buffer of @size bytes for the page table and
 * record both its CPU and DMA addresses in @pgtable.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): this passes a NULL struct device to dma_alloc_coherent(),
 * while the bad-page allocation in __etnaviv_iommu_init() uses
 * etnaviv_domain->dev — confirm the NULL device is intentional and works
 * on all supported architectures.
 */
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
		size_t size)
{
	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
	if (!pgtable->pgtable)
		return -ENOMEM;
	return 0;
}
/*
 * Free a page table previously allocated with pgtable_alloc().
 * NOTE(review): the NULL device must match the one used at allocation
 * time (pgtable_alloc() also passes NULL).
 */
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
		size_t size)
{
	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
}
  58. static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
  59. unsigned long iova)
  60. {
  61. /* calcuate index into page table */
  62. unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
  63. phys_addr_t paddr;
  64. paddr = pgtable->pgtable[index];
  65. return paddr;
  66. }
  67. static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
  68. unsigned long iova, phys_addr_t paddr)
  69. {
  70. /* calcuate index into page table */
  71. unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
  72. pgtable->pgtable[index] = paddr;
  73. }
/*
 * Allocate the domain's backing resources: a scratch page filled with a
 * recognizable poison pattern, and the 2MB page table with every entry
 * initially pointing at that scratch page.
 *
 * Returns 0 on success or a negative errno; on failure, any partially
 * acquired resource is released before returning.
 */
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						SZ_4K,
						&etnaviv_domain->bad_page_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu)
		return -ENOMEM;

	/* fill the scratch page with a poison pattern so stray GPU
	 * accesses are easy to recognize */
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
	if (ret < 0) {
		/* unwind the bad-page allocation on failure */
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				etnaviv_domain->bad_page_cpu,
				etnaviv_domain->bad_page_dma);
		return ret;
	}

	/* point every entry at the bad page, so unmapped iovas resolve
	 * to known memory rather than random physical pages */
	for (i = 0; i < PT_ENTRIES; i++)
		etnaviv_domain->pgtable.pgtable[i] =
			etnaviv_domain->bad_page_dma;

	spin_lock_init(&etnaviv_domain->map_lock);
	return 0;
}
/*
 * iommu_ops.domain_free callback: release the page table, the scratch
 * page, and finally the domain structure itself (reverse of the
 * acquisition order in __etnaviv_iommu_init()).
 */
static void etnaviv_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			etnaviv_domain->bad_page_cpu,
			etnaviv_domain->bad_page_dma);
	kfree(etnaviv_domain);
}
  109. static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
  110. phys_addr_t paddr, size_t size, int prot)
  111. {
  112. struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
  113. if (size != SZ_4K)
  114. return -EINVAL;
  115. spin_lock(&etnaviv_domain->map_lock);
  116. pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
  117. spin_unlock(&etnaviv_domain->map_lock);
  118. return 0;
  119. }
  120. static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
  121. unsigned long iova, size_t size)
  122. {
  123. struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
  124. if (size != SZ_4K)
  125. return -EINVAL;
  126. spin_lock(&etnaviv_domain->map_lock);
  127. pgtable_write(&etnaviv_domain->pgtable, iova,
  128. etnaviv_domain->bad_page_dma);
  129. spin_unlock(&etnaviv_domain->map_lock);
  130. return SZ_4K;
  131. }
/*
 * iommu_ops.iova_to_phys callback: translate @iova through the page table.
 *
 * NOTE(review): pgtable_read() returns a 32-bit entry, so physical
 * addresses above 4G cannot be represented; no range check is applied
 * to @iova either — an out-of-aperture iova reads out of bounds.
 */
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	return pgtable_read(&etnaviv_domain->pgtable, iova);
}
/* Size of a page table dump: the whole 2MB table, regardless of domain. */
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
{
	return PT_SIZE;
}
/*
 * Copy the entire page table into @buf; the caller must provide at
 * least etnaviv_iommuv1_dump_size() bytes.
 */
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
}
/*
 * IOMMUv1 callback table.  The inner .ops struct is the generic
 * iommu_ops consumed by the IOMMU core; dump_size/dump are
 * etnaviv-specific hooks used by the driver's state dump path.
 */
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_domain_free,
		.map = etnaviv_iommuv1_map,
		.unmap = etnaviv_iommuv1_unmap,
		.iova_to_phys = etnaviv_iommu_iova_to_phys,
		.pgsize_bitmap = SZ_4K,	/* only 4K pages supported */
	},
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
/*
 * Program the GPU memory controller registers for this domain: the
 * linear window base address for each engine, and the page table DMA
 * address into all five MMU units (FE/TX/PE/PEZ/RA).
 */
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommu_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC; NOTE(review): paddr is truncated
	 * to 32 bits — assumes the table is allocated below 4G */
	pgtable = (u32)etnaviv_domain->pgtable.paddr;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
  177. struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
  178. {
  179. struct etnaviv_iommu_domain *etnaviv_domain;
  180. int ret;
  181. etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
  182. if (!etnaviv_domain)
  183. return NULL;
  184. etnaviv_domain->dev = gpu->dev;
  185. etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
  186. etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
  187. etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
  188. etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
  189. etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
  190. ret = __etnaviv_iommu_init(etnaviv_domain);
  191. if (ret)
  192. goto out_free;
  193. return &etnaviv_domain->domain;
  194. out_free:
  195. kfree(etnaviv_domain);
  196. return NULL;
  197. }