/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only once we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
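
/*
 * The constants above describe a single 256MB DVMA window starting
 * at IOMMU_START: with 4K pages that is 64K ioptes, so the IOMMU
 * page table occupies 256KB, i.e. an order-6 (64 page) allocation.
 */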

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
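/*
 * flush_page_for_dma is a BTFIXUP call slot: srmmu.c patches it at
 * boot to the cache flush routine for the detected CPU, and appears
 * to leave it empty on I/O coherent chips (see ld_mmu_iommu() below).
 */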
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
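/*
 * Build an iopte: place the pfn in the IOPTE_PAGE field (shifted left
 * by 8), OR in the permission bits, and clear the IOPTE_WAZ
 * (presumably "write as zero") bits that must stay zero.
 */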
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need a 256K, 512K, 1M or 2M area aligned to
	   its size, and the current gfp allocator fortunately
	   gives it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long)iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}
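
/*
 * Probe: every "iommu" node in the device tree gets its own
 * iommu_struct, and of_propagate_archdata() hands the archdata
 * pointer down to the devices beneath it, so that the mapping
 * routines below can find their IOMMU via dev->archdata.iommu.
 */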
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
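
/*
 * Map npages physically contiguous pages into the DVMA window.
 * The bitmap allocator hands back a run of iopte slots; the page
 * color passed in (the pfn here) keeps DVMA and physical addresses
 * congruent for virtually indexed caches.
 */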
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
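
/*
 * Map a kernel virtual buffer for DMA.  The buffer need not be page
 * aligned: the containing pages are mapped whole and the intra-page
 * offset is added back onto the returned bus address.
 */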
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
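
/*
 * Three variants of each mapping entry point, selected at boot in
 * ld_mmu_iommu() according to how this CPU must flush its caches
 * before DMA: not at all (I/O coherent), globally, or page by page.
 */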
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long)vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long)page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
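
/*
 * Tear down a mapping made by iommu_get_one(): clear the ioptes,
 * invalidate the IOMMU TLB entry for each bus page, and return the
 * slots to the bitmap.
 */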
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;	/* poison: 0x21 is '!' */
		sg = sg_next(sg);
	}
}
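
/*
 * Set up a consistent DMA area: the kernel pages behind va are
 * remapped at addr with dvma_prot (uncacheable on non-coherent CPUs)
 * and entered into the IOMMU with ioperm_noc; the bus address comes
 * back through pba.  Used for the sbus consistent allocations.
 */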
#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
				    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 * are handled by a single interface.  Some cpus are
	 * completely not I/O DMA coherent, and some have
	 * virtually indexed caches.  The driver DMA flushing
	 * methods handle the former case, but here during
	 * IOMMU page table modifications, and usage of non-cacheable
	 * cpu mappings of pages potentially in the cpu caches, we have
	 * to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif
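
/*
 * These are no-ops here: with an IOMMU, buffers need no pinning or
 * copying, but the mmu_lockarea/mmu_unlockarea slots are part of the
 * shared sparc32 MMU interface, so stubs are provided.
 */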
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
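
/*
 * Wire the mmu_* entry points to the IOMMU implementations.  The
 * choice between the noflush/gflush/pflush variants, and between
 * cacheable and uncacheable consistent mappings, depends on the
 * cache behaviour of the CPU detected at boot.
 */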
void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}