ioremap.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
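
/*
 * Fill one PTE table for the range [address, address + size) within a
 * single PMD entry.  Every page is mapped global, present, readable and
 * writable; the caller supplies any cacheability bits in 'flags'.
 */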
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
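
/*
 * Walk the PMD entries covering [address, address + size), allocating a
 * PTE table for each and filling it via remap_area_pte().  Returns 0 on
 * success or -ENOMEM if a page table could not be allocated.
 */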
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
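
/*
 * Top-level page-table walk: for each PGD entry covering the range,
 * allocate the intermediate PUD/PMD levels and hand the range down to
 * remap_area_pmd().  Caches are flushed before and the TLB after the
 * tables are rewritten.
 */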
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
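
/*
 * Helper for walk_system_ram_range(): report whether any page in the
 * range is ordinary RAM (valid and not reserved), in which case it must
 * not be remapped.
 */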
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
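/*
 * True when the whole range fits in the low 512 MB of the physical
 * address space, i.e. the region that the uncached KSEG1 window can
 * reach without page tables.
 */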
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

void __iomem *__ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
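
/*
 * Example (a sketch, not part of this file): drivers normally reach this
 * code through the ioremap() wrapper in <asm/io.h> rather than calling
 * __ioremap() directly.  DEV_PHYS_BASE, DEV_REG_SIZE and DEV_CTRL are
 * hypothetical names used for illustration only:
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, DEV_REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL);
 *	iounmap(regs);
 */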

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
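
/*
 * Tear down a mapping created by __ioremap().  KSEG1 addresses were never
 * backed by a vm_struct, so there is nothing to undo for them.
 */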
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);