  1. /*
  2. * highmem.c: virtual kernel memory mappings for high memory
  3. *
  4. * Provides kernel-static versions of atomic kmap functions originally
  5. * found as inlines in include/asm-sparc/highmem.h. These became
  6. * needed as kmap_atomic() and kunmap_atomic() started getting
  7. * called from within modules.
  8. * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
  9. *
  10. * But kmap_atomic() and kunmap_atomic() cannot be inlined in
  11. * modules because they are loaded with btfixup-ped functions.
  12. */
  13. /*
  14. * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  15. * gives a more generic (and caching) interface. But kmap_atomic can
  16. * be used in IRQ contexts, so in some (very limited) cases we need it.
  17. *
  18. * XXX This is an old text. Actually, it's good to use atomic kmaps,
  19. * provided you remember that they are atomic and not try to sleep
  20. * with a kmap taken, much like a spinlock. Non-atomic kmaps are
  21. * shared by CPUs, and so precious, and establishing them requires IPI.
  22. * Atomic kmaps are lightweight and we may have NCPUS more of them.
  23. */
  24. #include <linux/mm.h>
  25. #include <linux/highmem.h>
  26. #include <linux/export.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/cacheflush.h>
  29. #include <asm/tlbflush.h>
  30. #include <asm/fixmap.h>
  31. void *kmap_atomic(struct page *page)
  32. {
  33. unsigned long vaddr;
  34. long idx, type;
  35. /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
  36. pagefault_disable();
  37. if (!PageHighMem(page))
  38. return page_address(page);
  39. type = kmap_atomic_idx_push();
  40. idx = type + KM_TYPE_NR*smp_processor_id();
  41. vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  42. /* XXX Fix - Anton */
  43. #if 0
  44. __flush_cache_one(vaddr);
  45. #else
  46. flush_cache_all();
  47. #endif
  48. #ifdef CONFIG_DEBUG_HIGHMEM
  49. BUG_ON(!pte_none(*(kmap_pte-idx)));
  50. #endif
  51. set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
  52. /* XXX Fix - Anton */
  53. #if 0
  54. __flush_tlb_one(vaddr);
  55. #else
  56. flush_tlb_all();
  57. #endif
  58. return (void*) vaddr;
  59. }
  60. EXPORT_SYMBOL(kmap_atomic);
  61. void __kunmap_atomic(void *kvaddr)
  62. {
  63. unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
  64. int type;
  65. if (vaddr < FIXADDR_START) { // FIXME
  66. pagefault_enable();
  67. return;
  68. }
  69. type = kmap_atomic_idx();
  70. #ifdef CONFIG_DEBUG_HIGHMEM
  71. {
  72. unsigned long idx;
  73. idx = type + KM_TYPE_NR * smp_processor_id();
  74. BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
  75. /* XXX Fix - Anton */
  76. #if 0
  77. __flush_cache_one(vaddr);
  78. #else
  79. flush_cache_all();
  80. #endif
  81. /*
  82. * force other mappings to Oops if they'll try to access
  83. * this pte without first remap it
  84. */
  85. pte_clear(&init_mm, vaddr, kmap_pte-idx);
  86. /* XXX Fix - Anton */
  87. #if 0
  88. __flush_tlb_one(vaddr);
  89. #else
  90. flush_tlb_all();
  91. #endif
  92. }
  93. #endif
  94. kmap_atomic_idx_pop();
  95. pagefault_enable();
  96. }
  97. EXPORT_SYMBOL(__kunmap_atomic);
  98. /* We may be fed a pagetable here by ptep_to_xxx and others. */
  99. struct page *kmap_atomic_to_page(void *ptr)
  100. {
  101. unsigned long idx, vaddr = (unsigned long)ptr;
  102. pte_t *pte;
  103. if (vaddr < SRMMU_NOCACHE_VADDR)
  104. return virt_to_page(ptr);
  105. if (vaddr < PKMAP_BASE)
  106. return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
  107. BUG_ON(vaddr < FIXADDR_START);
  108. BUG_ON(vaddr > FIXADDR_TOP);
  109. idx = virt_to_fix(vaddr);
  110. pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
  111. return pte_page(*pte);
  112. }