/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h. These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
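
/*
 * A minimal usage sketch (illustration only, not part of this file;
 * page/src/len are assumed names): an atomic kmap must be released
 * without sleeping in between, much like a spinlock critical section:
 *
 *	void *dst = kmap_atomic(page);
 *	memcpy(dst, src, len);		(no sleeping in this window)
 *	kunmap_atomic(dst);
 */
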
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Grab a per-CPU kmap slot and compute its fixmap address. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be unused before we install a new mapping. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
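
/*
 * A worked example of the slot arithmetic above (the KM_TYPE_NR value
 * is assumed here for illustration): with KM_TYPE_NR == 20, a type-2
 * mapping on CPU 1 gets idx = 2 + 20 * 1 = 22 and therefore the
 * virtual address __fix_to_virt(FIX_KMAP_BEGIN + 22). Each CPU thus
 * owns its own contiguous window of KM_TYPE_NR fixmap slots, which is
 * why no locking is needed beyond disabling pagefaults/preemption.
 */
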
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Lowmem pages were never remapped; just undo pagefault_disable(). */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);

/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
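
/*
 * Because the slot indices come from the per-CPU stack maintained by
 * kmap_atomic_idx_push()/kmap_atomic_idx_pop(), nested atomic kmaps
 * must be released in LIFO order (a sketch; page_a/page_b are assumed
 * names):
 *
 *	void *a = kmap_atomic(page_a);
 *	void *b = kmap_atomic(page_b);
 *	...
 *	kunmap_atomic(b);
 *	kunmap_atomic(a);
 */
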
/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	/* An ordinary lowmem address: use the linear mapping. */
	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);

	/* An address inside the SRMMU nocache pool. */
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);

	/* Otherwise it must be a fixmap slot; walk back to its pte. */
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
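
/*
 * Example (a sketch, not from this file): given any pointer returned
 * by kmap_atomic(), kmap_atomic_to_page() recovers the struct page it
 * maps, whether the page was highmem (fixmap path) or not:
 *
 *	void *addr = kmap_atomic(page);
 *	BUG_ON(kmap_atomic_to_page(addr) != page);
 *	kunmap_atomic(addr);
 */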