highmem.c
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
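
/*
 * Hedged usage sketch (illustration only, not part of this file's build):
 * a typical caller maps a highmem page with kmap(), touches the data,
 * then unmaps it. The helper name copy_byte_from_page() is hypothetical.
 *
 *	static char copy_byte_from_page(struct page *page, size_t off)
 *	{
 *		char *vaddr, c;
 *
 *		vaddr = kmap(page);	// may sleep, process context only
 *		c = vaddr[off];
 *		kunmap(page);		// drops the pkmap reference
 *		return c;
 *	}
 */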

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
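
/*
 * Hedged usage sketch (illustration only, not built here): an atomic
 * kmap must be released on the same CPU without sleeping in between.
 * Callers normally go through the kunmap_atomic() wrapper from
 * <linux/highmem.h>, which ends up in __kunmap_atomic() above; the
 * helper name zero_highpage_atomic() below is hypothetical.
 *
 *	static void zero_highpage_atomic(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page);	// no sleeping until unmapped
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr);			// pass the vaddr, not the page
 *	}
 */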

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}
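
/*
 * Hedged usage sketch (illustration only): kmap_atomic_pfn() helps when
 * only a page frame number is known, e.g. memory without a struct page.
 * The pfn value and peek_pfn_byte() helper are hypothetical; the mapping
 * is dropped through __kunmap_atomic() (normally via the kunmap_atomic()
 * wrapper), just as with kmap_atomic().
 *
 *	static u8 peek_pfn_byte(unsigned long pfn, size_t off)
 *	{
 *		u8 *vaddr = kmap_atomic_pfn(pfn);
 *		u8 val = vaddr[off];
 *
 *		kunmap_atomic(vaddr);
 *		return val;
 *	}
 */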

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}