highmem_32.c

#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>
void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
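
/*
 * Illustrative sketch, not part of the upstream file: a typical caller of the
 * sleepable kmap()/kunmap() pair above. The helper name
 * example_zero_highpage() and the use of memset() are assumptions made for
 * the example only.
 */
static inline void example_zero_highpage(struct page *page)
{
        void *vaddr = kmap(page);       /* may sleep, so process context only */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                   /* kunmap() takes the page, not vaddr */
}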

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
        return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
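
/*
 * Illustrative sketch, not part of the upstream file: the short, non-sleeping
 * critical section that the comment above calls for. The helper name
 * example_copy_highpage() is an assumption; it mirrors what generic
 * copy_highpage()-style code does with the atomic kmap API.
 */
static inline void example_copy_highpage(struct page *to, struct page *from)
{
        void *vto = kmap_atomic(to);    /* pagefaults and preemption disabled */
        void *vfrom = kmap_atomic(from);

        memcpy(vto, vfrom, PAGE_SIZE);  /* must not sleep between map/unmap */

        kunmap_atomic(vfrom);           /* unmap in reverse (stack) order */
        kunmap_atomic(vto);
}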

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
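
/*
 * Illustrative sketch, not part of the upstream file: kmap_atomic_pfn() is
 * useful when only a page frame number is known, e.g. for a reserved or
 * device-owned frame with no struct page. The helper name example_peek_word()
 * and its parameters are assumptions for the example only.
 */
static inline u32 example_peek_word(unsigned long pfn, unsigned int offset)
{
        void *vaddr = kmap_atomic_pfn(pfn);     /* same atomicity rules apply */
        u32 val = *(u32 *)(vaddr + offset);     /* offset must stay within the page */

        kunmap_atomic(vaddr);
        return val;
}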

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

        if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;

                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
                WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
                /*
                 * Force other mappings to Oops if they try to access this
                 * pte without first remapping it. Keeping stale mappings
                 * around is also a bad idea, in case the page changes
                 * cacheability attributes or becomes a protected page in a
                 * hypervisor.
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
                arch_flush_lazy_mmu_mode();
        }
#ifdef CONFIG_DEBUG_HIGHMEM
        else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
        }
#endif

        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
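
/*
 * Illustrative sketch, not part of the upstream file: how the per-CPU fixmap
 * slot used above is derived. Each CPU owns KM_TYPE_NR consecutive fixmap
 * slots starting at FIX_KMAP_BEGIN, so slot "type" on CPU "cpu" lands at
 * fixmap index FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu, matching the idx
 * arithmetic in kmap_atomic_prot() and __kunmap_atomic(). The helper name
 * example_kmap_slot_vaddr() is an assumption.
 */
static inline unsigned long example_kmap_slot_vaddr(int type, int cpu)
{
        return __fix_to_virt(FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu);
}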

void __init set_highmem_pages_init(void)
{
        struct zone *zone;
        int nid;

        /*
         * Explicitly reset zone->managed_pages because set_highmem_pages_init()
         * is invoked before free_all_bootmem()
         */
        reset_all_zones_managed_pages();
        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;

                if (!is_highmem(zone))
                        continue;

                zone_start_pfn = zone->zone_start_pfn;
                zone_end_pfn = zone_start_pfn + zone->spanned_pages;

                nid = zone_to_nid(zone);
                printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
                        zone->name, nid, zone_start_pfn, zone_end_pfn);

                add_highpages_with_active_regions(nid, zone_start_pfn,
                                                  zone_end_pfn);
        }
}