hibernate_32.c

/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  In the non-PAE case this simply returns
 * the pgd entry itself, since the middle layer is folded into it.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
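	/*
	 * The pmd table must live in a page that will not be overwritten
	 * while the image is restored; get_safe_page() guarantees that.
	 */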
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
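	/* Allocate a fresh page table only if the pmd entry is empty;
	 * otherwise reuse the table it already points to.
	 */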
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;
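
	/*
	 * The linear kernel mapping starts at PAGE_OFFSET, so pfn 0
	 * corresponds to the first pgd slot handled here.
	 */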
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;
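
		/*
		 * Past max_low_pfn there is nothing left to map, but each
		 * remaining pgd entry still gets its own pmd table, as in
		 * the analogous loop in arch/x86/mm/init_32.c.
		 */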
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (cpu_has_pse) {
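				/*
				 * One large page covers PTRS_PER_PTE small
				 * pages: 4 MB without PAE, 2 MB with it.
				 */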
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
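	/*
	 * This way every top-level entry points at a valid pmd table
	 * until resume_physical_mapping_init() fills in the entries
	 * that cover lowmem.
	 */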
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
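	/* On success restore_image() does not return here: execution
	 * continues in the restored kernel image.
	 */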
	restore_image();

	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
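	/* PAGE_ALIGN() rounds the end up, so a partially filled last page
	 * of the section counts as nosave as well.
	 */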
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}