page_isolation.c

/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
                                   bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs, these
         * pages reported as "can be isolated" should be isolated (freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means pages that are not on the LRU. If there are
         * more immobile pages than removable-by-driver pages reported by the
         * notifier, we'll fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}

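/*
 * Illustrative sketch (not part of the original file): a balloon driver that
 * participates in the MEM_ISOLATE_COUNT notification above would register a
 * callback with register_memory_isolate_notifier() and add, to
 * arg->pages_found, the number of pages in [start_pfn, start_pfn + nr_pages)
 * it holds and can give back. The callback and helper names below are
 * hypothetical:
 *
 *      static int my_balloon_isolate_cb(struct notifier_block *nb,
 *                                       unsigned long action, void *data)
 *      {
 *              struct memory_isolate_notify *arg = data;
 *
 *              if (action == MEM_ISOLATE_COUNT)
 *                      arg->pages_found += my_balloon_count_pages(
 *                                      arg->start_pfn, arg->nr_pages);
 *              return NOTIFY_OK;
 *      }
 */
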
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long page_idx, buddy_idx;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;

        /*
         * Because a free page with order >= pageblock_order on an isolated
         * pageblock is not allowed to merge (to keep the free page counters
         * correct), a free buddy page may still exist next to it.
         * move_freepages_block() does not handle merging, so we need another
         * approach: isolating the page and freeing it again lets the buddy
         * allocator merge them.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
                        buddy_idx = __find_buddy_index(page_idx, order);
                        buddy = page + (buddy_idx - page_idx);

                        if (pfn_valid_within(page_to_pfn(buddy)) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page of order >= pageblock_order, there
         * should be no other free pages in the pageblock, so we can skip
         * the costly pageblock scan that move_freepages_block() would do.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
                post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(page, order);
        }
}

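/*
 * Illustrative example (not in the original source): with pageblock_order = 9
 * and MAX_ORDER = 11 (a common x86 configuration), a free order-10 buddy page
 * spans two pageblocks. If only one of them was isolated, the
 * PageBuddy/order >= pageblock_order branch above pulls the page out with
 * __isolate_free_page() and, once the pageblock has been retagged and the
 * zone lock dropped, frees it again so the buddy allocator can merge it with
 * its non-isolated neighbour.
 */
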
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

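/*
 * Illustrative note (not in the original source): with CONFIG_HOLES_IN_ZONE
 * disabled, pfn_valid_within() is compile-time true and this simply returns
 * pfn_to_page(pfn); otherwise it skips pfns that fall into memory holes
 * inside a pageblock. Callers use it per pageblock, roughly:
 *
 *      page = __first_valid_page(pfn, pageblock_nr_pages);
 *      if (!page)
 *              continue;       // the whole pageblock is a hole
 */
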
/*
 * start_isolate_page_range() -- make the page allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page allocation type MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}

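/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern, as in alloc_contig_range() or memory offlining, is to isolate the
 * pageblocks, migrate whatever is still in use, verify the range, and finally
 * undo the isolation. Roughly, assuming this kernel version's interfaces and
 * a movable range:
 *
 *      ret = start_isolate_page_range(start_pfn, end_pfn,
 *                                     MIGRATE_MOVABLE, false);
 *      if (ret)
 *              return ret;
 *      // ... migrate in-use pages out of [start_pfn, end_pfn) ...
 *      if (test_pages_isolated(start_pfn, end_pfn, false))
 *              ret = -EBUSY;
 *      undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */
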
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
                else if (skip_hwpoisoned_pages && PageHWPoison(page))
                        /* A HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else
                        break;
        }
        return pfn;
}

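/*
 * Illustrative note (not in the original source): the scan advances in
 * buddy-sized jumps, so a fully free, isolated 2MB pageblock that sits on the
 * freelist as a single order-9 page is verified in one step
 * (pfn += 1 << 9), while a hwpoisoned page only advances the scan by one pfn.
 */
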
/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
         * not necessarily aligned to pageblock_nr_pages.
         * Check the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are either free or marked MIGRATE_ISOLATE */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return pfn < end_pfn ? -EBUSY : 0;
}

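/*
 * Illustrative note (not in the original source): pages still sitting on
 * per-cpu freelists are neither PageBuddy nor on the MIGRATE_ISOLATE
 * freelist, so callers such as alloc_contig_range() typically call
 * drain_all_pages() on the zone before relying on this check.
 */
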
struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        /*
         * TODO: allocate a destination hugepage from the nearest neighbor
         * node, in accordance with the memory policy of the user process if
         * possible. For now, as a simple work-around, we use the next node
         * for the destination.
         */
        if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                            next_node_in(page_to_nid(page),
                                                         node_online_map));

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}

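/*
 * Illustrative sketch (not part of the original file): this function serves
 * as the new_page_t allocation callback handed to migrate_pages() when
 * emptying an isolated range. A typical call, with the list and reason names
 * assumed rather than quoted from a specific caller, would look like:
 *
 *      ret = migrate_pages(&source_list, alloc_migrate_target, NULL, 0,
 *                          MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */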