mm_inline.h

#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise RAM or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}
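
/*
 * Adjust the LRU size counters of @lruvec by @nr_pages (which may be
 * negative). Both the node-wide and the per-zone counts are updated via
 * the non-atomic __mod_*_page_state() helpers, so the caller must already
 * provide serialization (typically the LRU lock).
 */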
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
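
/*
 * As __update_lru_size(), but additionally keeps the memory cgroup's
 * per-lruvec LRU size in sync when CONFIG_MEMCG is enabled.
 */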
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
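
/*
 * Link @page at the head of the @lru list of @lruvec, accounting for the
 * full number of base pages: hpage_nr_pages() returns more than 1 for a
 * transparent huge page.
 */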
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}
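
/*
 * Unlink @page from the @lru list of @lruvec and subtract its page count
 * from the LRU size statistics.
 */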
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was the page on? Clears its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
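
/*
 * Return the page at the tail of the list headed by @head. Pages are added
 * at the head (see add_page_to_lru_list()), so ->prev holds the oldest
 * entry - the next reclaim candidate.
 */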
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

#endif