/* arch/s390/mm/page-states.c */
  1. /*
  2. * Copyright IBM Corp. 2008
  3. *
  4. * Guest page hinting for unused pages.
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/errno.h>
  10. #include <linux/types.h>
  11. #include <linux/mm.h>
  12. #include <linux/gfp.h>
  13. #include <linux/init.h>
/* ESSA (Extract and Set Storage Attributes) operation codes. */
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2

/*
 * CMMA (Collaborative Memory Management Assist) is enabled by default;
 * it can be switched off with the "cmma=" kernel parameter (see below)
 * or by cmma_init() when the ESSA instruction is unavailable.
 */
static int cmma_flag = 1;
  17. static int __init cmma(char *str)
  18. {
  19. char *parm;
  20. parm = strstrip(str);
  21. if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  22. cmma_flag = 1;
  23. return 1;
  24. }
  25. cmma_flag = 0;
  26. if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  27. return 1;
  28. return 0;
  29. }
  30. __setup("cmma=", cmma);
/*
 * Probe whether the ESSA instruction is available.
 *
 * Executes ESSA once; if the machine does not implement it the
 * resulting exception is caught via the EX_TABLE fixup and rc keeps
 * its preset value of -EOPNOTSUPP.  On success the "la %0,0" at
 * label 0 clears rc.  Returns 0 if ESSA works, -EOPNOTSUPP otherwise.
 *
 * NOTE: tmp and rc are bound to hardware registers 0 and 1 — do not
 * reorder or restructure this function.
 */
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	asm volatile(
		" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	return rc;
}
  43. void __init cmma_init(void)
  44. {
  45. if (!cmma_flag)
  46. return;
  47. if (cmma_test_essa())
  48. cmma_flag = 0;
  49. }
/*
 * Mark each of the 2^order page frames as "unused" with the ESSA
 * instruction, hinting to the hypervisor that their content need not
 * be preserved (guest page hinting — see file header).
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		/* rc receives ESSA's result but is intentionally unused;
		 * the asm only needs an output register. */
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			: "=&d" (rc)
			: "a" (page_to_phys(page + i)),
			  "i" (ESSA_SET_UNUSED));
}
  59. void arch_free_page(struct page *page, int order)
  60. {
  61. if (!cmma_flag)
  62. return;
  63. set_page_unstable(page, order);
  64. }
/*
 * Mark each of the 2^order page frames as "stable" with the ESSA
 * instruction, telling the hypervisor the content must be preserved.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		/* rc receives ESSA's result but is intentionally unused;
		 * the asm only needs an output register. */
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			: "=&d" (rc)
			: "a" (page_to_phys(page + i)),
			  "i" (ESSA_SET_STABLE));
}
  74. void arch_alloc_page(struct page *page, int order)
  75. {
  76. if (!cmma_flag)
  77. return;
  78. set_page_stable(page, order);
  79. }
/*
 * Walk the buddy free lists of every populated zone and set the usage
 * state of all free pages to stable (make_stable != 0) or unused.
 *
 * NOTE(review): presumably used around suspend/dump transitions where
 * all page states must be known — the caller is not visible here.
 */
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	/* Flush this CPU's per-cpu page lists back into the buddy free
	 * lists so the walk below sees those pages too. */
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		/* zone->lock protects the free_area lists; irqsave since
		 * the allocator can be entered from interrupt context. */
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}