/* arch/s390/mm/page-states.c */
  1. /*
  2. * Copyright IBM Corp. 2008
  3. *
  4. * Guest page hinting for unused pages.
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/errno.h>
  10. #include <linux/types.h>
  11. #include <linux/mm.h>
  12. #include <linux/gfp.h>
  13. #include <linux/init.h>
  14. #include <asm/setup.h>
  15. #include <asm/ipl.h>
/* ESSA operation codes, passed as the immediate operand of the insn below */
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2

/* CMMA enabled by default; overridable with the "cmma=" boot parameter */
static int cmma_flag = 1;
  19. static int __init cmma(char *str)
  20. {
  21. char *parm;
  22. parm = strstrip(str);
  23. if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  24. cmma_flag = 1;
  25. return 1;
  26. }
  27. cmma_flag = 0;
  28. if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  29. return 1;
  30. return 0;
  31. }
  32. __setup("cmma=", cmma);
/*
 * Probe for the ESSA instruction at boot.
 *
 * Executes one ESSA; if it traps, the extable fixup skips the
 * "la %0,0" so rc keeps its preset -EOPNOTSUPP and cmma_flag is
 * cleared, leaving the feature permanently off.
 */
void __init cmma_init(void)
{
	/* operands pinned to GPRs 0 and 1 via register asm */
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	/*
	 * Disable CMM for dump, otherwise the tprot based memory
	 * detection can fail because of unstable pages.
	 */
	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
		cmma_flag = 0;
		return;
	}
	asm volatile(
		" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	/* instruction trapped -> ESSA unavailable, disable CMMA */
	if (rc)
		cmma_flag = 0;
}
/*
 * Issue ESSA with ESSA_SET_UNUSED on each of the (1 << order) pages of
 * the buddy, hinting to the hypervisor that their contents are no
 * longer needed.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	/* rc only satisfies the asm output constraint; its value is unused */
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
  65. void arch_free_page(struct page *page, int order)
  66. {
  67. if (!cmma_flag)
  68. return;
  69. set_page_unstable(page, order);
  70. }
/*
 * Issue ESSA with ESSA_SET_STABLE on each of the (1 << order) pages of
 * the buddy, marking their contents as in use again.
 */
static inline void set_page_stable(struct page *page, int order)
{
	/* rc only satisfies the asm output constraint; its value is unused */
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
  80. void arch_alloc_page(struct page *page, int order)
  81. {
  82. if (!cmma_flag)
  83. return;
  84. set_page_stable(page, order);
  85. }
  86. void arch_set_page_states(int make_stable)
  87. {
  88. unsigned long flags, order, t;
  89. struct list_head *l;
  90. struct page *page;
  91. struct zone *zone;
  92. if (!cmma_flag)
  93. return;
  94. if (make_stable)
  95. drain_local_pages(NULL);
  96. for_each_populated_zone(zone) {
  97. spin_lock_irqsave(&zone->lock, flags);
  98. for_each_migratetype_order(order, t) {
  99. list_for_each(l, &zone->free_area[order].free_list[t]) {
  100. page = list_entry(l, struct page, lru);
  101. if (make_stable)
  102. set_page_stable(page, order);
  103. else
  104. set_page_unstable(page, order);
  105. }
  106. }
  107. spin_unlock_irqrestore(&zone->lock, flags);
  108. }
  109. }