/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)
/*
 * Probe real storage with tprot, one increment (rzm) at a time, and
 * record each contiguous run of equal access type as one entry of
 * chunk[].  Must run with DAT and low address protection disabled
 * (see detect_memory_layout()).
 */
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();		/* probing step; 0 if SCLP has no data */
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;		/* 0 == total size unknown */
	if (!rzm)
		rzm = 1ULL << 17;	/* fall back to 128K probing steps */
	if (sizeof(long) == 4) {
		/* 31-bit mode: clamp step size and total size below 2G */
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	/*
	 * NOTE(review): if memsize stays 0 (64-bit with no SCLP data) the
	 * outer loop condition is false after the first chunk, so only the
	 * first contiguous area is recorded — presumably SCLP always
	 * reports rnmax/rzm here; verify.
	 */
	do {
		size = 0;
		type = tprot(addr);
		/* Grow the chunk while the access type stays the same. */
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}
/*
 * Fill chunk[] (MEMORY_CHUNKS entries, zeroed first) with the memory
 * layout detected via tprot probing.
 */
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __arch_local_irq_stnsm(0xf8);	/* IRQs and DAT off */
	__ctl_store(cr0, 0, 0);			/* save control register 0 */
	__ctl_clear_bit(0, 28);			/* low address protection off */
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);			/* restore cr0 */
	arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
  59. /*
  60. * Move memory chunks array from index "from" to index "to"
  61. */
  62. static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
  63. {
  64. int cnt = MEMORY_CHUNKS - to;
  65. memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
  66. }
  67. /*
  68. * Initialize memory chunk
  69. */
  70. static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
  71. unsigned long size, int type)
  72. {
  73. chunk->type = type;
  74. chunk->addr = addr;
  75. chunk->size = size;
  76. }
/*
 * Create memory hole with given address, size, and type.
 *
 * Every chunk that intersects [addr, addr + size - 1] is retyped,
 * shrunk or split so that the overlapping range carries the hole's
 * type.  Splits insert new entries via mem_chunk_move(), which drops
 * anything pushed past the end of the fixed-size array.
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
unsigned long size, int type)
{
unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
int i, ch_type;
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (chunk[i].size == 0)
continue;
/* Define chunk properties */
ch_start = chunk[i].addr;
ch_size = chunk[i].size;
ch_end = ch_start + ch_size - 1;
ch_type = chunk[i].type;
/* Is memory chunk hit by memory hole? */
if (addr + size <= ch_start)
continue; /* No: memory hole in front of chunk */
if (addr > ch_end)
continue; /* No: memory hole after chunk */
/* Yes: Define local hole properties (intersection with chunk) */
lh_start = max(addr, chunk[i].addr);
lh_end = min(addr + size - 1, ch_end);
lh_size = lh_end - lh_start + 1;
if (lh_start == ch_start && lh_end == ch_end) {
/* Hole covers complete memory chunk: just retype it */
mem_chunk_init(&chunk[i], lh_start, lh_size, type);
} else if (lh_end == ch_end) {
/* Hole starts in memory chunk and covers chunk end */
mem_chunk_move(chunk, i + 1, i);
mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
ch_type);
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
/* Skip the hole entry; hole may extend into next chunk */
i += 1;
} else if (lh_start == ch_start) {
/* Hole ends in memory chunk: split, then done */
mem_chunk_move(chunk, i + 1, i);
mem_chunk_init(&chunk[i], lh_start, lh_size, type);
mem_chunk_init(&chunk[i + 1], lh_end + 1,
ch_size - lh_size, ch_type);
break;
} else {
/* Hole splits memory chunk into three parts: done */
mem_chunk_move(chunk, i + 2, i);
mem_chunk_init(&chunk[i], ch_start,
lh_start - ch_start, ch_type);
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
mem_chunk_init(&chunk[i + 2], lh_end + 1,
ch_end - lh_end, ch_type);
break;
}
}
}