/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995 Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
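
/*
 * Allocate the zeroed pages backing empty_zero_page.  Older machines
 * get a single page; z10 and later get a block of 1 << 2 pages so that
 * the zero page handed out for a given virtual address can vary in
 * cache color.  zero_page_mask selects a page within that block from
 * the virtual address.
 */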
static unsigned long setup_zero_pages(void)
{
        struct cpuid cpu_id;
        unsigned int order;
        unsigned long size;
        struct page *page;
        int i;

        get_cpu_id(&cpu_id);
        switch (cpu_id.machine) {
        case 0x9672:    /* g5 */
        case 0x2064:    /* z900 */
        case 0x2066:    /* z900 */
        case 0x2084:    /* z990 */
        case 0x2086:    /* z990 */
        case 0x2094:    /* z9-109 */
        case 0x2096:    /* z9-109 */
                order = 0;
                break;
        case 0x2097:    /* z10 */
        case 0x2098:    /* z10 */
        default:
                order = 2;
                break;
        }

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                SetPageReserved(page);
                page++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}

/*
 * paging_init() sets up the kernel page table, loads its address into
 * the ASCE control registers and switches the CPU to DAT (virtual
 * addressing) mode.
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;

        init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
        if (VMALLOC_END > (1UL << 42)) {
                /* Address space beyond 4TB: use a region-second table */
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
#else
        asce_bits = _ASCE_TABLE_LENGTH;
        pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
        S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long) * 2048);
        vmem_map_init();

        /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        /* set the DAT bit in the PSW mask */
        arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

        atomic_set(&init_mm.context.attach_count, 1);

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
        fault_init();
}
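
/*
 * mem_init() releases the boot-time memory to the buddy allocator,
 * sets up the empty zero page(s) and prints the memory layout.
 */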
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();   /* Setup zeroed pages. */

        reservedpages = 0;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               nr_free_pages() << (PAGE_SHIFT - 10),
               max_mapnr << (PAGE_SHIFT - 10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT - 10),
               datasize >> 10,
               initsize >> 10);
        printk("Write protected kernel read-only data: %#lx - %#lx\n",
               (unsigned long)&_stext,
               PFN_ALIGN((unsigned long)&_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
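/*
 * Called by the page allocator when CONFIG_DEBUG_PAGEALLOC is set:
 * unmapping a page (enable == 0) invalidates its kernel pte so that
 * stray accesses to freed pages fault immediately; mapping it again
 * re-establishes a writable pte.
 */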
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long address;
        int i;

        for (i = 0; i < numpages; i++) {
                address = page_to_phys(page + i);
                pgd = pgd_offset_k(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
                if (!enable) {
                        __ptep_ipte(address, pte);
                        pte_val(*pte) = _PAGE_TYPE_EMPTY;
                        continue;
                }
                *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
                /* Flush cpu write queue. */
                mb();
        }
}
#endif
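
/*
 * Return a range of init pages to the page allocator: clear the
 * reserved bit, poison the contents and free each page.
 */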
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (begin >= end)
                return;
        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
                       PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
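
/*
 * Free the memory occupied by the __init sections once boot is done.
 */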
void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)&__init_begin,
                        (unsigned long)&__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
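/*
 * Create the kernel mapping for a hot-plugged memory range and add its
 * page frames zone by zone: pages that fall inside an existing zone's
 * span stay in that zone, the remainder goes to ZONE_MOVABLE.  On
 * failure the mapping is torn down again.
 */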
int arch_add_memory(int nid, u64 start, u64 size)
{
        unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        struct zone *zone;
        int rc;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
        for_each_zone(zone) {
                if (zone_idx(zone) != ZONE_MOVABLE) {
                        /* Add range within existing zone limits */
                        zone_start_pfn = zone->zone_start_pfn;
                        zone_end_pfn = zone->zone_start_pfn +
                                       zone->spanned_pages;
                } else {
                        /* Add remaining range to ZONE_MOVABLE */
                        zone_start_pfn = start_pfn;
                        zone_end_pfn = start_pfn + size_pages;
                }
                if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
                        continue;
                nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
                           zone_end_pfn - start_pfn : size_pages;
                rc = __add_pages(nid, zone, start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
                if (!size_pages)
                        break;
        }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */