/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2011  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
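/*
 * Look up the kernel pte for @addr, allocating the intermediate pud
 * and pmd levels as needed.  The pgd entry is expected to already be
 * present; on any failure the offending level is logged and NULL is
 * returned.
 */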
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

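/*
 * Install (or tear down) a single kernel mapping for @addr, flushing
 * the matching local TLB entry.  Mappings created with _PAGE_WIRED in
 * @prot are additionally pinned via the wired TLB entry interface.
 * Callers are expected to stay within ranges (such as the fixmap) for
 * which __get_pte_phys() cannot fail.
 */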
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

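/*
 * Fixmap support: each slot index maps to a compile-time-fixed virtual
 * address, so an out-of-range index is a hard bug.
 */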
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

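/*
 * Boot-time page table constructors: each allocates a zeroed page from
 * bootmem for the next level down (when that level is not yet
 * populated) and hooks it into the hierarchy, returning the first
 * entry of the table.
 */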
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

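/*
 * Pre-populate the page tables covering the kernel virtual range
 * [start, end) -- e.g. the fixmap window -- so that later users such
 * as __set_fixmap() only ever have to fill in ptes and never allocate.
 */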
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

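/*
 * Allocate and minimally initialize the pglist_data for a node.  On
 * multi-node configurations the structure itself is carved out of
 * memblock, preferring memory below the node's own end of RAM.
 */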
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

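/*
 * Bring up the bootmem allocator for one node: allocate its bitmap
 * from memblock, release the node's active regions into it, then
 * re-apply the reservations memblock already knows about.
 */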
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

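/*
 * Register all memblock memory as active ranges on node 0, bring the
 * node online, and run the platform hook before the per-node bootmem
 * and sparsemem setup.
 */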
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

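/*
 * Early memblock reservations: the kernel image (rounded up to a page
 * boundary), anything below CONFIG_ZERO_PAGE_OFFSET, and the initrd
 * and crash kernel regions.
 */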
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

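/*
 * paging_init() sets up boot-time paging: discover and reserve memory,
 * initialize the bootmem allocator per node, prepare swapper_pg_dir
 * and the fixmap window, and hand the zone layout to the core VM.
 */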
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

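/*
 * mem_init() releases boot memory to the buddy allocator, establishes
 * high_memory and the zero page, and prints the memory banners.
 */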
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

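/*
 * Hand the pages of the .init sections (and, below, a loaded initrd)
 * back to the page allocator once boot is complete.
 */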
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

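/*
 * Memory hotplug: everything lives in ZONE_NORMAL here, so hot-added
 * ranges are simply handed to __add_pages() against that zone.
 */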
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */