
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-contiguous.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/cp15.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

int msm_krait_need_wfe_fixup;
EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
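
/*
 * Parse the "initrd=<start>,<size>" kernel command line parameter,
 * e.g. "initrd=0x82000000,8M" (the address here is only illustrative).
 * Both values are recorded as physical addresses.
 */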
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
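
/*
 * The legacy ATAG_INITRD tag carries a virtual address, so it has to be
 * translated back to a physical one; ATAG_INITRD2 below already carries
 * a physical address.
 */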
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}
__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}
__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as by show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
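
/*
 * Walk every registered memory bank, classify each page, and print a
 * summary of RAM usage (free, reserved, slab, shared, swap-cached).
 */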
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
#ifdef CONFIG_SPARSEMEM
			pfn1++;
			if (!(pfn1 % PAGES_PER_SECTION))
				page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
#else
		} while (page < end);
#endif
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
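
/*
 * Derive the PFN limits from meminfo: the lowest PFN, the end of the
 * last lowmem bank, and the end of the last (possibly highmem) bank.
 */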
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}

static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
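
/*
 * Record the machine's DMA zone size, if any, and derive the highest
 * physical address that GFP_DMA may return; without a machine-specific
 * DMA zone the limit defaults to the full 32-bit address space.
 */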
void __init setup_dma_zone(struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static void __init arm_bootmem_free_hmnm(unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	struct memblock_region *reg;

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	max_zone_pfns[0] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_high;
#endif
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}
	free_area_init_nodes(max_zone_pfns);
}
#else
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
#endif

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	for_each_bank(i, mi) {
		memory_present(0, bank_pfn_start(&mi->bank[i]),
				bank_pfn_end(&mi->bank[i]));
	}
}
#endif

static bool arm_memblock_steal_permitted = true;
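
/*
 * Permanently carve a block of memory out of memblock's view of the
 * world.  This must happen while arm_memblock_steal_permitted is still
 * true, i.e. no later than the machine's ->reserve() callback invoked
 * from arm_memblock_init() below.
 */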
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

phys_addr_t memory_hole_offset;
EXPORT_SYMBOL(memory_hole_offset);
phys_addr_t memory_hole_start;
EXPORT_SYMBOL(memory_hole_start);
phys_addr_t memory_hole_end;
EXPORT_SYMBOL(memory_hole_end);
unsigned long memory_hole_align;
EXPORT_SYMBOL(memory_hole_align);
unsigned long virtual_hole_start;
unsigned long virtual_hole_end;

#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
void find_memory_hole(void)
{
	int i;
	phys_addr_t hole_start;
	phys_addr_t hole_size;
	unsigned long hole_end_virt;

	/*
	 * Find the start and end of the hole, using meminfo.
	 */
	for (i = 0; i < (meminfo.nr_banks - 1); i++) {
		if ((meminfo.bank[i].start + meminfo.bank[i].size) !=
					meminfo.bank[i+1].start) {
			if (meminfo.bank[i].start + meminfo.bank[i].size
						<= MAX_HOLE_ADDRESS) {
				hole_start = meminfo.bank[i].start +
						meminfo.bank[i].size;
				hole_size = meminfo.bank[i+1].start -
						hole_start;

				if (memory_hole_start == 0 &&
						memory_hole_end == 0) {
					memory_hole_start = hole_start;
					memory_hole_end = hole_start +
							hole_size;
				} else if ((memory_hole_end -
					memory_hole_start) <= hole_size) {
					memory_hole_start = hole_start;
					memory_hole_end = hole_start +
							hole_size;
				}
			}
		}
	}

	memory_hole_offset = memory_hole_start - PHYS_OFFSET;
	if (!IS_ALIGNED(memory_hole_start, SECTION_SIZE)) {
		pr_err("memory_hole_start %pa is not aligned to %lx\n",
			&memory_hole_start, SECTION_SIZE);
		BUG();
	}
	if (!IS_ALIGNED(memory_hole_end, SECTION_SIZE)) {
		pr_err("memory_hole_end %pa is not aligned to %lx\n",
			&memory_hole_end, SECTION_SIZE);
		BUG();
	}

	hole_end_virt = __phys_to_virt(memory_hole_end);
	if ((!IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
	     IS_ALIGNED(memory_hole_end, PMD_SIZE)) ||
	    (IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
	     !IS_ALIGNED(memory_hole_end, PMD_SIZE))) {
		memory_hole_align = !IS_ALIGNED(hole_end_virt, PMD_SIZE) ?
					hole_end_virt & ~PMD_MASK :
					memory_hole_end & ~PMD_MASK;
		virtual_hole_start = hole_end_virt;
		virtual_hole_end = hole_end_virt + memory_hole_align;
		pr_info("Physical memory hole is not aligned. There will be a virtual memory hole from %lx to %lx\n",
			virtual_hole_start, virtual_hole_end);
	}
}
#endif

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

#ifdef CONFIG_TIMA_RKP_30
	memblock_reserve(__pa(_text), PAGE_SIZE);
#endif /* CONFIG_TIMA_RKP_30 */

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

#if 1 /* def CONFIG_SEC_DEBUG */
#ifndef CONFIG_SECURE_MPU_LOCK
	/*
	 * Debugging code for an ext4 panic issue seen during eMBMS
	 * service (H KT); this will be backed out later.
	 */
	memblock_reserve(0x0, PAGE_SIZE);
#endif
#endif

	/*
	 * Reserve memory for DMA contiguous allocations; this must come
	 * from the DMA area inside low memory.
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_allow_resize();
	memblock_dump_all();
}
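
/*
 * Check whether a PFN falls inside any registered memory bank, using a
 * binary search over the bank array (sorted in arm_memblock_init()).
 */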
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
int _early_pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	do {
		unsigned int mid = (right + left) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(_early_pfn_valid);
#endif
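
/*
 * Boot-time memory initialisation: set up bootmem, tell sparsemem
 * which sections are present, initialise the zone free lists, and
 * record the global PFN limits.
 */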
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	arm_bootmem_free_hmnm(max_low, max_high);
#else
	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);
#endif

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
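
/*
 * Hand the pages in [pfn, end) back to the page allocator and return
 * the number of pages freed; "size" below is the range in KiB, used
 * only for the log message.
 */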
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free as much of the unused portion of
 * the mem_map that we are allowed to.  The page migration code moves pages
 * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we
 * can't free mem_map entries that may be dereferenced in this manner.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = round_down(bank_pfn_start(bank),
					MAX_ORDER_NR_PAGES);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		prev_bank_end = round_up(bank_pfn_end(bank),
					 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
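
/*
 * Release every highmem page that is not covered by a memblock
 * reservation back to the page allocator, and account the freed pages
 * in totalhigh_pages.
 */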
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}
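
/*
 * Helpers for the "Virtual kernel memory layout" printout below: each
 * expands to base, top and size, in KiB (MLK), MiB (MLM) or KiB rounded
 * up (MLK_ROUNDUP).
 */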
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
static void print_vmalloc_lowmem_info(void)
{
	int i;
	void *va_start, *va_end;

	printk(KERN_NOTICE
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		MLM(VMALLOC_START, VMALLOC_END));

	for (i = meminfo.nr_banks - 1; i >= 0; i--) {
		if (!meminfo.bank[i].highmem) {
			va_start = __va(meminfo.bank[i].start);
			va_end = __va(meminfo.bank[i].start +
						meminfo.bank[i].size);
			printk(KERN_NOTICE
				"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
				MLM((unsigned long)va_start, (unsigned long)va_end));
		}
		if (i && ((meminfo.bank[i-1].start + meminfo.bank[i-1].size) !=
				meminfo.bank[i].start)) {
			phys_addr_t end_phys;

			if ((meminfo.bank[i-1].start + meminfo.bank[i-1].size) > arm_lowmem_limit)
				continue;
			if (meminfo.bank[i].start > arm_lowmem_limit)
				end_phys = arm_lowmem_limit;
			else
				end_phys = meminfo.bank[i].start;
			if (meminfo.bank[i-1].start + meminfo.bank[i-1].size
					<= MAX_HOLE_ADDRESS) {
				va_start = __va(meminfo.bank[i-1].start
						+ meminfo.bank[i-1].size);
				va_end = __va(end_phys);
				printk(KERN_NOTICE
					"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
					MLM((unsigned long)va_start,
						(unsigned long)va_end));
			}
		}
	}
}
#endif

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
#ifdef CONFIG_SPARSEMEM
			pfn1++;
			if (!(pfn1 % PAGES_PER_SECTION))
				page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
#else
		} while (page < end);
#endif
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
			"    timers  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n",
			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
			MLK(UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE),
				UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE)
					+ (PAGE_SIZE)),
#endif
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP));
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	print_vmalloc_lowmem_info();
#else
	printk(KERN_NOTICE
		   "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		   "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
		   MLM(VMALLOC_START, VMALLOC_END),
		   MLM(PAGE_OFFSET, (unsigned long)high_memory));
#endif
	printk(KERN_NOTICE
#ifdef CONFIG_HIGHMEM
		   "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
		   "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
		   "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
		   "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
		   "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
		   "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
		   MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
			(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
		   MLM(MODULES_VADDR, MODULES_END),
#endif
		   MLK_ROUNDUP(_text, _etext),
		   MLK_ROUNDUP(__init_begin, __init_end),
		   MLK_ROUNDUP(_sdata, _edata),
		   MLK_ROUNDUP(__bss_start, __bss_stop));

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

#ifdef CONFIG_ARM_KERNMEM_PERMS
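/*
 * Each entry describes a section-aligned virtual address range and how
 * its PMDs are rewritten: "mask" selects the bits preserved, "prot" is
 * ORed in when the permission is applied, and "clear" is ORed in when
 * it is reverted (e.g. making the kernel text writable again).
 */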
struct section_perm {
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.start = PAGE_OFFSET,
		.end = (unsigned long)_stext,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.start = (unsigned long)__init_begin,
		.end = (unsigned long)_sdata,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
#ifdef CONFIG_DEBUG_RODATA
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.start = (unsigned long)__start_rodata,
		.end = (unsigned long)__init_begin,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
#endif
};

#ifdef CONFIG_DEBUG_RODATA
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.start = (unsigned long)_stext,
		.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask = ~L_PMD_SECT_RDONLY,
		.prot = L_PMD_SECT_RDONLY,
#else
		.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear = PMD_SECT_AP_WRITE,
#endif
	},
};
#endif

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  It is
 * only safe to call this with preemption disabled, as under
 * stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
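
/*
 * Deliberately a macro rather than a function: "perms" may be either
 * permission table, "field" selects the .prot or .clear member, and
 * the early "return" bails out of the *calling* function when the CPU
 * lacks strict permission support.
 */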
#define set_section_perms(perms, field)	{				\
	size_t i;							\
	unsigned long addr;						\
									\
	if (!arch_has_strict_perms())					\
		return;							\
									\
	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
				perms[i].start, perms[i].end,		\
				SECTION_SIZE);				\
			continue;					\
		}							\
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
			section_update(addr, perms[i].mask,		\
				perms[i].field);			\
	}								\
}

static inline void fix_kernmem_perms(void)
{
	set_section_perms(nx_perms, prot);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	set_section_perms(ro_perms, prot);
}

void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, clear);
}

void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, prot);
}
#endif /* CONFIG_DEBUG_RODATA */

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */

void free_initmem(void)
{
	unsigned long reclaimed_initmem;

	fix_kernmem_perms();
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif
#ifdef CONFIG_STRICT_MEMORY_RWX
	poison_init_mem((char *)__arch_info_begin,
		__init_end - (char *)__arch_info_begin);
	reclaimed_initmem = free_area(__phys_to_pfn(__pa(__arch_info_begin)),
				      __phys_to_pfn(__pa(__init_end)),
				      "init");
	totalram_pages += reclaimed_initmem;
#else
	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator()) {
		reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
					      __phys_to_pfn(__pa(__init_end)),
					      "init");
		totalram_pages += reclaimed_initmem;
	}
#endif
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long reclaimed_initrd_mem;

	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
						 __phys_to_pfn(__pa(end)),
						 "initrd");
		totalram_pages += reclaimed_initrd_mem;
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
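
/*
 * Decide at boot whether this CPU needs the Krait WFE errata fixup:
 * match the affected Krait variants by MIDR (top 24 bits), then test
 * bit 16 of an implementation-defined cp15 register
 * (p15, 7, c15, c0, 5), which reports whether the fixup is required.
 */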
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
static int __init msm_krait_wfe_init(void)
{
	unsigned int val, midr;

	midr = read_cpuid_id() & 0xffffff00;
	if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
		msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
	}
	return 0;
}
pure_initcall(msm_krait_wfe_init);
#endif