setup_32.c

/*
 * Common prep/pmac/chrp boot and setup code.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/bootx.h>
#include <asm/btext.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
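
/*
 * Debug output from this file is compiled out by default: DBG()
 * expands to nothing unless the macro is redefined to print.
 */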
#define DBG(fmt...)

extern void bootx_init(unsigned long r4, unsigned long phys);

int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);
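
/*
 * ISA DMA limit and DMA controller mode values.  They start out as
 * zero here; platform setup code for boards with ISA-style DMA is
 * expected to fill them in.
 */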
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/*
 * We're called here very early in the boot.
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
        unsigned long offset = reloc_offset();

        /* First zero the BSS -- use memset_io, some platforms don't have
         * caches on yet */
        memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
                        __bss_stop - __bss_start);

        /*
         * Identify the CPU type and fix up code sections
         * that depend on which cpu we have.
         */
        identify_cpu(offset, mfspr(SPRN_PVR));

        apply_feature_fixups();

        return KERNELBASE + offset;
}

/*
 * This is run before start_kernel(); the kernel has been relocated
 * and we are running with enough of the MMU enabled to have our
 * proper kernel virtual addresses.
 *
 * We do the initial parsing of the flat device-tree and prepare
 * for the MMU to be fully initialized.
 */

extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */

notrace void __init machine_init(u64 dt_ptr)
{
        /* Configure static keys first, now that we're relocated. */
        setup_feature_keys();

        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
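
        /*
         * memcpy() and memset() start out branching to variants that are
         * safe before the data cache is usable.  Now that we're relocated
         * and caching works, patch those branches to NOPs so the
         * cache-optimised (dcbz-based) paths get used instead.
         */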
        patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
        patch_instruction(&memset_nocache_branch, PPC_INST_NOP);

        /* Do some early initialization based on the flat device tree */
        early_init_devtree(__va(dt_ptr));

        early_init_mmu();

        setup_kdump_trampoline();
}

/* Checks "l2cr=xxxx" command-line option */
int __init ppc_setup_l2cr(char *str)
{
        if (cpu_has_feature(CPU_FTR_L2CR)) {
                unsigned long val = simple_strtoul(str, NULL, 0);
                printk(KERN_INFO "l2cr set to %lx\n", val);
                _set_L2CR(0);           /* force invalidate by disable cache */
                _set_L2CR(val);         /* and enable it */
        }
        return 1;
}
__setup("l2cr=", ppc_setup_l2cr);

/* Checks "l3cr=xxxx" command-line option */
int __init ppc_setup_l3cr(char *str)
{
        if (cpu_has_feature(CPU_FTR_L3CR)) {
                unsigned long val = simple_strtoul(str, NULL, 0);
                printk(KERN_INFO "l3cr set to %lx\n", val);
                _set_L3CR(val);         /* and enable it */
        }
        return 1;
}
__setup("l3cr=", ppc_setup_l3cr);

#ifdef CONFIG_GENERIC_NVRAM

/* Generic nvram hooks used by drivers/char/gen_nvram.c */
unsigned char nvram_read_byte(int addr)
{
        if (ppc_md.nvram_read_val)
                return ppc_md.nvram_read_val(addr);
        return 0xff;
}
EXPORT_SYMBOL(nvram_read_byte);

void nvram_write_byte(unsigned char val, int addr)
{
        if (ppc_md.nvram_write_val)
                ppc_md.nvram_write_val(addr, val);
}
EXPORT_SYMBOL(nvram_write_byte);

ssize_t nvram_get_size(void)
{
        if (ppc_md.nvram_size)
                return ppc_md.nvram_size();
        return -1;
}
EXPORT_SYMBOL(nvram_get_size);

void nvram_sync(void)
{
        if (ppc_md.nvram_sync)
                ppc_md.nvram_sync();
}
EXPORT_SYMBOL(nvram_sync);

#endif /* CONFIG_GENERIC_NVRAM */
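
/*
 * Late (arch_initcall) setup: clear the boot progress display and give
 * the platform a chance to run its own ppc_md.init() hook.
 */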
int __init ppc_init(void)
{
        /* clear the progress line */
        if (ppc_md.progress)
                ppc_md.progress(" ", 0xffff);

        /* call platform init */
        if (ppc_md.init != NULL) {
                ppc_md.init();
        }
        return 0;
}
arch_initcall(ppc_init);

void __init irqstack_early_init(void)
{
        unsigned int i;

        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by default */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                hardirq_ctx[i] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
}
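
/*
 * Book E and 40x CPUs take critical (and, on Book E, debug and machine
 * check) exceptions on separate stacks, allocated here per hardware CPU.
 */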
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void __init exc_lvl_early_init(void)
{
        unsigned int i, hw_cpu;

        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
                hw_cpu = get_hard_smp_processor_id(i);
#else
                hw_cpu = 0;
#endif

                critirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
                dbgirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
        }
}
#endif
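
/* Select a power_save idle routine based on what this CPU can do. */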
void __init setup_power_save(void)
{
#ifdef CONFIG_6xx
        if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
            cpu_has_feature(CPU_FTR_CAN_NAP))
                ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
        if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
            cpu_has_feature(CPU_FTR_CAN_NAP))
                ppc_md.power_save = e500_idle;
#endif
}
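
/*
 * Record the cache block sizes from the cputable; they feed the
 * AT_DCACHEBSIZE/AT_ICACHEBSIZE/AT_UCACHEBSIZE ELF aux vector entries
 * mentioned above.
 */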
__init void initialize_cache_info(void)
{
        /*
         * Set cache line size based on type of cpu as a default.
         * Systems with OF can look in the properties on the cpu node(s)
         * for a possibly more accurate value.
         */
        dcache_bsize = cur_cpu_spec->dcache_bsize;
        icache_bsize = cur_cpu_spec->icache_bsize;
        ucache_bsize = 0;

        if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
                ucache_bsize = icache_bsize = dcache_bsize;
}