- /*
- * linux/boot/head.S
- *
- * Copyright (C) 1991, 1992, 1993 Linus Torvalds
- */
- /*
- * head.S contains the 32-bit startup code.
- *
- * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
- * the page directory will exist. The startup code will be overwritten by
- * the page directory. [According to comments etc elsewhere on a compressed
- * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
- *
- * Page 0 is deliberately kept safe, since System Management Mode code in
- * laptops may need to access the BIOS data stored there. This is also
- * useful for future device drivers that need to access the BIOS via
- * VM86 mode.
- */
- /*
- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
- */
- .code32
- .text
- #include <linux/init.h>
- #include <linux/linkage.h>
- #include <asm/segment.h>
- #include <asm/boot.h>
- #include <asm/msr.h>
- #include <asm/processor-flags.h>
- #include <asm/asm-offsets.h>
- #include <asm/bootparam.h>
- /*
- * Locally defined symbols should be marked hidden:
- */
- .hidden _bss
- .hidden _ebss
- .hidden _got
- .hidden _egot
- __HEAD
- .code32
- ENTRY(startup_32)
- /*
- * 32bit entry is 0 and it is ABI so immutable!
- * If we come here directly from a bootloader,
- * kernel(text+data+bss+brk) ramdisk, zero_page, command line
- * all need to be under the 4G limit.
- *
- * Entry contract: %esi points at the real-mode boot_params
- * ("zero page"); all BP_* offsets below are read relative to it.
- */
- cld
- /*
- * Test KEEP_SEGMENTS flag to see if the bootloader is asking
- * us to not reload segments
- */
- testb $KEEP_SEGMENTS, BP_loadflags(%esi)
- jnz 1f
- cli
- movl $(__BOOT_DS), %eax
- movl %eax, %ds
- movl %eax, %es
- movl %eax, %ss
- 1:
- /*
- * Calculate the delta between where we were compiled to run
- * at and where we were actually loaded at. This can only be done
- * with a short local call on x86. Nothing else will tell us what
- * address we are running at. The reserved chunk of the real-mode
- * data at 0x1e4 (defined as a scratch field) is used as the stack
- * for this calculation. Only 4 bytes are needed.
- */
- leal (BP_scratch+4)(%esi), %esp
- call 1f
- 1: popl %ebp
- subl $1b, %ebp
- /* %ebp now holds the load delta (runtime - link address). */
- /* setup a stack and make sure cpu supports long mode. */
- movl $boot_stack_end, %eax
- addl %ebp, %eax
- movl %eax, %esp
- call verify_cpu
- testl %eax, %eax
- jnz no_longmode
- /*
- * Compute the delta between where we were compiled to run at
- * and where the code will actually run at.
- *
- * %ebp contains the address we are loaded at by the boot loader and %ebx
- * contains the address where we should move the kernel image temporarily
- * for safe in-place decompression.
- */
- #ifdef CONFIG_RELOCATABLE
- /* Round the load address up to the requested kernel alignment. */
- movl %ebp, %ebx
- movl BP_kernel_alignment(%esi), %eax
- decl %eax
- addl %eax, %ebx
- notl %eax
- andl %eax, %ebx
- cmpl $LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
- #endif
- movl $LOAD_PHYSICAL_ADDR, %ebx
- 1:
- /* Target address to relocate to for decompression */
- movl BP_init_size(%esi), %eax
- subl $_end, %eax
- addl %eax, %ebx
- /*
- * Prepare for entering 64 bit mode
- */
- /* Load new GDT with the 64bit segments using 32bit descriptor */
- /* Patch the 32-bit base field at gdt+2 with the relocated address. */
- leal gdt(%ebp), %eax
- movl %eax, gdt+2(%ebp)
- lgdt gdt(%ebp)
- /* Enable PAE mode (required before setting EFER.LME) */
- movl %cr4, %eax
- orl $X86_CR4_PAE, %eax
- movl %eax, %cr4
- /*
- * Build early 4G boot pagetable
- */
- /* Initialize Page tables to 0 */
- leal pgtable(%ebx), %edi
- xorl %eax, %eax
- movl $(BOOT_INIT_PGT_SIZE/4), %ecx
- rep stosl
- /* Build Level 4: one entry -> level-3 table at pgtable+0x1000.
- * 0x1007 = next-table offset 0x1000 | PRESENT | RW | USER. */
- leal pgtable + 0(%ebx), %edi
- leal 0x1007 (%edi), %eax
- movl %eax, 0(%edi)
- /* Build Level 3: 4 entries -> the 4 level-2 tables below. */
- leal pgtable + 0x1000(%ebx), %edi
- leal 0x1007(%edi), %eax
- movl $4, %ecx
- 1: movl %eax, 0x00(%edi)
- addl $0x00001000, %eax
- addl $8, %edi
- decl %ecx
- jnz 1b
- /* Build Level 2: 2048 x 2MB identity mappings = 4GB.
- * 0x183 = PRESENT | RW | PSE(2MB page) | GLOBAL. */
- leal pgtable + 0x2000(%ebx), %edi
- movl $0x00000183, %eax
- movl $2048, %ecx
- 1: movl %eax, 0(%edi)
- addl $0x00200000, %eax
- addl $8, %edi
- decl %ecx
- jnz 1b
- /* Enable the boot page tables */
- leal pgtable(%ebx), %eax
- movl %eax, %cr3
- /* Enable Long mode in EFER (Extended Feature Enable Register) */
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_LME, %eax
- wrmsr
- /* After gdt is loaded */
- xorl %eax, %eax
- lldt %ax
- movl $__BOOT_TSS, %eax
- ltr %ax
- /*
- * Setup for the jump to 64bit mode
- *
- * When the jump is performed we will be in long mode but
- * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
- * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
- * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
- * We place all of the values on our mini stack so lret can
- * be used to perform that far jump.
- */
- pushl $__KERNEL_CS
- leal startup_64(%ebp), %eax
- #ifdef CONFIG_EFI_MIXED
- /* Non-zero efi32_config => entered via the EFI 32-bit stub:
- * divert the far return to handover_entry instead. */
- movl efi32_config(%ebp), %ebx
- cmp $0, %ebx
- jz 1f
- leal handover_entry(%ebp), %eax
- 1:
- #endif
- pushl %eax
- /* Enter paged protected Mode, activating Long Mode */
- movl $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
- movl %eax, %cr0
- /* Jump from 32bit compatibility mode into 64bit mode. */
- lret
- ENDPROC(startup_32)
- #ifdef CONFIG_EFI_MIXED
- .org 0x190
- /*
- * EFI mixed-mode (32-bit firmware) entry point, fixed at offset
- * 0x190. Arguments arrive on the stack (after the return address):
- * EFI image handle, EFI system table pointer, boot_params pointer.
- */
- ENTRY(efi32_stub_entry)
- add $0x4, %esp /* Discard return address */
- popl %ecx /* EFI handle */
- popl %edx /* EFI system table */
- popl %esi /* boot_params */
- /* Same call/pop trick as startup_32: %ebp = load delta. */
- leal (BP_scratch+4)(%esi), %esp
- call 1f
- 1: pop %ebp
- subl $1b, %ebp
- /* Stash handle and system table in efi32_config (offsets 0, 8). */
- movl %ecx, efi32_config(%ebp)
- movl %edx, efi32_config+8(%ebp)
- sgdtl efi32_boot_gdt(%ebp)
- leal efi32_config(%ebp), %eax
- movl %eax, efi_config(%ebp)
- jmp startup_32
- ENDPROC(efi32_stub_entry)
- #endif
- .code64
- .org 0x200
- ENTRY(startup_64)
- /*
- * 64bit entry is 0x200 and it is ABI so immutable!
- * We come here either from startup_32 or directly from a
- * 64bit bootloader.
- * If we come here from a bootloader, kernel(text+data+bss+brk),
- * ramdisk, zero_page, command line could be above 4G.
- * We depend on an identity mapped page table being provided
- * that maps our entire kernel(text+data+bss+brk), zero page
- * and command line.
- *
- * Entry contract: %rsi points at boot_params.
- */
- #ifdef CONFIG_EFI_STUB
- /*
- * The entry point for the PE/COFF executable is efi_pe_entry, so
- * only legacy boot loaders will execute this jmp.
- */
- jmp preferred_addr
- /*
- * PE/COFF entry: called by firmware with the Microsoft x64 ABI,
- * so the handle is in %rcx and the system table in %rdx.
- */
- ENTRY(efi_pe_entry)
- movq %rcx, efi64_config(%rip) /* Handle */
- movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
- leaq efi64_config(%rip), %rax
- movq %rax, efi_config(%rip)
- /* %rbp = load delta, via the call/pop trick. */
- call 1f
- 1: popq %rbp
- subq $1b, %rbp
- /*
- * Relocate efi_config->call() (function pointer at offset 32).
- */
- addq %rbp, efi64_config+32(%rip)
- movq %rax, %rdi
- call make_boot_params
- cmpq $0,%rax
- je fail
- mov %rax, %rsi /* %rsi = boot_params from make_boot_params */
- leaq startup_32(%rip), %rax
- movl %eax, BP_code32_start(%rsi)
- jmp 2f /* Skip the relocation */
- handover_entry:
- call 1f
- 1: popq %rbp
- subq $1b, %rbp
- /*
- * Relocate efi_config->call() (function pointer at offset 32).
- */
- movq efi_config(%rip), %rax
- addq %rbp, 32(%rax)
- 2:
- movq efi_config(%rip), %rdi
- call efi_main
- movq %rax,%rsi /* efi_main returns the boot_params pointer */
- cmpq $0,%rax
- jne 2f
- fail:
- /* EFI init failed, so hang. */
- hlt
- jmp fail
- 2:
- /* Re-enter below at preferred_addr, at whatever address the
- * firmware placed the image (code32_start + offset). */
- movl BP_code32_start(%esi), %eax
- leaq preferred_addr(%rax), %rax
- jmp *%rax
- preferred_addr:
- #endif
- /* Setup data segments (null selectors are fine in 64-bit mode). */
- xorl %eax, %eax
- movl %eax, %ds
- movl %eax, %es
- movl %eax, %ss
- movl %eax, %fs
- movl %eax, %gs
- /*
- * Compute the decompressed kernel start address. It is where
- * we were loaded at aligned to a 2M boundary. %rbp contains the
- * decompressed kernel start address.
- *
- * If it is a relocatable kernel then decompress and run the kernel
- * from load address aligned to 2MB addr, otherwise decompress and
- * run the kernel from LOAD_PHYSICAL_ADDR
- *
- * We cannot rely on the calculation done in 32-bit mode, since we
- * may have been invoked via the 64-bit entry point.
- */
- /* Start with the delta to where the kernel will run at. */
- #ifdef CONFIG_RELOCATABLE
- leaq startup_32(%rip) /* - $startup_32 */, %rbp
- movl BP_kernel_alignment(%rsi), %eax
- decl %eax
- addq %rax, %rbp
- notq %rax
- andq %rax, %rbp /* align-up %rbp to kernel_alignment */
- cmpq $LOAD_PHYSICAL_ADDR, %rbp
- jge 1f
- #endif
- movq $LOAD_PHYSICAL_ADDR, %rbp
- 1:
- /* Target address to relocate to for decompression */
- movl BP_init_size(%rsi), %ebx
- subl $_end, %ebx
- addq %rbp, %rbx
- /* Set up the stack */
- leaq boot_stack_end(%rbx), %rsp
- /* Zero EFLAGS */
- pushq $0
- popfq
- /*
- * Copy the compressed kernel to the end of our buffer
- * where decompression in place becomes safe.
- * Copied backwards (std) in 8-byte units since the source and
- * destination buffers may overlap.
- */
- pushq %rsi
- leaq (_bss-8)(%rip), %rsi
- leaq (_bss-8)(%rbx), %rdi
- movq $_bss /* - $startup_32 */, %rcx
- shrq $3, %rcx
- std
- rep movsq
- cld
- popq %rsi
- /*
- * Jump to the relocated address.
- */
- leaq relocated(%rbx), %rax
- jmp *%rax
- #ifdef CONFIG_EFI_STUB
- .org 0x390
- /*
- * EFI handover entry, fixed at offset 0x390. Arguments per the
- * Linux EFI handover protocol: %rdi = handle, %rsi = system table,
- * %rdx = boot_params.
- */
- ENTRY(efi64_stub_entry)
- movq %rdi, efi64_config(%rip) /* Handle */
- movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
- leaq efi64_config(%rip), %rax
- movq %rax, efi_config(%rip)
- movq %rdx, %rsi /* boot_params -> %rsi for handover_entry */
- jmp handover_entry
- ENDPROC(efi64_stub_entry)
- #endif
- .text
- relocated:
- /*
- * Clear BSS (stack is currently empty)
- */
- xorl %eax, %eax
- leaq _bss(%rip), %rdi
- leaq _ebss(%rip), %rcx
- subq %rdi, %rcx
- shrq $3, %rcx
- rep stosq
- /*
- * Adjust our own GOT: add the runtime load address (%rbx) to
- * every entry between _got and _egot.
- */
- leaq _got(%rip), %rdx
- leaq _egot(%rip), %rcx
- 1:
- cmpq %rcx, %rdx
- jae 2f
- addq %rbx, (%rdx)
- addq $8, %rdx
- jmp 1b
- 2:
-
- /*
- * Do the extraction, and jump to the new kernel..
- * SysV AMD64 call: args in rdi, rsi, rdx, ecx, r8, r9.
- */
- pushq %rsi /* Save the real mode argument */
- movq %rsi, %rdi /* real mode address */
- leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
- leaq input_data(%rip), %rdx /* input_data */
- movl $z_input_len, %ecx /* input_len */
- movq %rbp, %r8 /* output target address */
- movq $z_output_len, %r9 /* decompressed length, end of relocs */
- call extract_kernel /* returns kernel location in %rax */
- popq %rsi
- /*
- * Jump to the decompressed kernel.
- */
- jmp *%rax
- .code32
- /* Reached when verify_cpu reports no long-mode support. */
- no_longmode:
- /* This isn't an x86-64 CPU so hang */
- 1:
- hlt
- jmp 1b
- #include "../../kernel/verify_cpu.S"
- .data
- /*
- * GDT used for the 64-bit transition. The first three fields double
- * as the 6-byte lgdt operand (limit, 32-bit base, pad); the base at
- * gdt+2 is patched at runtime in startup_32 before lgdt.
- *
- * NOTE(review): the limit field is conventionally the table size
- * minus one (gdt_end - gdt - 1); the full size over-reports the
- * limit by one byte. Harmless in practice, but worth confirming
- * against upstream before changing.
- */
- gdt:
- .word gdt_end - gdt
- .long gdt
- .word 0
- .quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x00af9a000000ffff /* __KERNEL_CS */
- .quad 0x00cf92000000ffff /* __KERNEL_DS */
- .quad 0x0080890000000000 /* TS descriptor */
- .quad 0x0000000000000000 /* TS continued */
- gdt_end:
- #ifdef CONFIG_EFI_STUB
- /* Pointer to the active config block (efi32_config or efi64_config),
- * set at the entry stubs above. */
- efi_config:
- .quad 0
- #ifdef CONFIG_EFI_MIXED
- /* Mixed-mode config: offsets 0/8 are filled at runtime with the EFI
- * handle and system table; offset 32 is the call thunk, relocated at
- * runtime. Trailing byte presumably flags 32- vs 64-bit firmware
- * (0 here, 1 below) — confirm against struct efi_config. */
- .global efi32_config
- efi32_config:
- .fill 4,8,0
- .quad efi64_thunk
- .byte 0
- #endif
- .global efi64_config
- efi64_config:
- .fill 4,8,0
- .quad efi_call
- .byte 1
- #endif /* CONFIG_EFI_STUB */
- /*
- * Stack and heap for uncompression
- */
- .bss
- .balign 4
- boot_heap:
- .fill BOOT_HEAP_SIZE, 1, 0
- boot_stack:
- .fill BOOT_STACK_SIZE, 1, 0
- boot_stack_end:
- /*
- * Space for page tables (not in .bss so not zeroed)
- */
- .section ".pgtable","a",@nobits
- .balign 4096
- pgtable:
- .fill BOOT_PGT_SIZE, 1, 0
|