- /*
- * linux/boot/head.S
- *
- * Copyright (C) 1991, 1992, 1993 Linus Torvalds
- */
- /*
- * head.S contains the 32-bit startup code.
- *
- * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
- * the page directory will exist. The startup code will be overwritten by
- * the page directory. [According to comments etc. elsewhere, on a compressed
- * kernel it will end up at 0x1000 + 1Mb; I hope so, as I assume this. - AC]
- *
- * Page 0 is deliberately kept safe, since System Management Mode code in
- * laptops may need to access the BIOS data stored there. This is also
- * useful for future device drivers that access the BIOS via VM86
- * mode.
- */
- /*
- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
- */
- .text
- #include <linux/init.h>
- #include <linux/linkage.h>
- #include <asm/segment.h>
- #include <asm/page_types.h>
- #include <asm/boot.h>
- #include <asm/asm-offsets.h>
- __HEAD
- ENTRY(startup_32)
- #ifdef CONFIG_EFI_STUB
- jmp preferred_addr
- .balign 0x10
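- /*
- * Execution starts here, rather than at startup_32, when the image is
- * loaded directly by EFI firmware as a PE executable; the PE header's
- * entry point is set to this 16-byte-aligned location at build time.
- */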
- /*
- * We don't need the return address, so set up the stack so
- * efi_main() can find its arguments.
- */
- add $0x4, %esp
- call efi_main
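- /*
- * efi_main() returns a pointer to the boot_params structure on success
- * and NULL on failure; keep it in %esi, where the rest of the startup
- * code expects the real-mode data pointer.
- */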
- cmpl $0, %eax
- movl %eax, %esi
- jne 2f
- 1:
- /* EFI init failed, so hang. */
- hlt
- jmp 1b
- 2:
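- /*
- * Work out the run-time address of preferred_addr: the call/pop pair
- * below yields the difference between where we are running and where
- * we were linked, which is then adjusted by the preferred load address
- * (pref_address) and the address the loader actually used (code32_start)
- * from the boot_params in %esi.
- */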
- call 3f
- 3:
- popl %eax
- subl $3b, %eax
- subl BP_pref_address(%esi), %eax
- add BP_code32_start(%esi), %eax
- leal preferred_addr(%eax), %eax
- jmp *%eax
- preferred_addr:
- #endif
- cld
- /*
- * Test KEEP_SEGMENTS flag to see if the bootloader is asking
- * us not to reload segments
- */
- testb $(1<<6), BP_loadflags(%esi)
- jnz 1f
- cli
- movl $__BOOT_DS, %eax
- movl %eax, %ds
- movl %eax, %es
- movl %eax, %fs
- movl %eax, %gs
- movl %eax, %ss
- 1:
- /*
- * Calculate the delta between where we were compiled to run
- * at and where we were actually loaded at. This can only be done
- * with a short local call on x86. Nothing else will tell us what
- * address we are running at. The reserved chunk of the real-mode
- * data at 0x1e4 (defined as a scratch field) is used as the stack
- * for this calculation. Only 4 bytes are needed.
- */
- leal (BP_scratch+4)(%esi), %esp
- call 1f
- 1: popl %ebp
- subl $1b, %ebp
- /*
- * %ebp contains the address we are loaded at by the boot loader and %ebx
- * contains the address where we should move the kernel image temporarily
- * for safe in-place decompression.
- */
- #ifdef CONFIG_RELOCATABLE
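- /*
- * Round the load address up to the configured kernel alignment:
- * %ebx = (%ebp + kernel_alignment - 1) & ~(kernel_alignment - 1)
- */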
- movl %ebp, %ebx
- movl BP_kernel_alignment(%esi), %eax
- decl %eax
- addl %eax, %ebx
- notl %eax
- andl %eax, %ebx
- #else
- movl $LOAD_PHYSICAL_ADDR, %ebx
- #endif
- /* Target address to relocate to for decompression */
- addl $z_extract_offset, %ebx
- /* Set up the stack */
- leal boot_stack_end(%ebx), %esp
- /* Zero EFLAGS */
- pushl $0
- popfl
- /*
- * Copy the compressed kernel to the end of our buffer
- * where decompression in place becomes safe.
- */
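- /*
- * The destination range may overlap the source, so the copy runs
- * backwards: start at the last dword before _bss, set the direction
- * flag, move (_bss - startup_32)/4 dwords, then clear the flag again.
- * %esi is saved around the copy since it still holds the real-mode
- * data pointer.
- */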
- pushl %esi
- leal (_bss-4)(%ebp), %esi
- leal (_bss-4)(%ebx), %edi
- movl $(_bss - startup_32), %ecx
- shrl $2, %ecx
- std
- rep movsl
- cld
- popl %esi
- /*
- * Jump to the relocated address.
- */
- leal relocated(%ebx), %eax
- jmp *%eax
- ENDPROC(startup_32)
- .text
- relocated:
- /*
- * Clear BSS (stack is currently empty)
- */
- xorl %eax, %eax
- leal _bss(%ebx), %edi
- leal _ebss(%ebx), %ecx
- subl %edi, %ecx
- shrl $2, %ecx
- rep stosl
- /*
- * Adjust our own GOT
- */
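- /*
- * Each 32-bit GOT entry between _got and _egot holds a link-time
- * address; add the run-time base in %ebx so the entries point into
- * the relocated image.
- */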
- leal _got(%ebx), %edx
- leal _egot(%ebx), %ecx
- 1:
- cmpl %ecx, %edx
- jae 2f
- addl %ebx, (%edx)
- addl $4, %edx
- jmp 1b
- 2:
- /*
- * Do the decompression, and jump to the new kernel.
- */
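- /*
- * z_extract_offset_negative is the negated z_extract_offset, so this
- * leaves the decompression output address (%ebx minus the extract
- * offset) in %ebp.
- */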
- leal z_extract_offset_negative(%ebx), %ebp
- /* push arguments for decompress_kernel: */
- pushl %ebp /* output address */
- pushl $z_input_len /* input_len */
- leal input_data(%ebx), %eax
- pushl %eax /* input_data */
- leal boot_heap(%ebx), %eax
- pushl %eax /* heap area */
- pushl %esi /* real mode pointer */
- call decompress_kernel
- addl $20, %esp /* pop the five 4-byte arguments */
- #ifdef CONFIG_RELOCATABLE
- /*
- * Find the address of the relocations.
- */
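- /*
- * The relocation table is appended to the kernel image before
- * compression, so after decompression it sits right after the kernel
- * and ends at z_output_len(%ebp).
- */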
- leal z_output_len(%ebp), %edi
- /*
- * Calculate the delta between where vmlinux was compiled to run
- * and where it was actually loaded.
- */
- movl %ebp, %ebx
- subl $LOAD_PHYSICAL_ADDR, %ebx
- jz 2f /* Nothing to be done if loaded at compiled addr. */
- /*
- * Process relocations.
- */
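- /*
- * Each 32-bit entry is the kernel virtual address of a location that
- * needs adjusting; a zero entry terminates the list. Subtracting
- * __PAGE_OFFSET and adding the delta in %ebx locates the spot in the
- * loaded image, and the same delta is added to the value stored there.
- */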
- 1: subl $4, %edi
- movl (%edi), %ecx
- testl %ecx, %ecx
- jz 2f
- addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
- jmp 1b
- 2:
- #endif
- /*
- * Jump to the decompressed kernel.
- */
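- /*
- * %esi still points at the real-mode boot_params structure; clear
- * %ebx and jump to the freshly decompressed kernel at %ebp.
- */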
- xorl %ebx, %ebx
- jmp *%ebp
- /*
- * Stack and heap for decompression
- */
- .bss
- .balign 4
- boot_heap:
- .fill BOOT_HEAP_SIZE, 1, 0
- boot_stack:
- .fill BOOT_STACK_SIZE, 1, 0
- boot_stack_end: