#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;

PHDRS {
	code   PT_LOAD;
	percpu PT_LOAD;
	data   PT_LOAD;
	note   PT_NOTE;
	unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
}

SECTIONS {
	/*
	 * unwind exit sections must be discarded before
	 * the rest of the sections get included.
	 */
	/DISCARD/ : {
		*(.IA_64.unwind.exit.text)
		*(.IA_64.unwind_info.exit.text)
		*(.comment)
		*(.note)
	}

	v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
	phys_start = _start - LOAD_OFFSET;
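
	/*
	 * An empty output section with a ":phdr" assignment only switches
	 * which program header the following sections land in; the
	 * data/code_continues placeholders below use the same trick.
	 */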
	code : {
	} :code

	. = KERNEL_START;

	_text = .;
	_stext = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
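		/*
		 * The interruption vector table (ivt.S) goes at the very
		 * start of .text; __start_ivt_text/__end_ivt_text bracket
		 * it so other code can locate it.
		 */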
		__start_ivt_text = .;
		*(.text..ivt)
		__end_ivt_text = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.linkonce.t*)
	}

	.text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
		*(.text2)
	}

#ifdef CONFIG_SMP
	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
		*(.text..lock)
	}
#endif

	_etext = .;

	/*
	 * Read-only data
	 */
	NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
	code_continues : {
	} : code		/* switch back to regular program... */

	EXCEPTION_TABLE(16)

	/* MCA table */
	. = ALIGN(16);
	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
		__start___mca_table = .;
		*(__mca_table)
		__stop___mca_table = .;
	}
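
	/*
	 * The .data..patch.* sections here and after the init sections are
	 * lists of instruction locations that get rewritten at boot time
	 * (see arch/ia64/kernel/patch.c).
	 */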
	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
		__start___phys_stack_reg_patchlist = .;
		*(.data..patch.phys_stack_reg)
		__end___phys_stack_reg_patchlist = .;
	}

	/*
	 * Global data
	 */
	_data = .;

	/* Unwind info & table: */
	. = ALIGN(8);
	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
		*(.IA_64.unwind_info*)
	}
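	/*
	 * The unwind table proper: the kernel's unwinder consumes it via
	 * __start_unwind/__end_unwind, while the PT_IA_64_UNWIND segment
	 * advertises it to ELF tools per the ia64 ABI.
	 */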
	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
		__start_unwind = .;
		*(.IA_64.unwind*)
		__end_unwind = .;
	} :code :unwind
	code_continues2 : {
	} : code

	RODATA
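
	/*
	 * .opd holds the "official procedure descriptors" (function entry
	 * point + gp pairs) that ia64 uses for indirect function calls.
	 */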
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	/*
	 * Initialization code and data:
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)

	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
		__start___vtop_patchlist = .;
		*(.data..patch.vtop)
		__end___vtop_patchlist = .;
	}

	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
		__start___rse_patchlist = .;
		*(.data..patch.rse)
		__end___rse_patchlist = .;
	}

	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
		__start___mckinley_e9_bundles = .;
		*(.data..patch.mckinley_e9)
		__end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_IA64_GENERIC)
	/* Machine Vector */
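	/*
	 * Generic kernels link in every machine vector and select the
	 * matching one by name at boot, so the descriptors are collected
	 * between machvec_start and machvec_end.
	 */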
	. = ALIGN(16);
	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
		machvec_start = .;
		*(.machvec)
		machvec_end = .;
	}
#endif

#ifdef CONFIG_SMP
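	/*
	 * Reserve one per-cpu page for the boot CPU, which needs a per-cpu
	 * area before the per-cpu allocator has been set up.
	 */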
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;
	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
#endif

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		. = ALIGN(PAGE_SIZE);
		__start_gate_section = .;
		*(.data..gate)
		__stop_gate_section = .;
	}
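
	/*
	 * .data..gate is the gate page that gets mapped into user space
	 * (signal trampoline and syscall entry stubs), hence the page
	 * alignment on both sides of it.
	 */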
	/*
	 * make sure the gate page doesn't expose
	 * kernel data
	 */
	. = ALIGN(PAGE_SIZE);

	/* Per-cpu data: */
	. = ALIGN(PERCPU_PAGE_SIZE);
	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
	__phys_per_cpu_start = __per_cpu_load;
	/*
	 * ensure percpu data fits
	 * into percpu page size
	 */
	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
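	/*
	 * If the per-cpu data were larger than PERCPU_PAGE_SIZE, the
	 * assignment above would move the location counter backwards,
	 * which ld rejects, so this doubles as a build-time size check.
	 */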

	data : {
	} :data
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		_sdata = .;
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got.plt)
		*(.got)
	}
	__gp = ADDR(.got) + 0x200000;
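	/*
	 * gp-relative addressing uses a 22-bit signed immediate (±2MB), so
	 * biasing __gp by 2MB past the start of .got centers a 4MB window
	 * over .got and the small-data sections that follow.
	 */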

	/*
	 * We want the small data sections together,
	 * so single-instruction offsets can access
	 * them all, and initialized data all before
	 * uninitialized, so we can shorten the
	 * on-disk segment size.
	 */
	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
		*(.sdata)
		*(.sdata1)
		*(.srdata)
	}

	_edata = .;

	BSS_SECTION(0, 0, 0)

	_end = .;

	code : {
	} :code

	STABS_DEBUG
	DWARF_DEBUG

	/* Default discards */
	DISCARDS
}