vmlinux.lds.h

/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS	// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..compoundliteral* .data..L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* .bss..L*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define TEXT_CFI_MAIN .text.cfi
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif
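
/*
 * For illustration (a hypothetical example, not part of this header):
 * with -ffunction-sections/-fdata-sections the compiler emits one
 * section per symbol, e.g.
 *
 *	static int counter;
 *	void my_func(void) { counter++; }
 *
 * may produce .text.my_func and .bss.counter, which the TEXT_MAIN /
 * DATA_MAIN / BSS_MAIN patterns above fold back into .text / .data /
 * .bss when dead code elimination or LTO is enabled.
 */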

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
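
/*
 * A hedged usage sketch: code annotated __meminit (from <linux/init.h>)
 * is emitted into .meminit.text. With CONFIG_MEMORY_HOTPLUG that section
 * is kept in resident text via MEM_KEEP(init.text); otherwise
 * MEM_DISCARD(init.text) routes it into init text so it can be freed
 * after boot. For example (a hypothetical function):
 *
 *	int __meminit my_hotplug_helper(void);
 */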

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
	KEEP(*(__mcount_loc)) \
	VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
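
/*
 * A minimal sketch (not part of this header) of how the bracketing
 * symbols emitted above are consumed from C. The symbol names match
 * the ones defined by MCOUNT_REC(); record_call_site() is a
 * hypothetical helper used only for illustration:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void __init walk_mcount_locs(void)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			record_call_site(*p);
 *	}
 */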

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
	KEEP(*(_ftrace_annotated_branch)) \
	VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
	KEEP(*(_ftrace_branch)) \
	VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
	KEEP(*(_kprobe_blacklist)) \
	VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_ftrace_events) = .; \
	KEEP(*(_ftrace_events)) \
	VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
	VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
	KEEP(*(_ftrace_eval_map)) \
	VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
	KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
	VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
	KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
	VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
	KEEP(*(__syscalls_metadata)) \
	VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8); \
	VMLINUX_SYMBOL(__earlycon_table) = .; \
	KEEP(*(__earlycon_table)) \
	VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__##name##_of_table) = .; \
	KEEP(*(__##name##_of_table)) \
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
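
/*
 * As an illustration, with CONFIG_TIMER_OF=y the TIMER_OF_TABLES() entry
 * above expands (via _OF_TABLE_1) roughly to:
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__timer_of_table) = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * while a disabled option expands to nothing via _OF_TABLE_0.
 */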

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
	KEEP(*(__##name##_acpi_probe_table)) \
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB() \
	STRUCT_ALIGN(); \
	VMLINUX_SYMBOL(__dtb_start) = .; \
	KEEP(*(.dtb.init.rodata)) \
	VMLINUX_SYMBOL(__dtb_end) = .;

/*
 * .data section
 */
#define DATA_DATA \
	*(.xiptext) \
	*(DATA_MAIN) \
	*(.ref.data) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data*) \
	MEM_KEEP(exit.data*) \
	*(.data.unlikely) \
	STRUCT_ALIGN(); \
	*(__tracepoints) \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___jump_table) = .; \
	KEEP(*(__jump_table)) \
	VMLINUX_SYMBOL(__stop___jump_table) = .; \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	KEEP(*(__verbose)) \
	VMLINUX_SYMBOL(__stop___verbose) = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	*(.data..nosave) \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned) \
	. = ALIGN(page_align);

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data..read_mostly) \
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	VMLINUX_SYMBOL(__start_init_task) = .; \
	*(.data..init_task) \
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
	VMLINUX_SYMBOL(__start_ro_after_init) = .; \
	*(.data..ro_after_init) \
	VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
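
/*
 * A hedged usage example: a variable marked __ro_after_init (from
 * <linux/cache.h>) is placed in .data..ro_after_init and, on
 * architectures that support it, is write-protected once init
 * completes:
 *
 *	static int setup_done __ro_after_init;
 */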

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		RO_AFTER_INIT_DATA /* Read only after init */ \
		KEEP(*(__vermagic)) /* Kernel version magic */ \
		. = ALIGN(8); \
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		KEEP(*(.pci_fixup_early)) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		KEEP(*(.pci_fixup_header)) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		KEEP(*(.pci_fixup_final)) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		KEEP(*(.pci_fixup_enable)) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		KEEP(*(.pci_fixup_resume)) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		KEEP(*(.pci_fixup_resume_early)) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		KEEP(*(.pci_fixup_suspend)) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
		KEEP(*(.pci_fixup_suspend_late)) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		KEEP(*(.builtin_fw)) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		KEEP(*(SORT(___ksymtab+*))) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		KEEP(*(SORT(___ksymtab_gpl+*))) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		KEEP(*(SORT(___ksymtab_unused+*))) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		KEEP(*(SORT(___ksymtab_gpl_future+*))) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		KEEP(*(SORT(___kcrctab+*))) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		KEEP(*(SORT(___kcrctab_gpl+*))) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		KEEP(*(SORT(___kcrctab_unused+*))) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		KEEP(*(SORT(___kcrctab_gpl_future+*))) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		KEEP(*(__param)) \
		VMLINUX_SYMBOL(__stop___param) = .; \
	} \
	\
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___modver) = .; \
		KEEP(*(__modver)) \
		VMLINUX_SYMBOL(__stop___modver) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));

/*
 * RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA().
 */
#define RODATA RO_DATA_SECTION(4096)
#define RO_DATA(align) RO_DATA_SECTION(align)
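
/*
 * Typical use from an architecture's vmlinux.lds.S (a sketch; the
 * alignment argument is arch-specific):
 *
 *	RO_DATA(PAGE_SIZE)
 */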

#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		KEEP(*(.security_initcall.init)) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot .text.hot.*) \
	*(TEXT_MAIN .text.fixup) \
	*(.text.unlikely .text.unlikely.*) \
	*(.text.unknown .text.unknown.*) \
	*(.text..refcount) \
	*(.text..ftrace) \
	*(TEXT_CFI_MAIN) \
	*(.ref.text) \
	MEM_KEEP(init.text*) \
	MEM_KEEP(exit.text*)

/*
 * sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map.
 */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/*
 * spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map.
 */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define CPUIDLE_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
	*(.cpuidle.text) \
	VMLINUX_SYMBOL(__cpuidle_text_end) = .;

#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__entry_text_start) = .; \
	*(.entry.text) \
	VMLINUX_SYMBOL(__entry_text_end) = .;

#define IRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	*(.irqentry.text) \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;

#define SOFTIRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
	*(.softirqentry.text) \
	VMLINUX_SYMBOL(__softirqentry_text_end) = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		KEEP(*(__ex_table)) \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \
	}
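
/*
 * The resulting table is consumed through the bracketing symbols. A
 * hedged sketch (the real, arch-aware lookup lives in lib/extable.c;
 * entry_matches() below is a hypothetical helper):
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	static const struct exception_table_entry *
 *	find_fixup(unsigned long addr)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (entry_matches(e, addr))
 *				return e;
 *		return NULL;
 *	}
 */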

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align) \
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__ctors_start) = .; \
	KEEP(*(.ctors)) \
	KEEP(*(SORT(.init_array.*))) \
	KEEP(*(.init_array)) \
	VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
	KEEP(*(SORT(___kentry+*))) \
	*(.init.data init.data.*) \
	MEM_DISCARD(init.data*) \
	KERNEL_CTORS() \
	MCOUNT_REC() \
	*(.init.rodata .init.rodata.*) \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS() \
	KPROBE_BLACKLIST() \
	MEM_DISCARD(init.rodata) \
	CLK_OF_TABLES() \
	RESERVEDMEM_OF_TABLES() \
	TIMER_OF_TABLES() \
	IOMMU_OF_TABLES() \
	CPU_METHOD_OF_TABLES() \
	CPUIDLE_METHOD_OF_TABLES() \
	KERNEL_DTB() \
	IRQCHIP_OF_MATCH_TABLE() \
	ACPI_PROBE_TABLE(irqchip) \
	ACPI_PROBE_TABLE(timer) \
	ACPI_PROBE_TABLE(iort) \
	EARLYCON_TABLE()

#define INIT_TEXT \
	*(.init.text .init.text.*) \
	*(.text.startup) \
	MEM_DISCARD(init.text*)

#define EXIT_DATA \
	*(.exit.data .exit.data.*) \
	*(.fini_array) \
	*(.dtors) \
	MEM_DISCARD(exit.data*) \
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT \
	*(.exit.text) \
	*(.text.exit) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	KEEP(*(.exitcall.exit))

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.dynsbss) \
		*(SBSS_MAIN) \
		*(.scommon) \
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		BSS_FIRST_SECTIONS \
		. = ALIGN(PAGE_SIZE); \
		*(.bss..page_aligned) \
		. = ALIGN(PAGE_SIZE); \
		*(.dynbss) \
		*(BSS_MAIN) \
		*(COMMON) \
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info 0 : { *(.debug_info \
		.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	.debug_pubtypes 0 : { *(.debug_pubtypes) } \
	/* DWARF 3 */ \
	.debug_ranges 0 : { *(.debug_ranges) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) } \
	/* GNU DWARF 2 extensions */ \
	.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
	.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
	/* DWARF 4 */ \
	.debug_types 0 : { *(.debug_types) } \
	/* DWARF 5 */ \
	.debug_addr 0 : { *(.debug_addr) } \
	.debug_line_str 0 : { *(.debug_line_str) } \
	.debug_loclists 0 : { *(.debug_loclists) } \
	.debug_macro 0 : { *(.debug_macro) } \
	.debug_names 0 : { *(.debug_names) } \
	.debug_rnglists 0 : { *(.debug_rnglists) } \
	.debug_str_offsets 0 : { *(.debug_str_offsets) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		KEEP(*(__bug_table)) \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
	. = ALIGN(4); \
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \
		KEEP(*(.orc_unwind_ip)) \
		VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \
	} \
	. = ALIGN(2); \
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_orc_unwind) = .; \
		KEEP(*(.orc_unwind)) \
		VMLINUX_SYMBOL(__stop_orc_unwind) = .; \
	} \
	. = ALIGN(4); \
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(orc_lookup) = .; \
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
			LOOKUP_BLOCK_SIZE) + 1) * 4; \
		VMLINUX_SYMBOL(orc_lookup_end) = .; \
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		KEEP(*(.tracedata)) \
		VMLINUX_SYMBOL(__tracedata_end) = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		KEEP(*(.note.*)) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	KEEP(*(.init.setup)) \
	VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level) \
	VMLINUX_SYMBOL(__initcall##level##_start) = .; \
	KEEP(*(.initcall##level##.init)) \
	KEEP(*(.initcall##level##s.init))

#define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
	KEEP(*(.initcallearly.init)) \
	INIT_CALLS_LEVEL(0) \
	INIT_CALLS_LEVEL(1) \
	INIT_CALLS_LEVEL(2) \
	INIT_CALLS_LEVEL(3) \
	INIT_CALLS_LEVEL(4) \
	INIT_CALLS_LEVEL(5) \
	INIT_CALLS_LEVEL(rootfs) \
	INIT_CALLS_LEVEL(6) \
	INIT_CALLS_LEVEL(7) \
	VMLINUX_SYMBOL(__initcall_end) = .;
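
/*
 * A sketch of the consuming side (illustrative only; the real walk in
 * init/main.c iterates level by level): an initcall registered with
 * e.g. device_initcall(fn) becomes a function pointer in .initcall6.init,
 * and boot code runs the whole bracketed range in link order:
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		(*fn)();
 */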

#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	KEEP(*(.con_initcall.init)) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	KEEP(*(.security_initcall.init)) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
	KEEP(*(.init.ramfs)) \
	. = ALIGN(8); \
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs can place those sections in
 * earlier output section definitions.
 */
#define DISCARDS \
	/DISCARD/ : { \
		EXIT_TEXT \
		EXIT_DATA \
		EXIT_CALL \
		*(.discard) \
		*(.discard.*) \
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
	VMLINUX_SYMBOL(__per_cpu_start) = .; \
	*(.data..percpu..first) \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..page_aligned) \
	. = ALIGN(cacheline); \
	*(.data..percpu..read_mostly) \
	. = ALIGN(cacheline); \
	*(.data..percpu) \
	*(.data..percpu..shared_aligned) \
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
		- LOAD_OFFSET) { \
		PERCPU_INPUT(cacheline) \
	} phdr \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline) \
	. = ALIGN(PAGE_SIZE); \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		PERCPU_INPUT(cacheline) \
	}
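
/*
 * Typical use (a sketch): an architecture's vmlinux.lds.S contains
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * and variables defined with DEFINE_PER_CPU(int, foo) then land in
 * .data..percpu between __per_cpu_start and __per_cpu_end.
 */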

/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */

/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (in practice, always) smaller than PAGE_SIZE,
 * so the sections with this restriction (or a similar one) are located
 * before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
	} \
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		INIT_TEXT \
		VMLINUX_SYMBOL(_einittext) = .; \
	}

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		SECURITY_INITCALL \
		INIT_RAM_FS \
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	VMLINUX_SYMBOL(__bss_start) = .; \
	SBSS(sbss_align) \
	BSS(bss_align) \
	. = ALIGN(stop_align); \
	VMLINUX_SYMBOL(__bss_stop) = .;
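
/*
 * A minimal sketch of how the high level helpers combine in an
 * architecture linker script (the argument values are illustrative,
 * not prescriptive):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	BSS_SECTION(0, 0, 0)
 */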