vmlinux.lds.h

/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
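
/*
 * Example (illustrative sketch; the function name below is hypothetical):
 * code or data annotated with __meminit/__meminitdata from <linux/init.h>
 * is emitted into .meminit.text/.meminit.data.  With CONFIG_MEMORY_HOTPLUG=y
 * the MEM_KEEP() references later in this file pull those sections into the
 * permanent text/data output sections; otherwise MEM_DISCARD() places them
 * with the init sections so they can be freed after boot:
 *
 *	static int __meminit my_hotplug_handler(void)	// hypothetical name
 *	{
 *		return 0;	// object code lands in .meminit.text
 *	}
 */
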
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
	*(__mcount_loc) \
	VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
	*(_ftrace_annotated_branch) \
	VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
	*(_ftrace_branch) \
	VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_ftrace_events) = .; \
	*(_ftrace_events) \
	VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
	*(__trace_printk_fmt) /* pointers to trace_printk() format strings */ \
	VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
	*(__tracepoint_str) /* pointers to tracepoint strings */ \
	VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
	*(__syscalls_metadata) \
	VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8); \
	VMLINUX_SYMBOL(__clksrc_of_table) = .; \
	*(__clksrc_of_table) \
	*(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE() \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__irqchip_begin) = .; \
	*(__irqchip_of_table) \
	*(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES() . = ALIGN(8); \
	VMLINUX_SYMBOL(__clk_of_table) = .; \
	*(__clk_of_table) \
	*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif

#define KERNEL_DTB() \
	STRUCT_ALIGN(); \
	VMLINUX_SYMBOL(__dtb_start) = .; \
	*(.dtb.init.rodata) \
	VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.ref.data) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	*(.data.unlikely) \
	STRUCT_ALIGN(); \
	*(__tracepoints) \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___jump_table) = .; \
	*(__jump_table) \
	VMLINUX_SYMBOL(__stop___jump_table) = .; \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	*(__verbose) \
	VMLINUX_SYMBOL(__stop___verbose) = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	*(.data..nosave) \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data..read_mostly) \
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic) /* Kernel version magic */ \
		. = ALIGN(8); \
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
		*(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(SORT(___ksymtab+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(SORT(___ksymtab_gpl+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(SORT(___ksymtab_unused+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(SORT(___ksymtab_unused_gpl+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(SORT(___ksymtab_gpl_future+*)) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(SORT(___kcrctab+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(SORT(___kcrctab_gpl+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(SORT(___kcrctab_unused+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(SORT(___kcrctab_unused_gpl+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(SORT(___kcrctab_gpl_future+*)) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
	} \
	\
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___modver) = .; \
		*(__modver) \
		VMLINUX_SYMBOL(__stop___modver) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
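
/*
 * Usage sketch (assumed, for illustration): an architecture's vmlinux.lds.S
 * typically emits the read-only data right after _sdata, e.g.
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * which expands to the whole .rodata/__ksymtab/__param/... layout above;
 * the page alignment lets architectures write-protect the region after init.
 */
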
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot) \
	*(.text) \
	*(.ref.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \
	*(.text.unlikely)

/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map. */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map. */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__entry_text_start) = .; \
	*(.entry.text) \
	VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	*(.irqentry.text) \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}
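
/*
 * Usage sketch (assumed, for illustration): early boot assembly usually
 * reaches .head.text via the __HEAD marker from <linux/init.h>, e.g.
 *
 *	__HEAD
 *	ENTRY(stext)			// hypothetical entry label
 *		...
 *
 * HEAD_TEXT_SECTION then collects that code, typically at the very start
 * of the image as in the sample script at the top of this file.
 */
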
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		*(__ex_table) \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align) \
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
	VMLINUX_SYMBOL(__ctors_start) = .; \
	*(.ctors) \
	VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	MEM_DISCARD(init.data) \
	KERNEL_CTORS() \
	MCOUNT_REC() \
	*(.init.rodata) \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS() \
	MEM_DISCARD(init.rodata) \
	CLK_OF_TABLES() \
	CLKSRC_OF_TABLES() \
	KERNEL_DTB() \
	IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT \
	*(.init.text) \
	MEM_DISCARD(init.text)

#define EXIT_DATA \
	*(.exit.data) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.sbss) \
		*(.scommon) \
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		BSS_FIRST_SECTIONS \
		*(.bss..page_aligned) \
		*(.dynbss) \
		*(.bss) \
		*(COMMON) \
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info 0 : { *(.debug_info \
		.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		*(__bug_table) \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		*(.tracedata) \
		VMLINUX_SYMBOL(__tracedata_end) = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	*(.init.setup) \
	VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level) \
	VMLINUX_SYMBOL(__initcall##level##_start) = .; \
	*(.initcall##level##.init) \
	*(.initcall##level##s.init)

#define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
	*(.initcallearly.init) \
	INIT_CALLS_LEVEL(0) \
	INIT_CALLS_LEVEL(1) \
	INIT_CALLS_LEVEL(2) \
	INIT_CALLS_LEVEL(3) \
	INIT_CALLS_LEVEL(4) \
	INIT_CALLS_LEVEL(5) \
	INIT_CALLS_LEVEL(rootfs) \
	INIT_CALLS_LEVEL(6) \
	INIT_CALLS_LEVEL(7) \
	VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	*(.con_initcall.init) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	*(.security_initcall.init) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
	*(.init.ramfs) \
	. = ALIGN(8); \
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier section definitions.
 */
#define DISCARDS \
	/DISCARD/ : { \
		EXIT_TEXT \
		EXIT_DATA \
		EXIT_CALL \
		*(.discard) \
		*(.discard.*) \
	}
/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
	VMLINUX_SYMBOL(__per_cpu_start) = .; \
	*(.data..percpu..first) \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..page_aligned) \
	. = ALIGN(cacheline); \
	*(.data..percpu..readmostly) \
	. = ALIGN(cacheline); \
	*(.data..percpu) \
	*(.data..percpu..shared_aligned) \
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
		- LOAD_OFFSET) { \
		PERCPU_INPUT(cacheline) \
	} phdr \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
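
/*
 * Usage sketch (illustrative, not taken verbatim from any arch script):
 * placing the percpu area at explicit base address 0 and routing it to a
 * dedicated program header declared earlier in PHDRS, e.g.
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * Remember the sticky-PHDR warning above: the next output section must
 * name its own PHDR (e.g. ":data") to get back out of ":percpu".
 */
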
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline) \
	. = ALIGN(PAGE_SIZE); \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		PERCPU_INPUT(cacheline) \
	}
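
/*
 * Usage sketch (assumed, for illustration): architectures that do not need
 * a fixed percpu base simply place
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * inside the init range of their script (between __init_begin and
 * __init_end in the sample at the top of this file), so the static percpu
 * template can be freed once the per-CPU areas have been set up.
 */
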
/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */

/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
	}
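
/*
 * Usage sketch (assumed argument values, for illustration only):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned and read-mostly data padded to the CPU cacheline,
 * page-aligned data padded to PAGE_SIZE, and the init task data aligned to
 * THREAD_SIZE; architectures pick the third argument to match their
 * init_task/initial-stack alignment requirements.
 */
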
#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		INIT_TEXT \
		VMLINUX_SYMBOL(_einittext) = .; \
	}

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		SECURITY_INITCALL \
		INIT_RAM_FS \
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	VMLINUX_SYMBOL(__bss_start) = .; \
	SBSS(sbss_align) \
	BSS(bss_align) \
	. = ALIGN(stop_align); \
	VMLINUX_SYMBOL(__bss_stop) = .;