/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *      . = START;
 *      __init_begin = .;
 *      HEAD_TEXT_SECTION
 *      INIT_TEXT_SECTION(PAGE_SIZE)
 *      INIT_DATA_SECTION(...)
 *      PERCPU_SECTION(CACHELINE_SIZE)
 *      __init_end = .;
 *
 *      _stext = .;
 *      TEXT_SECTION = 0
 *      _etext = .;
 *
 *      _sdata = .;
 *      RO_DATA_SECTION(PAGE_SIZE)
 *      RW_DATA_SECTION(...)
 *      _edata = .;
 *
 *      EXCEPTION_TABLE(...)
 *      NOTES
 *
 *      BSS_SECTION(0, 0, 0)
 *      _end = .;
 *
 *      STABS_DEBUG
 *      DWARF_DEBUG
 *
 *      DISCARDS                // must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime); see the expansion sketch after these
 * #ifdef blocks.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
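
/*
 * Expansion sketch (illustration only, not part of the build): the ##
 * token pasting turns the "sec" argument into the compiler-emitted
 * input section names .meminit.*, .memexit.*, .cpuinit.* and
 * .cpuexit.*.  With CONFIG_MEMORY_HOTPLUG=y, memory-hotplug init code
 * must stay resident, so
 *
 *      MEM_KEEP(init.data)     expands to  *(.meminit.data)  (kept in .data)
 *      MEM_DISCARD(init.data)  expands to  nothing
 *
 * and with the option disabled the roles swap: .meminit.data is then
 * collected by MEM_DISCARD() inside INIT_DATA below and freed together
 * with the rest of the init sections after boot.
 */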

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_mcount_loc) = .; \
        *(__mcount_loc) \
        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
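
/*
 * Consumption sketch (illustration only): the __start_<name> and
 * __stop_<name> symbols emitted here and throughout this file bracket
 * tables that C code walks by declaring the linker-provided symbols as
 * extern arrays, roughly:
 *
 *      extern unsigned long __start_mcount_loc[];
 *      extern unsigned long __stop_mcount_loc[];
 *
 *      for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *              record_callsite(*p);
 *
 * record_callsite() is a made-up name for this sketch; ftrace's real
 * setup code differs in detail, but the extern-array pattern is the
 * common idiom for all of these start/stop pairs.
 */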

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() \
        VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
        *(_ftrace_annotated_branch) \
        VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() \
        VMLINUX_SYMBOL(__start_branch_profile) = .; \
        *(_ftrace_branch) \
        VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_ftrace_events) = .; \
        *(_ftrace_events) \
        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() \
        VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
        *(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
        *(__syscalls_metadata) \
        VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__clksrc_of_table) = .; \
        *(__clksrc_of_table) \
        *(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__irqchip_begin) = .; \
        *(__irqchip_of_table) \
        *(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__clk_of_table) = .; \
        *(__clk_of_table) \
        *(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif
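
/*
 * Registration sketch (illustration only): the __clksrc_of_table,
 * __irqchip_of_table and __clk_of_table input sections are filled by
 * per-driver declaration macros, roughly of the form
 *
 *      #define CLK_OF_DECLARE(name, compat, fn) \
 *              static const struct of_device_id __clk_of_table_##name \
 *                      __used __section(__clk_of_table) \
 *                      = { .compatible = compat, .data = fn };
 *
 * so each entry lands in the table collected above, while a zeroed
 * sentinel entry placed in the matching *_end section terminates the
 * array for the code that scans it.  The exact macro definitions live
 * in the subsystem headers (clk-provider.h, irqchip.h, clocksource.h)
 * and may differ in detail from this sketch.
 */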

#define KERNEL_DTB() \
        STRUCT_ALIGN(); \
        VMLINUX_SYMBOL(__dtb_start) = .; \
        *(.dtb.init.rodata) \
        VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA \
        *(.data) \
        *(.ref.data) \
        *(.data..shared_aligned) /* percpu related */ \
        MEM_KEEP(init.data) \
        MEM_KEEP(exit.data) \
        *(.data.unlikely) \
        STRUCT_ALIGN(); \
        *(__tracepoints) \
        /* implement dynamic printk debug */ \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___jump_table) = .; \
        *(__jump_table) \
        VMLINUX_SYMBOL(__stop___jump_table) = .; \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___verbose) = .; \
        *(__verbose) \
        VMLINUX_SYMBOL(__stop___verbose) = .; \
        LIKELY_PROFILE() \
        BRANCH_PROFILE() \
        TRACE_PRINTKS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_begin) = .; \
        *(.data..nosave) \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
        . = ALIGN(page_align); \
        *(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
        . = ALIGN(align); \
        *(.data..read_mostly) \
        . = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
        . = ALIGN(align); \
        *(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
        . = ALIGN(align); \
        *(.data..init_task)
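
/*
 * Annotation sketch (illustration only): the .data..* input sections
 * gathered by the helpers above are populated by variable attributes
 * in C source, along the lines of
 *
 *      static struct foo lookup_cache __read_mostly;         -> .data..read_mostly
 *      static char boot_stack[THREAD_SIZE] __page_aligned_data;  -> .data..page_aligned
 *      static struct bar hot_state __cacheline_aligned;      -> .data..cacheline_aligned
 *
 * __read_mostly, __page_aligned_data and __cacheline_aligned are the
 * kernel's section/alignment attribute macros (see linux/cache.h and
 * linux/linkage.h); struct foo, struct bar and the variable names are
 * made up for the example.
 */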

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
        . = ALIGN((align)); \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rodata) = .; \
                *(.rodata) *(.rodata.*) \
                *(__vermagic) /* Kernel version magic */ \
                . = ALIGN(8); \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
                *(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
                VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
                *(__tracepoints_strings) /* Tracepoints: strings */ \
        } \
        \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                *(.rodata1) \
        } \
        \
        BUG_TABLE \
        \
        /* PCI quirks */ \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                *(.pci_fixup_resume) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
                *(.pci_fixup_resume_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
                *(.pci_fixup_suspend) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
        } \
        \
        /* Built-in firmware blobs */ \
        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
                *(.builtin_fw) \
                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
        } \
        \
        TRACEDATA \
        \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(SORT(___ksymtab+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(SORT(___ksymtab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
                *(SORT(___ksymtab_unused+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
                *(SORT(___ksymtab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(SORT(___ksymtab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(SORT(___kcrctab+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(SORT(___kcrctab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
                *(SORT(___kcrctab_unused+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
                *(SORT(___kcrctab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(SORT(___kcrctab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        } \
        \
        /* __*init sections */ \
        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
                *(.ref.rodata) \
                MEM_KEEP(init.rodata) \
                MEM_KEEP(exit.rodata) \
        } \
        \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                *(__param) \
                VMLINUX_SYMBOL(__stop___param) = .; \
        } \
        \
        /* Built-in module versions. */ \
        __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___modver) = .; \
                *(__modver) \
                VMLINUX_SYMBOL(__stop___modver) = .; \
                . = ALIGN((align)); \
                VMLINUX_SYMBOL(__end_rodata) = .; \
        } \
        . = ALIGN((align));

/*
 * RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA().
 */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .; \
                *(.security_initcall.init) \
                VMLINUX_SYMBOL(__security_initcall_end) = .; \
        }

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 */
#define TEXT_TEXT \
        ALIGN_FUNCTION(); \
        *(.text.hot) \
        *(.text) \
        *(.ref.text) \
        MEM_KEEP(init.text) \
        MEM_KEEP(exit.text) \
        *(.text.unlikely)

/*
 * sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map.
 */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;

/*
 * spinlock.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map.
 */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__entry_text_start) = .; \
        *(.entry.text) \
        VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        *(.irqentry.text) \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
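
/*
 * Usage sketch (illustration only): an architecture's vmlinux.lds.S
 * typically strings the *_TEXT helpers together inside its .text
 * output section, along these lines (x86-like; details vary per arch):
 *
 *      .text : AT(ADDR(.text) - LOAD_OFFSET) {
 *              _text = .;
 *              TEXT_TEXT
 *              SCHED_TEXT
 *              LOCK_TEXT
 *              KPROBES_TEXT
 *              ENTRY_TEXT
 *              IRQENTRY_TEXT
 *              *(.fixup)
 *              *(.gnu.warning)
 *              _etext = .;
 *      }
 */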

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
                HEAD_TEXT \
        }

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
        . = ALIGN(align); \
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ex_table) = .; \
                *(__ex_table) \
                VMLINUX_SYMBOL(__stop___ex_table) = .; \
        }

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
        . = ALIGN(align); \
        .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
                INIT_TASK_DATA(align) \
        }

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__ctors_start) = .; \
        *(.ctors) \
        VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
        *(.init.data) \
        MEM_DISCARD(init.data) \
        KERNEL_CTORS() \
        MCOUNT_REC() \
        *(.init.rodata) \
        FTRACE_EVENTS() \
        TRACE_SYSCALLS() \
        MEM_DISCARD(init.rodata) \
        CLK_OF_TABLES() \
        CLKSRC_OF_TABLES() \
        KERNEL_DTB() \
        IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT \
        *(.init.text) \
        MEM_DISCARD(init.text)

#define EXIT_DATA \
        *(.exit.data) \
        MEM_DISCARD(exit.data) \
        MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
        *(.exit.text) \
        MEM_DISCARD(exit.text)

#define EXIT_CALL \
        *(.exitcall.exit)
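
/*
 * Annotation sketch (illustration only): the .init.* and .exit.* input
 * sections collected above come from the __init, __initdata and __exit
 * attributes in C source, e.g.
 *
 *      static int __init foo_probe_init(void)    -> .init.text
 *      static u32 bar_table[8] __initdata;       -> .init.data
 *      static void __exit foo_remove_exit(void)  -> .exit.text
 *
 * (foo/bar names are made up).  Everything between __init_begin and
 * __init_end is freed once booting is complete, while .exitcall.exit
 * holds exit calls that built-in code can never invoke, which is why
 * EXIT_CALL appears in the DISCARDS rule below.
 */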

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
        . = ALIGN(sbss_align); \
        .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
                *(.sbss) \
                *(.scommon) \
        }

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align) \
        . = ALIGN(bss_align); \
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
                BSS_FIRST_SECTIONS \
                *(.bss..page_aligned) \
                *(.dynbss) \
                *(.bss) \
                *(COMMON) \
        }

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info 0 : { *(.debug_info \
                .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
        . = ALIGN(8); \
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___bug_table) = .; \
                *(__bug_table) \
                VMLINUX_SYMBOL(__stop___bug_table) = .; \
        }
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
        . = ALIGN(4); \
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__tracedata_start) = .; \
                *(.tracedata) \
                VMLINUX_SYMBOL(__tracedata_end) = .; \
        }
#else
#define TRACEDATA
#endif

#define NOTES \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_notes) = .; \
                *(.note.*) \
                VMLINUX_SYMBOL(__stop_notes) = .; \
        }

#define INIT_SETUP(initsetup_align) \
        . = ALIGN(initsetup_align); \
        VMLINUX_SYMBOL(__setup_start) = .; \
        *(.init.setup) \
        VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level) \
        VMLINUX_SYMBOL(__initcall##level##_start) = .; \
        *(.initcall##level##.init) \
        *(.initcall##level##s.init)

#define INIT_CALLS \
        VMLINUX_SYMBOL(__initcall_start) = .; \
        *(.initcallearly.init) \
        INIT_CALLS_LEVEL(0) \
        INIT_CALLS_LEVEL(1) \
        INIT_CALLS_LEVEL(2) \
        INIT_CALLS_LEVEL(3) \
        INIT_CALLS_LEVEL(4) \
        INIT_CALLS_LEVEL(5) \
        INIT_CALLS_LEVEL(rootfs) \
        INIT_CALLS_LEVEL(6) \
        INIT_CALLS_LEVEL(7) \
        VMLINUX_SYMBOL(__initcall_end) = .;
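
/*
 * Registration sketch (illustration only): the .initcall<N>.init input
 * sections are filled by the initcall macros in include/linux/init.h;
 * roughly, core_initcall(fn) places a pointer to fn in .initcall1.init,
 * subsys_initcall(fn) in .initcall4.init, and device_initcall(fn) or
 * module_init(fn) for built-in code in .initcall6.init, e.g.
 *
 *      static int __init foo_init(void) { ... }
 *      device_initcall(foo_init);
 *
 * At boot the init code walks these arrays in link order, so the
 * sequence of INIT_CALLS_LEVEL() invocations above (with rootfs
 * squeezed between levels 5 and 6) is what gives the levels their
 * ordering.
 */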

#define CON_INITCALL \
        VMLINUX_SYMBOL(__con_initcall_start) = .; \
        *(.con_initcall.init) \
        VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
        VMLINUX_SYMBOL(__security_initcall_start) = .; \
        *(.security_initcall.init) \
        VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
        . = ALIGN(4); \
        VMLINUX_SYMBOL(__initramfs_start) = .; \
        *(.init.ramfs) \
        . = ALIGN(8); \
        *(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS \
        /DISCARD/ : { \
                EXIT_TEXT \
                EXIT_DATA \
                EXIT_CALL \
                *(.discard) \
                *(.discard.*) \
        }
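
/*
 * Usage sketch (illustration only): an architecture that needs exit
 * code to survive until runtime (x86 does, because alternatives and
 * similar cross-section references point into it) simply emits its own
 * output sections for it ahead of DISCARDS, e.g.
 *
 *      .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *              EXIT_TEXT
 *      }
 *      .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
 *              EXIT_DATA
 *      }
 *      ...
 *      DISCARDS
 *
 * Once EXIT_TEXT and EXIT_DATA have been consumed by those earlier
 * sections, the /DISCARD/ rule above no longer matches anything for
 * them.
 */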

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
        VMLINUX_SYMBOL(__per_cpu_start) = .; \
        *(.data..percpu..first) \
        . = ALIGN(PAGE_SIZE); \
        *(.data..percpu..page_aligned) \
        . = ALIGN(cacheline); \
        *(.data..percpu..readmostly) \
        . = ALIGN(cacheline); \
        *(.data..percpu) \
        *(.data..percpu..shared_aligned) \
        VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                - LOAD_OFFSET) { \
                PERCPU_INPUT(cacheline) \
        } phdr \
        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline) \
        . = ALIGN(PAGE_SIZE); \
        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_load) = .; \
                PERCPU_INPUT(cacheline) \
        }
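
/*
 * Usage sketch (illustration only): most architectures simply place
 *
 *      PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * in their linker script (the sample at the top of this file calls it
 * with CACHELINE_SIZE).  x86_64 is the notable PERCPU_VADDR user,
 * mapping the percpu area at a fixed base with a dedicated program
 * header, roughly PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu).
 */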

/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */

/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
        . = ALIGN(PAGE_SIZE); \
        .data : AT(ADDR(.data) - LOAD_OFFSET) { \
                INIT_TASK_DATA(inittask) \
                NOSAVE_DATA \
                PAGE_ALIGNED_DATA(pagealigned) \
                CACHELINE_ALIGNED_DATA(cacheline) \
                READ_MOSTLY_DATA(cacheline) \
                DATA_DATA \
                CONSTRUCTORS \
        }
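
/*
 * Usage sketch (illustration only): a typical invocation from an
 * architecture linker script looks like
 *
 *      RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned data aligned to the L1 cache line size,
 * page-aligned data to PAGE_SIZE, and the init task data to
 * THREAD_SIZE; the exact values are the architecture's choice.
 */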

#define INIT_TEXT_SECTION(inittext_align) \
        . = ALIGN(inittext_align); \
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(_sinittext) = .; \
                INIT_TEXT \
                VMLINUX_SYMBOL(_einittext) = .; \
        }

#define INIT_DATA_SECTION(initsetup_align) \
        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
                INIT_DATA \
                INIT_SETUP(initsetup_align) \
                INIT_CALLS \
                CON_INITCALL \
                SECURITY_INITCALL \
                INIT_RAM_FS \
        }

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
        . = ALIGN(sbss_align); \
        VMLINUX_SYMBOL(__bss_start) = .; \
        SBSS(sbss_align) \
        BSS(bss_align) \
        . = ALIGN(stop_align); \
        VMLINUX_SYMBOL(__bss_stop) = .;