vmlinux_64.lds.S
/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/page_types.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
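
/*
 * FLAGS() below sets the ELF p_flags bits of each program header:
 * PF_X = 1, PF_W = 2, PF_R = 4, so FLAGS(5) is read+execute and
 * FLAGS(7) is read+write+execute, as the per-entry comments note.
 */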
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(7);		/* RWE */
	user PT_LOAD FLAGS(7);		/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
	note PT_NOTE FLAGS(0);		/* ___ */
}
SECTIONS
{
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;		/* Text and read-only data */
		/* First the code that has to be first for bootstrapping */
		*(.text.head)
		_stext = .;
		/* Then the rest */
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		_etext = .;		/* End of text section */
	} :text = 0x9090
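	/*
	 * "= 0x9090" is the output-section fill pattern: any gaps the
	 * linker leaves in .text are padded with 0x90 bytes, the x86 NOP
	 * opcode, rather than with zeros.
	 */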
	NOTES :text :note

	. = ALIGN(16);		/* Exception table */
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090
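	/*
	 * The exception table pairs faulting-instruction addresses with
	 * fixup addresses; it is sorted once at boot (sort_main_extable())
	 * and searched from the fault path by fixup_exception().
	 */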
	RODATA

	. = ALIGN(PAGE_SIZE);	/* Align data segment to page size boundary */
	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		CONSTRUCTORS
		_edata = .;	/* End of data section */
	} :data

	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
		*(.data.cacheline_aligned)
	}

	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)
	}
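
/*
 * The legacy vsyscall page sits at the fixed virtual address
 * VSYSCALL_ADDR (-10 MB in the top mapping, i.e. 0xffffffffff600000)
 * in every address space.  VLOAD() and VVIRT() translate the section
 * addresses laid out at that fixed address back to their load address
 * and kernel-image virtual address, respectively.
 */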
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
	__vsyscall_0 = VSYSCALL_VIRT_ADDR;

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
		{ *(.vsyscall_gtod_data) }
	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock))
		{ *(.vsyscall_clock) }
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
		{ *(.vsyscall_1) }
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
		{ *(.vsyscall_2) }

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
		{ *(.vsyscall_3) }

	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		. = ALIGN(THREAD_SIZE);	/* init_task */
		*(.data.init_task)
	} :data.init

	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		*(.data.page_aligned)
	}

	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		/* might get freed after init */
		. = ALIGN(PAGE_SIZE);
		__smp_alt_begin = .;
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
		__smp_alt_end = .;
	}
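	/*
	 * .smp_locks records the addresses of the LOCK prefixes emitted by
	 * the alternatives macros, so they can be patched out when running
	 * on a single CPU and restored if another CPU comes online.
	 */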
	. = ALIGN(PAGE_SIZE);	/* Init code and data */
	__init_begin = .;	/* paired with __init_end */
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		__initdata_begin = .;
		INIT_DATA
		__initdata_end = .;
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		. = ALIGN(16);
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}
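	/*
	 * INITCALLS (from asm-generic/vmlinux.lds.h) expands to the
	 * level-ordered .initcallN.init input sections; the boot code walks
	 * the function pointers collected between __initcall_start and
	 * __initcall_end and calls each in turn.
	 */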
	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		. = ALIGN(8);
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
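	/*
	 * .altinstructions holds struct alt_instr records and
	 * .altinstr_replacement the substitute code; apply_alternatives()
	 * patches the kernel text from these tables during boot.
	 */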
	/* .exit.text is discarded at runtime, not link time, to deal with
	   references from .altinstructions and .eh_frame */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif
#ifdef CONFIG_SMP
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .data_nosave - should
	 * start another section, data.init2.  Also, pda should be at the
	 * head of the percpu area.  Preallocate it and define the percpu
	 * offset symbol so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
	PERCPU(PAGE_SIZE)
#endif
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2	/* use another section data.init2, see PERCPU_VADDR() above */

	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__bss_start = .;	/* BSS */
		*(.bss.page_aligned)
		*(.bss)
		__bss_stop = .;
	}
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
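	/*
	 * The .brk region backs the early-boot brk allocator: extend_brk()
	 * hands out memory between __brk_base and __brk_limit (for initial
	 * page tables, for example), and RESERVE_BRK() users contribute the
	 * .brk_reservation entries collected above.
	 */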
	_end = .;

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
		*(.eh_frame)
		*(.discard)
	}

	STABS_DEBUG
	DWARF_DEBUG
}
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
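/*
 * With zero-based percpu symbols, per_cpu__gdt_page is an offset rather
 * than an address; adding __per_cpu_load yields an absolute address into
 * the initial percpu image that early boot code can use before the
 * percpu areas have been set up.
 */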
/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
	"irq_stack_union is not at start of per-cpu area");
#endif

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	"kexec control code size is too big")
#endif