vmlinux_64.lds.S

/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/page_types.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
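
/* Note: jiffies itself is placed in the user-mapped vsyscall page further
 * down (see the .jiffies output section); this alias makes the 64-bit
 * jiffies_64 symbol resolve to the same storage. */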
PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
        user PT_LOAD FLAGS(7);          /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
        data.init2 PT_LOAD FLAGS(7);    /* RWE */
        note PT_NOTE FLAGS(0);          /* ___ */
}
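
/* FLAGS() takes the ELF p_flags bitmask: PF_X = 1, PF_W = 2, PF_R = 4,
 * so 5 = R+E and 7 = RWE. Output sections below are assigned to these
 * program headers with the ":phdr" syntax, e.g. "} :text". */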

SECTIONS
{
  . = __START_KERNEL;
  phys_startup_64 = startup_64 - LOAD_OFFSET;

  /* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
        _text = .;
        /* First the code that has to be first for bootstrapping */
        *(.text.head)
        _stext = .;
        /* Then the rest */
        TEXT_TEXT
        SCHED_TEXT
        LOCK_TEXT
        KPROBES_TEXT
        IRQENTRY_TEXT
        *(.fixup)
        *(.gnu.warning)
        /* End of text section */
        _etext = .;
  } :text = 0x9090
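
  /* The "= 0x9090" fill pattern pads any gaps in the text segment with
   * 0x90 (NOP) opcodes instead of zero bytes. */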

  NOTES :text :note

  /* Exception table */
  . = ALIGN(16);
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
        __start___ex_table = .;
        *(__ex_table)
        __stop___ex_table = .;
  } :text = 0x9090
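
  /* __ex_table collects struct exception_table_entry pairs (faulting
   * instruction address, fixup address); fixup_exception() searches the
   * range between __start___ex_table and __stop___ex_table when an
   * instruction marked as fixable (e.g. a user access) faults. */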

  RODATA

  /* Align data segment to page size boundary */
  . = ALIGN(PAGE_SIZE);

  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
        DATA_DATA
        CONSTRUCTORS
        /* End of data section */
        _edata = .;
  } :data

  .data.cacheline_aligned :
        AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
        . = ALIGN(PAGE_SIZE);
        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        *(.data.cacheline_aligned)
  }

  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
        *(.data.read_mostly)
  }

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
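
/* VSYSCALL_ADDR is -10 MiB, i.e. the fixed user-visible vsyscall address
 * 0xffffffffff600000. The vsyscall sections are linked at that address
 * but stored in the image right after .data.read_mostly, rounded up to
 * the next 4 KiB boundary; VLOAD() and VVIRT() translate a section's
 * link address back to its load address and kernel virtual address. */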

  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
        *(.vsyscall_0)
  } :user

  __vsyscall_0 = VSYSCALL_VIRT_ADDR;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
        *(.vsyscall_fn)
  }

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
        *(.vsyscall_gtod_data)
  }
  vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);

  .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
        *(.vsyscall_clock)
  }
  vsyscall_clock = VVIRT(.vsyscall_clock);

  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
        *(.vsyscall_1)
  }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
        *(.vsyscall_2)
  }

  .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
        *(.vgetcpu_mode)
  }
  vgetcpu_mode = VVIRT(.vgetcpu_mode);

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) {
        *(.jiffies)
  }
  jiffies = VVIRT(.jiffies);

  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
        *(.vsyscall_3)
  }

  . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

  /* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
        . = ALIGN(THREAD_SIZE);
        *(.data.init_task)
  } :data.init
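
  /* .data.init_task holds init_thread_union (the init task's kernel
   * stack plus its thread_info), which must be THREAD_SIZE-aligned. */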

  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
        . = ALIGN(PAGE_SIZE);
        *(.data.page_aligned)
  }

  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
        /* might get freed after init */
        . = ALIGN(PAGE_SIZE);
        __smp_alt_begin = .;
        __smp_locks = .;
        *(.smp_locks)
        __smp_locks_end = .;
        . = ALIGN(PAGE_SIZE);
        __smp_alt_end = .;
  }
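
  /* .smp_locks is a table of pointers to LOCK prefixes; the alternatives
   * code uses it to patch LOCK prefixes to NOPs when only one CPU is
   * running, and to restore them when more CPUs come up. */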

  /* Init code and data */
  . = ALIGN(PAGE_SIZE);
  __init_begin = .;	/* paired with __init_end */
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        _sinittext = .;
        INIT_TEXT
        _einittext = .;
  }

  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
        __initdata_begin = .;
        INIT_DATA
        __initdata_end = .;
  }

  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
        . = ALIGN(16);
        __setup_start = .;
        *(.init.setup)
        __setup_end = .;
  }

  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
        __initcall_start = .;
        INITCALLS
        __initcall_end = .;
  }

  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
        __con_initcall_start = .;
        *(.con_initcall.init)
        __con_initcall_end = .;
  }

  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
        __x86_cpu_dev_start = .;
        *(.x86_cpu_dev.init)
        __x86_cpu_dev_end = .;
  }

  SECURITY_INIT

  . = ALIGN(8);
  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
        __parainstructions = .;
        *(.parainstructions)
        __parainstructions_end = .;
  }

  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
        . = ALIGN(8);
        __alt_instructions = .;
        *(.altinstructions)
        __alt_instructions_end = .;
  }

  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
  }
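
  /* .altinstructions holds struct alt_instr records; apply_alternatives()
   * walks them at boot and, where the running CPU has the required
   * feature, overwrites the original instructions with the replacement
   * bytes kept in .altinstr_replacement. */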

  /*
   * .exit.text is discarded at runtime, not link time, to deal with
   * references from .altinstructions and .eh_frame
   */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
        EXIT_TEXT
  }

  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
        EXIT_DATA
  }

#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(PAGE_SIZE);
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
        __initramfs_start = .;
        *(.init.ramfs)
        __initramfs_end = .;
  }
#endif

#ifdef CONFIG_SMP
  /*
   * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
   * output PHDR, so the next output section - __data_nosave - should
   * start another section data.init2.  Also, pda should be at the head of
   * percpu area.  Preallocate it and define the percpu offset symbol
   * so that it can be accessed as a percpu variable.
   */
  . = ALIGN(PAGE_SIZE);
  PERCPU_VADDR(0, :percpu)
#else
  PERCPU(PAGE_SIZE)
#endif

  . = ALIGN(PAGE_SIZE);
  __init_end = .;

  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
        . = ALIGN(PAGE_SIZE);
        __nosave_begin = .;
        *(.data.nosave)
        . = ALIGN(PAGE_SIZE);
        __nosave_end = .;
  } :data.init2
  /* use another section data.init2, see PERCPU_VADDR() above */

  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
        . = ALIGN(PAGE_SIZE);
        __bss_start = .;	/* BSS */
        *(.bss.page_aligned)
        *(.bss)
        __bss_stop = .;
  }

  .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
        . = ALIGN(PAGE_SIZE);
        __brk_base = .;
        . += 64 * 1024;		/* 64k alignment slop space */
        *(.brk_reservation)	/* areas brk users have reserved */
        __brk_limit = .;
  }
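
  /* The brk area is early-boot scratch space: RESERVE_BRK() users place
   * reservations in .brk_reservation, and extend_brk() hands out memory
   * between __brk_base and __brk_limit, e.g. for initial page tables. */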

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
        *(.exitcall.exit)
        *(.eh_frame)
        *(.discard)
  }

  STABS_DEBUG

  DWARF_DEBUG
}

/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
	"irq_stack_union is not at start of per-cpu area");
#endif

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	"kexec control code size is too big")
#endif