/* vmlinux_64.lds.S — linker script for the x86-64 Linux kernel image */
  1. /* ld script to make x86-64 Linux kernel
  2. * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
  3. */
  4. #define LOAD_OFFSET __START_KERNEL_map
  5. #include <asm-generic/vmlinux.lds.h>
  6. #include <asm/page.h>
  7. #undef i386 /* in case the preprocessor is a 32bit one */
  8. OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
  9. OUTPUT_ARCH(i386:x86-64)
  10. ENTRY(phys_startup_64)
  11. jiffies_64 = jiffies;
  12. _proxy_pda = 1;
  13. PHDRS {
  14. text PT_LOAD FLAGS(5); /* R_E */
  15. data PT_LOAD FLAGS(7); /* RWE */
  16. user PT_LOAD FLAGS(7); /* RWE */
  17. data.init PT_LOAD FLAGS(7); /* RWE */
  18. note PT_NOTE FLAGS(0); /* ___ */
  19. }
  20. SECTIONS
  21. {
  22. . = __START_KERNEL;
  23. phys_startup_64 = startup_64 - LOAD_OFFSET;
  24. _text = .; /* Text and read-only data */
  25. .text : AT(ADDR(.text) - LOAD_OFFSET) {
  26. /* First the code that has to be first for bootstrapping */
  27. *(.text.head)
  28. _stext = .;
  29. /* Then the rest */
  30. TEXT_TEXT
  31. SCHED_TEXT
  32. LOCK_TEXT
  33. KPROBES_TEXT
  34. *(.fixup)
  35. *(.gnu.warning)
  36. _etext = .; /* End of text section */
  37. } :text = 0x9090
  38. NOTES :text :note
  39. . = ALIGN(16); /* Exception table */
  40. __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
  41. __start___ex_table = .;
  42. *(__ex_table)
  43. __stop___ex_table = .;
  44. } :text = 0x9090
  45. RODATA
  46. . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
  47. /* Data */
  48. .data : AT(ADDR(.data) - LOAD_OFFSET) {
  49. DATA_DATA
  50. CONSTRUCTORS
  51. } :data
  52. _edata = .; /* End of data section */
  53. . = ALIGN(PAGE_SIZE);
  54. . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  55. .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
  56. *(.data.cacheline_aligned)
  57. }
  58. . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  59. .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
  60. *(.data.read_mostly)
  61. }
  62. #define VSYSCALL_ADDR (-10*1024*1024)
  63. #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
  64. #define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
  65. #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
  66. #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
  67. #define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
  68. #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
  69. . = VSYSCALL_ADDR;
  70. .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
  71. __vsyscall_0 = VSYSCALL_VIRT_ADDR;
  72. . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  73. .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
  74. . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  75. .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
  76. { *(.vsyscall_gtod_data) }
  77. vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
  78. .vsyscall_clock : AT(VLOAD(.vsyscall_clock))
  79. { *(.vsyscall_clock) }
  80. vsyscall_clock = VVIRT(.vsyscall_clock);
  81. .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
  82. { *(.vsyscall_1) }
  83. .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
  84. { *(.vsyscall_2) }
  85. .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
  86. vgetcpu_mode = VVIRT(.vgetcpu_mode);
  87. . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  88. .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  89. jiffies = VVIRT(.jiffies);
  90. .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
  91. { *(.vsyscall_3) }
  92. . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
  93. #undef VSYSCALL_ADDR
  94. #undef VSYSCALL_PHYS_ADDR
  95. #undef VSYSCALL_VIRT_ADDR
  96. #undef VLOAD_OFFSET
  97. #undef VLOAD
  98. #undef VVIRT_OFFSET
  99. #undef VVIRT
  100. . = ALIGN(THREAD_SIZE); /* init_task */
  101. .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
  102. *(.data.init_task)
  103. }:data.init
  104. . = ALIGN(PAGE_SIZE);
  105. .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
  106. *(.data.page_aligned)
  107. }
  108. /* might get freed after init */
  109. . = ALIGN(PAGE_SIZE);
  110. __smp_alt_begin = .;
  111. __smp_locks = .;
  112. .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
  113. *(.smp_locks)
  114. }
  115. __smp_locks_end = .;
  116. . = ALIGN(PAGE_SIZE);
  117. __smp_alt_end = .;
  118. . = ALIGN(PAGE_SIZE); /* Init code and data */
  119. __init_begin = .;
  120. .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
  121. _sinittext = .;
  122. INIT_TEXT
  123. _einittext = .;
  124. }
  125. .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
  126. __initdata_begin = .;
  127. INIT_DATA
  128. __initdata_end = .;
  129. }
  130. . = ALIGN(16);
  131. __setup_start = .;
  132. .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  133. __setup_end = .;
  134. __initcall_start = .;
  135. .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
  136. INITCALLS
  137. }
  138. __initcall_end = .;
  139. __con_initcall_start = .;
  140. .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
  141. *(.con_initcall.init)
  142. }
  143. __con_initcall_end = .;
  144. . = ALIGN(16);
  145. __x86cpuvendor_start = .;
  146. .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
  147. *(.x86cpuvendor.init)
  148. }
  149. __x86cpuvendor_end = .;
  150. SECURITY_INIT
  151. . = ALIGN(8);
  152. .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
  153. __parainstructions = .;
  154. *(.parainstructions)
  155. __parainstructions_end = .;
  156. }
  157. . = ALIGN(8);
  158. __alt_instructions = .;
  159. .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
  160. *(.altinstructions)
  161. }
  162. __alt_instructions_end = .;
  163. .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
  164. *(.altinstr_replacement)
  165. }
  166. /* .exit.text is discard at runtime, not link time, to deal with references
  167. from .altinstructions and .eh_frame */
  168. .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
  169. EXIT_TEXT
  170. }
  171. .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
  172. EXIT_DATA
  173. }
  174. #ifdef CONFIG_BLK_DEV_INITRD
  175. . = ALIGN(PAGE_SIZE);
  176. __initramfs_start = .;
  177. .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  178. __initramfs_end = .;
  179. #endif
  180. PERCPU(PAGE_SIZE)
  181. . = ALIGN(PAGE_SIZE);
  182. __init_end = .;
  183. . = ALIGN(PAGE_SIZE);
  184. __nosave_begin = .;
  185. .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  186. . = ALIGN(PAGE_SIZE);
  187. __nosave_end = .;
  188. __bss_start = .; /* BSS */
  189. .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
  190. *(.bss.page_aligned)
  191. *(.bss)
  192. }
  193. __bss_stop = .;
  194. _end = . ;
  195. /* Sections to be discarded */
  196. /DISCARD/ : {
  197. *(.exitcall.exit)
  198. *(.eh_frame)
  199. }
  200. STABS_DEBUG
  201. DWARF_DEBUG
  202. }
  203. /*
  204. * Build-time check on the image size:
  205. */
  206. ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
  207. "kernel image bigger than KERNEL_IMAGE_SIZE")