vmlinux.lds.S

/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <linux/config.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
SECTIONS
{
  . = __START_KERNEL;
  phys_startup_64 = startup_64 - LOAD_OFFSET;
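  /*
   * Note on the AT() clauses throughout this script: the kernel is
   * linked at virtual addresses above LOAD_OFFSET (__START_KERNEL_map)
   * but loaded much lower in physical memory, so
   * AT(ADDR(section) - LOAD_OFFSET) sets each section's load address to
   * its link address minus the kernel mapping offset.  For example,
   * assuming the conventional __START_KERNEL_map of 0xffffffff80000000
   * for this era, a section linked at 0xffffffff80100000 is loaded at
   * physical 0x100000.  phys_startup_64 above hands the boot path the
   * physical address of the startup_64 entry point the same way.
   */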
  _text = .;                            /* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
        /* First the code that has to be first for bootstrapping */
        *(.bootstrap.text)
        /* Then all the functions that are "hot" in profiles, to group
           them onto the same hugetlb entry */
        #include "functionlist"
        /* Then the rest */
        *(.text)
        SCHED_TEXT
        LOCK_TEXT
        KPROBES_TEXT
        *(.fixup)
        *(.gnu.warning)
  } = 0x9090
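  /* The "= 0x9090" above is the output-section fill pattern: any gaps
   * the linker leaves in .text are padded with 0x90 bytes, the x86 NOP
   * opcode, so padding is harmless if it is ever executed. */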
  /* out-of-line lock text */
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }

  _etext = .;                           /* End of text section */

  . = ALIGN(16);                        /* Exception table */
  __start___ex_table = .;
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  __stop___ex_table = .;
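  /*
   * __ex_table holds (faulting insn address, fixup address) pairs that
   * the page-fault handler looks up via search_exception_tables() when
   * a whitelisted kernel access faults.  A rough sketch of how inline
   * asm contributes an entry (labels illustrative, not a real call
   * site):
   *
   *    1:  movq (%rbx),%rax            # access that may fault
   *    .section __ex_table,"a"
   *    .align 8
   *    .quad 1b,2f                     # fault at 1b -> resume at 2f
   *    .previous
   */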
  RODATA

#ifdef CONFIG_STACK_UNWIND
  . = ALIGN(8);
  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
        __start_unwind = .;
        *(.eh_frame)
        __end_unwind = .;
  }
#endif

  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
        *(.data)
        CONSTRUCTORS
  }

  _edata = .;                           /* End of data section */

  __bss_start = .;                      /* BSS */
  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
        *(.bss.page_aligned)
        *(.bss)
  }
  __bss_stop = .;

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
        *(.data.cacheline_aligned)
  }
  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
        *(.data.read_mostly)
  }

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
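  /*
   * The vsyscall page is linked at the fixed virtual address -10 MB,
   * i.e. 0xffffffffff600000 as a 64-bit two's-complement value, so user
   * space can call into it at a known location.  VSYSCALL_PHYS_ADDR and
   * VSYSCALL_VIRT_ADDR round the end of .data.read_mostly up to the
   * next 4096-byte boundary, and VLOAD()/VVIRT() translate a vsyscall
   * section's link address back into its load/virtual placement.
   * Worked example with an illustrative address: if .data.read_mostly
   * ends at load address 0x5ff123, then (0x5ff123 + 4095) & ~4095 =
   * 0x600000, so the vsyscall data is loaded at 0x600000 while being
   * linked at 0xffffffffff600000.
   */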
  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
  __vsyscall_0 = VSYSCALL_VIRT_ADDR;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .xtime_lock : AT(VLOAD(.xtime_lock)) { *(.xtime_lock) }
  xtime_lock = VVIRT(.xtime_lock);

  .vxtime : AT(VLOAD(.vxtime)) { *(.vxtime) }
  vxtime = VVIRT(.vxtime);

  .wall_jiffies : AT(VLOAD(.wall_jiffies)) { *(.wall_jiffies) }
  wall_jiffies = VVIRT(.wall_jiffies);

  .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) }
  sys_tz = VVIRT(.sys_tz);

  .sysctl_vsyscall : AT(VLOAD(.sysctl_vsyscall)) { *(.sysctl_vsyscall) }
  sysctl_vsyscall = VVIRT(.sysctl_vsyscall);

  .xtime : AT(VLOAD(.xtime)) { *(.xtime) }
  xtime = VVIRT(.xtime);

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  jiffies = VVIRT(.jiffies);

  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { *(.vsyscall_1) }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { *(.vsyscall_2) }
  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) }

  . = VSYSCALL_VIRT_ADDR + 4096;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

  . = ALIGN(8192);                      /* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
        *(.data.init_task)
  }

  . = ALIGN(4096);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
        *(.data.page_aligned)
  }

  /* might get freed after init */
  . = ALIGN(4096);
  __smp_alt_begin = .;
  __smp_alt_instructions = .;
  .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
        *(.smp_altinstructions)
  }
  __smp_alt_instructions_end = .;
  . = ALIGN(8);
  __smp_locks = .;
  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
        *(.smp_locks)
  }
  __smp_locks_end = .;
  .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
        *(.smp_altinstr_replacement)
  }
  . = ALIGN(4096);
  __smp_alt_end = .;
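  /*
   * SMP alternatives (a note on the sections above): .smp_locks records
   * the addresses of LOCK prefixes so the kernel can patch them to NOPs
   * when running on a single CPU, and patch them back if more CPUs come
   * online.  The __smp_alt_begin/__smp_alt_end markers bound the region
   * so it can be freed once it is no longer needed, which is what the
   * "might get freed after init" comment refers to.
   */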
  . = ALIGN(4096);                      /* Init code and data */
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        _sinittext = .;
        *(.init.text)
        _einittext = .;
  }
  __initdata_begin = .;
  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
  __initdata_end = .;
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
        *(.initcall1.init)
        *(.initcall2.init)
        *(.initcall3.init)
        *(.initcall4.init)
        *(.initcall5.init)
        *(.initcall6.init)
        *(.initcall7.init)
  }
  __initcall_end = .;
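  /*
   * Initcall ordering: do_initcalls() walks the function pointers
   * collected between __initcall_start and __initcall_end, so the order
   * of the .initcall1.init .. .initcall7.init input sections above is
   * the boot-time run order (core_initcall is level 1, device_initcall
   * and module_init level 6, late_initcall level 7).  A minimal sketch
   * of how a driver lands here, with foo_init as an illustrative name:
   *
   *    static int __init foo_init(void)
   *    {
   *            return 0;           // code sits in .init.text
   *    }
   *    device_initcall(foo_init);  // pointer goes in .initcall6.init
   */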
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
        *(.con_initcall.init)
  }
  __con_initcall_end = .;
  SECURITY_INIT

  . = ALIGN(8);
  __alt_instructions = .;
  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
        *(.altinstructions)
  }
  __alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
  }
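  /*
   * CPU alternatives: .altinstructions holds struct alt_instr records
   * that pair an original instruction sequence with a replacement kept
   * in .altinstr_replacement.  At boot, apply_alternatives() copies the
   * replacement over the original when the CPU has the required
   * feature.  Sketch of the C side (feature flag illustrative):
   *
   *    alternative("old insns", "new insns", X86_FEATURE_XMM2);
   */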
  /* .exit.text is discarded at runtime, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }

  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  __initramfs_end = .;
  /* temporary here to work around NR_CPUS. If you see this comment in
     2.6.17+, complain. */
  . = ALIGN(4096);
  __init_end = .;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  __per_cpu_start = .;
  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
  __per_cpu_end = .;
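  /*
   * Per-CPU data: .data.percpu holds the reference copy of every
   * DEFINE_PER_CPU() variable.  At boot, setup_per_cpu_areas() hands
   * each CPU its own copy of the __per_cpu_start..__per_cpu_end range,
   * and per_cpu() resolves a variable to one CPU's instance.  A minimal
   * sketch with an illustrative variable name:
   *
   *    DEFINE_PER_CPU(long, foo_count);   // reference copy lands here
   *    per_cpu(foo_count, cpu)++;         // touch that CPU's instance
   */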
  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
        *(.exitcall.exit)
#ifndef CONFIG_UNWIND_INFO
        *(.eh_frame)
#endif
  }

  STABS_DEBUG

  DWARF_DEBUG
}