vmlinux.lds.S

/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>

#undef i386  /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
SECTIONS
{
  . = __START_KERNEL;
  phys_startup_64 = startup_64 - LOAD_OFFSET;
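  /* Every output section below gets its virtual address (VMA) in the
   * __START_KERNEL_map mapping, while AT(ADDR(section) - LOAD_OFFSET)
   * sets its load address (LMA) to the corresponding physical address. */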
  _text = .;  /* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
        /* First the code that has to be first for bootstrapping */
        *(.bootstrap.text)
        /* Then all the functions that are "hot" in profiles, to group them
           onto the same hugetlb entry */
        #include "functionlist"
        /* Then the rest */
        *(.text)
        SCHED_TEXT
        LOCK_TEXT
        KPROBES_TEXT
        *(.fixup)
        *(.gnu.warning)
  } = 0x9090
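  /* The "= 0x9090" fill above pads alignment gaps in .text with 0x90 (NOP)
   * opcodes, so any padding that is ever reached executes harmlessly. */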
  /* out-of-line lock text */
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }

  _etext = .;  /* End of text section */

  . = ALIGN(16);  /* Exception table */
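  /* Each __ex_table entry pairs the address of an instruction that may
   * fault (e.g. a user-space access) with the address of its fixup code;
   * the page-fault handler searches this table before oopsing. */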
  __start___ex_table = .;
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  __stop___ex_table = .;

  RODATA
#ifdef CONFIG_STACK_UNWIND
  . = ALIGN(8);
  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
        __start_unwind = .;
        *(.eh_frame)
        __end_unwind = .;
  }
#endif
  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
        *(.data)
        CONSTRUCTORS
  }

  _edata = .;  /* End of data section */

  __bss_start = .;  /* BSS */
  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
        *(.bss.page_aligned)
        *(.bss)
  }
  __bss_stop = .;

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
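  /* Cache-line-aligned and "read mostly" data get their own sections so
   * frequently written and rarely written variables do not share cache
   * lines (false sharing) across CPUs and nodes. */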
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
        *(.data.cacheline_aligned)
  }
  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
        *(.data.read_mostly)
  }
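  /* The vsyscall page is mapped at the fixed virtual address -10 MB
   * (0xffffffffff600000). The macros below compute its load (physical) and
   * kernel-image virtual addresses, rounded up to the next 4 KiB page, and
   * VLOAD()/VVIRT() translate section addresses into that mapping. */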
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
  __vsyscall_0 = VSYSCALL_VIRT_ADDR;
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .xtime_lock : AT(VLOAD(.xtime_lock)) { *(.xtime_lock) }
  xtime_lock = VVIRT(.xtime_lock);
  .vxtime : AT(VLOAD(.vxtime)) { *(.vxtime) }
  vxtime = VVIRT(.vxtime);
  .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
  vgetcpu_mode = VVIRT(.vgetcpu_mode);
  .wall_jiffies : AT(VLOAD(.wall_jiffies)) { *(.wall_jiffies) }
  wall_jiffies = VVIRT(.wall_jiffies);
  .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) }
  sys_tz = VVIRT(.sys_tz);
  .sysctl_vsyscall : AT(VLOAD(.sysctl_vsyscall)) { *(.sysctl_vsyscall) }
  sysctl_vsyscall = VVIRT(.sysctl_vsyscall);
  .xtime : AT(VLOAD(.xtime)) { *(.xtime) }
  xtime = VVIRT(.xtime);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  jiffies = VVIRT(.jiffies);
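  /* Each further vsyscall entry sits in a fixed 1 KiB slot within the
   * vsyscall page, so user space can call it at a constant address. */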
  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { *(.vsyscall_1) }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { *(.vsyscall_2) }
  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) }

  . = VSYSCALL_VIRT_ADDR + 4096;
#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
  . = ALIGN(8192);  /* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
        *(.data.init_task)
  }
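  /* init_task sits in its own section so the initial task's thread union
   * (its kernel stack) gets the 8 KiB alignment enforced above. */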
  . = ALIGN(4096);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
        *(.data.page_aligned)
  }
  /* might get freed after init */
  . = ALIGN(4096);
  __smp_alt_begin = .;
  __smp_alt_instructions = .;
  .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
        *(.smp_altinstructions)
  }
  __smp_alt_instructions_end = .;
  . = ALIGN(8);
  __smp_locks = .;
  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
        *(.smp_locks)
  }
  __smp_locks_end = .;
  .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
        *(.smp_altinstr_replacement)
  }
  . = ALIGN(4096);
  __smp_alt_end = .;
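  /* The tables above record SMP-only instruction sequences and LOCK
   * prefixes so the kernel can patch them out (and back in) when it finds
   * itself running on a single CPU. */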
  . = ALIGN(4096);  /* Init code and data */
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        _sinittext = .;
        *(.init.text)
        _einittext = .;
  }
  __initdata_begin = .;
  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
  __initdata_end = .;
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
        *(.initcall1.init)
        *(.initcall2.init)
        *(.initcall3.init)
        *(.initcall4.init)
        *(.initcall5.init)
        *(.initcall6.init)
        *(.initcall7.init)
  }
  __initcall_end = .;
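  /* Initcalls run in ascending level order at boot: level 1 (core_initcall)
   * through level 7 (late_initcall); module_init() entries land in level 6. */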
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
        *(.con_initcall.init)
  }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8);
  __alt_instructions = .;
  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
        *(.altinstructions)
  }
  __alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
  }
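  /* .altinstructions and .altinstr_replacement feed apply_alternatives(),
   * which rewrites code at boot with instructions suited to the features of
   * the CPU actually running. */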
  /* .exit.text is discarded at runtime, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }

  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  __initramfs_end = .;

  /* temporary here to work around NR_CPUS. If you see this comment in
     2.6.17+ complain */
  . = ALIGN(4096);
  __init_end = .;
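  /* Everything between __init_begin and __init_end is released back to the
   * page allocator by free_initmem() once booting is complete. */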
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  __per_cpu_start = .;
  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
  __per_cpu_end = .;
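  /* .data.percpu is only a template: setup_per_cpu_areas() copies it once
   * for each possible CPU during boot. */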
  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;
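  /* Data between __nosave_begin and __nosave_end is not saved or restored
   * by software suspend (swsusp). */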
  _end = . ;
  /* Sections to be discarded */
  /DISCARD/ : {
        *(.exitcall.exit)
#ifndef CONFIG_UNWIND_INFO
        *(.eh_frame)
#endif
  }
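  /* STABS_DEBUG and DWARF_DEBUG (from asm-generic/vmlinux.lds.h) collect the
   * standard .stab* and .debug* debugging sections. */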
  STABS_DEBUG

  DWARF_DEBUG
}