vmlinux.lds.S
/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <linux/config.h>

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
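/* Note on addressing (added commentary): the kernel is linked at virtual
 * addresses based on __START_KERNEL_map, but it is loaded much lower in
 * physical memory. Each output section therefore uses
 * AT(ADDR(section) - LOAD_OFFSET) so that its load (physical) address is
 * the link-time virtual address minus the kernel mapping offset. */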
SECTIONS
{
  . = __START_KERNEL;
  phys_startup_64 = startup_64 - LOAD_OFFSET;
  _text = .;                    /* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
        *(.text)
        SCHED_TEXT
        LOCK_TEXT
        *(.fixup)
        *(.gnu.warning)
  } = 0x9090
  /* out-of-line lock text */
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }

  _etext = .;                   /* End of text section */

  . = ALIGN(16);                /* Exception table */
  __start___ex_table = .;
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  __stop___ex_table = .;
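  /* Added commentary: the "= 0x9090" fill above pads gaps in .text with NOP
   * (0x90) bytes. __ex_table collects (faulting address, fixup address)
   * pairs emitted by the user-access macros; the fault handler searches the
   * range __start___ex_table..__stop___ex_table to recover from bad user
   * pointers instead of oopsing. */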
  RODATA

  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
        *(.data)
        CONSTRUCTORS
  }

  _edata = .;                   /* End of data section */

  __bss_start = .;              /* BSS */
  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
        *(.bss.page_aligned)
        *(.bss)
  }
  __bss_end = .;

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
        *(.data.cacheline_aligned)
  }

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
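/* Added commentary on the vsyscall page: it is linked at the fixed virtual
 * address VSYSCALL_ADDR (-10 MB, i.e. 0xffffffffff600000), which every
 * process sees mapped at the same location, but it is stored in the kernel
 * image immediately after .data.cacheline_aligned (rounded up to a page).
 * VLOAD(x) yields a section's load address within the image and VVIRT(x)
 * its final virtual address, using the two offsets defined above; the
 * vsyscall_1/2/3 entries further down sit at fixed 1024-byte slots inside
 * the same 4 KB page. */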
  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
  __vsyscall_0 = VSYSCALL_VIRT_ADDR;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .xtime_lock : AT(VLOAD(.xtime_lock)) { *(.xtime_lock) }
  xtime_lock = VVIRT(.xtime_lock);

  .vxtime : AT(VLOAD(.vxtime)) { *(.vxtime) }
  vxtime = VVIRT(.vxtime);

  .wall_jiffies : AT(VLOAD(.wall_jiffies)) { *(.wall_jiffies) }
  wall_jiffies = VVIRT(.wall_jiffies);

  .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) }
  sys_tz = VVIRT(.sys_tz);

  .sysctl_vsyscall : AT(VLOAD(.sysctl_vsyscall)) { *(.sysctl_vsyscall) }
  sysctl_vsyscall = VVIRT(.sysctl_vsyscall);

  .xtime : AT(VLOAD(.xtime)) { *(.xtime) }
  xtime = VVIRT(.xtime);

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  jiffies = VVIRT(.jiffies);

  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { *(.vsyscall_1) }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { *(.vsyscall_2) }
  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) }

  . = VSYSCALL_VIRT_ADDR + 4096;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

  . = ALIGN(8192);              /* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
        *(.data.init_task)
  }
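  /* Added commentary: the 8192-byte alignment presumably matches THREAD_SIZE
   * in this configuration; the initial task's stack lives in .data.init_task,
   * and aligning it to the stack size lets thread_info be found by masking
   * the stack pointer. */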
  . = ALIGN(4096);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
        *(.data.page_aligned)
  }

  . = ALIGN(4096);              /* Init code and data */
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        _sinittext = .;
        *(.init.text)
        _einittext = .;
  }
  __initdata_begin = .;
  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
  __initdata_end = .;

  . = ALIGN(16);
  __setup_start = .;
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  __setup_end = .;

  __initcall_start = .;
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
        *(.initcall1.init)
        *(.initcall2.init)
        *(.initcall3.init)
        *(.initcall4.init)
        *(.initcall5.init)
        *(.initcall6.init)
        *(.initcall7.init)
  }
  __initcall_end = .;
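  /* Added commentary: initcalls registered via core_initcall() through
   * late_initcall() land in .initcall1.init through .initcall7.init; listing
   * the levels in this order means boot code can simply walk the function
   * pointers from __initcall_start to __initcall_end and run the levels in
   * sequence. */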
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
        *(.con_initcall.init)
  }
  __con_initcall_end = .;

  SECURITY_INIT

  . = ALIGN(8);
  __alt_instructions = .;
  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
        *(.altinstructions)
  }
  __alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
  }
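  /* Added commentary: .altinstructions holds descriptors emitted by the
   * alternative() macros; at boot the kernel patches the original
   * instructions with the replacements kept in .altinstr_replacement when
   * the CPU advertises the corresponding feature. Because these descriptors
   * may reference code in .exit.text, that section cannot be discarded at
   * link time (see the comment below). */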
  /* .exit.text is discarded at runtime, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }

  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  __initramfs_end = .;

  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
  __per_cpu_end = .;
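  /* Added commentary: .data.percpu is only a template. At boot each CPU
   * gets its own copy of the __per_cpu_start..__per_cpu_end range, and
   * per-CPU accesses add that CPU's offset to the symbol address. */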
  . = ALIGN(4096);
  __init_end = .;
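  /* Added commentary: everything between __init_begin and __init_end (init
   * code and data, setup entries, initcalls, the built-in initramfs and the
   * per-CPU template) is released back to the page allocator once boot
   * completes. */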
  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;
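  /* Added commentary: data placed in .data.nosave ends up in the
   * page-aligned __nosave_begin..__nosave_end range, which software suspend
   * skips when saving and restoring the memory image. */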
  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
        *(.exitcall.exit)
#ifndef CONFIG_DEBUG_INFO
        *(.eh_frame)
#endif
  }

  /* DWARF 2 */
  .debug_info 0 : { *(.debug_info) }
  .debug_abbrev 0 : { *(.debug_abbrev) }
  .debug_line 0 : { *(.debug_line) }
  .debug_frame 0 : { *(.debug_frame) }
  .debug_str 0 : { *(.debug_str) }
  .debug_loc 0 : { *(.debug_loc) }
  .debug_macinfo 0 : { *(.debug_macinfo) }
  /* SGI/MIPS DWARF 2 extensions */
  .debug_weaknames 0 : { *(.debug_weaknames) }
  .debug_funcnames 0 : { *(.debug_funcnames) }
  .debug_typenames 0 : { *(.debug_typenames) }
  .debug_varnames 0 : { *(.debug_varnames) }
  .comment 0 : { *(.comment) }
}