/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation and unification done by Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol's value
 * should change when the kernel is relocated, make the symbol
 * section-relative and put it inside the section definition.
 */
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
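
/*
 * Note: ENTRY() sets the ELF entry point to the physical-address
 * startup symbol.  The jiffies aliasing relies on x86 being
 * little-endian: the low 32 bits of the 64-bit counter live at its
 * base address, so on 32-bit "jiffies" can simply alias jiffies_64,
 * while on 64-bit jiffies lives in the vsyscall page below and
 * jiffies_64 aliases it.
 */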
PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
        user PT_LOAD FLAGS(7);          /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
        data.init2 PT_LOAD FLAGS(7);    /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
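
/*
 * Note: FLAGS() takes the ELF p_flags bits directly: PF_X = 1,
 * PF_W = 2, PF_R = 4.  So FLAGS(5) = PF_R | PF_X (read + execute)
 * and FLAGS(7) = PF_R | PF_W | PF_X (read + write + execute).
 */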
SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
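
        /*
         * Note: each section below is placed with
         * AT(ADDR(section) - LOAD_OFFSET): the kernel is linked at a
         * high virtual address, and subtracting LOAD_OFFSET recovers
         * the physical load address.  The ":text = 0x9090" fill pads
         * alignment gaps with 0x90, the x86 NOP opcode.
         */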
        /* Text and read-only data */

        /* bootstrapping code */
        .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
                _text = .;
                *(.text.head)
        } :text = 0x9090
        /* The rest of the text */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
                /* not really needed, already page aligned */
                . = ALIGN(PAGE_SIZE);
                *(.text.page_aligned)
#endif
                . = ALIGN(8);
                _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
                /* End of text section */
                _etext = .;
        } :text = 0x9090
        NOTES :text :note

        /* Exception table */
        . = ALIGN(16);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        } :text = 0x9090
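
        /*
         * Note: "NOTES :text :note" emits the .notes section into both
         * the text segment and a separate PT_NOTE header.  The symbols
         * __start___ex_table/__stop___ex_table bound the exception
         * table that the fault-fixup code searches at run time.
         */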
        RODATA

        /* Data */
        . = ALIGN(PAGE_SIZE);
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                CONSTRUCTORS
#ifdef CONFIG_X86_64
                /* End of data section */
                _edata = .;
#endif
        } :data
#ifdef CONFIG_X86_32
        /* 32 bit has nosave before _edata */
        . = ALIGN(PAGE_SIZE);
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                __nosave_begin = .;
                *(.data.nosave)
                . = ALIGN(PAGE_SIZE);
                __nosave_end = .;
        }
#endif

        . = ALIGN(PAGE_SIZE);
        .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
                *(.data.page_aligned)
                *(.data.idt)
        }
#ifdef CONFIG_X86_32
        . = ALIGN(32);
#else
        . = ALIGN(PAGE_SIZE);
        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
        .data.cacheline_aligned :
                AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
                *(.data.cacheline_aligned)
        }

        /* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
        . = ALIGN(32);
#else
        . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
        .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
                *(.data.read_mostly)
#ifdef CONFIG_X86_32
                /* End of data section */
                _edata = .;
#endif
        }
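
        /*
         * Note: giving read-mostly data its own cachelines keeps it
         * from sharing lines with frequently written data, avoiding
         * needless cacheline bouncing between CPUs (and, via the
         * INTERNODE_CACHE_BYTES alignment, between NUMA nodes).
         */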
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
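
/*
 * Note: the vsyscall page has three addresses: the fixed user-visible
 * VSYSCALL_ADDR (-10 MB, i.e. 0xffffffffff600000), its load address
 * just past .data.read_mostly rounded up to a page
 * (VSYSCALL_PHYS_ADDR), and its alias in the regular kernel mapping
 * (VSYSCALL_VIRT_ADDR).  VLOAD(x) and VVIRT(x) translate a section's
 * link address into the latter two, respectively.
 */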
        . = VSYSCALL_ADDR;
        .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
                *(.vsyscall_0)
        } :user

        __vsyscall_0 = VSYSCALL_VIRT_ADDR;

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                *(.vsyscall_fn)
        }

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
                *(.vsyscall_gtod_data)
        }

        vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
        .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
                *(.vsyscall_clock)
        }
        vsyscall_clock = VVIRT(.vsyscall_clock);

        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
        .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
                *(.vsyscall_2)
        }

        .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
                *(.vgetcpu_mode)
        }
        vgetcpu_mode = VVIRT(.vgetcpu_mode);

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .jiffies : AT(VLOAD(.jiffies)) {
                *(.jiffies)
        }
        jiffies = VVIRT(.jiffies);

        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }

        . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
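
        /*
         * Note: the vsyscall sections above all fit in a single page,
         * so resume the location counter in the regular kernel
         * mapping, one page past the rounded-up end of
         * .data.read_mostly.
         */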
#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */
        /* init_task */
        . = ALIGN(THREAD_SIZE);
        .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
                *(.data.init_task)
        }
#ifdef CONFIG_X86_64
        :data.init
#endif
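
        /*
         * Note: the init task's stack shares this block, so it must be
         * THREAD_SIZE-aligned; 32-bit current_thread_info() recovers
         * thread_info by masking the stack pointer with
         * ~(THREAD_SIZE - 1), which depends on that alignment.
         */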
        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                __smp_locks_end = .;
                . = ALIGN(PAGE_SIZE);
        }
        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .; /* paired with __init_end */
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
        }

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
        }

        . = ALIGN(16);
        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                __setup_start = .;
                *(.init.setup)
                __setup_end = .;
        }

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                __initcall_start = .;
                INITCALLS
                __initcall_end = .;
        }
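
        /*
         * Note: INITCALLS (from asm-generic/vmlinux.lds.h) expands to
         * the ordered initcall levels (early, then 0-7 with their "s"
         * sync variants); at boot the kernel walks the function
         * pointers between __initcall_start and __initcall_end in
         * link order.
         */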
        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
        }

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }
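
        /*
         * Note: CPU vendor drivers drop a struct cpu_dev pointer in
         * here via cpu_dev_register(); CPU identification scans the
         * pointers between __x86_cpu_dev_start and __x86_cpu_dev_end.
         */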
        SECURITY_INIT

#ifdef CONFIG_X86_32
# include "vmlinux_32.lds.S"
#else
# include "vmlinux_64.lds.S"
#endif

        STABS_DEBUG
        DWARF_DEBUG
}
#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
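
/*
 * Note: per-cpu variables are linked at offsets starting from zero,
 * so adding __per_cpu_load (where the initial per-cpu data sits in
 * the image) gives the address of the boot CPU's copy, usable before
 * the real per-cpu areas are allocated.
 */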
/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
        "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
       "kexec control code size is too big")
#endif