vmlinux.lds.S

/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */
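/*
 * Illustrative only (hypothetical symbols, not part of this script):
 * an assignment outside any output section, e.g. "foo = 0xffffffff81000000;",
 * yields an absolute symbol whose value never changes when the image is
 * relocated, whereas "bar = .;" written inside a section definition is
 * section relative and moves with that section.
 */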
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
#define X64_ALIGN_DEBUG_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
#define X64_ALIGN_DEBUG_RODATA_END              \
        . = ALIGN(HPAGE_SIZE);                  \
        __end_rodata_hpage_align = .;
#else
#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END
#endif
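/*
 * Note: with CONFIG_DEBUG_RODATA on 64-bit, aligning read-only data to
 * HPAGE_SIZE lets the rodata region stay mapped with large pages when it
 * is later marked read-only; __end_rodata_hpage_align marks the end of
 * that large-page-aligned range.
 */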
PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
        user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
        init PT_LOAD FLAGS(7);          /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
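/*
 * The FLAGS() values above are ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4. So 5 = read + execute, 6 = read + write, and 7 = read +
 * write + execute, matching the R/W/E comments on each program header.
 */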
SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

        /* Text and read-only data */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
                /* bootstrapping code */
                HEAD_TEXT
#ifdef CONFIG_X86_32
                . = ALIGN(PAGE_SIZE);
                *(.text.page_aligned)
#endif
                . = ALIGN(8);
                _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
                /* End of text section */
                _etext = .;
        } :text = 0x9090
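        /*
         * The "= 0x9090" fill pattern pads gaps in the :text segment with
         * 0x90 bytes, the x86 NOP opcode, so padding between input sections
         * is harmless if it is ever executed.
         */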
        NOTES :text :note

        EXCEPTION_TABLE(16) :text = 0x9090

        X64_ALIGN_DEBUG_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
        X64_ALIGN_DEBUG_RODATA_END

        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                /* Start of data section */
                _sdata = .;

                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
                /* 32 bit has nosave before _edata */
                NOSAVE_DATA
#endif

                PAGE_ALIGNED_DATA(PAGE_SIZE)

                CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

                DATA_DATA
                CONSTRUCTORS

                /* rarely changed data like cpu maps */
                READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

                /* End of data section */
                _edata = .;
        } :data
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
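/*
 * VSYSCALL_ADDR (-10 MB, i.e. 0xffffffffff600000) is the fixed virtual
 * address at which user space sees the vsyscall page. The sections below
 * are linked at that address; VLOAD() recovers each one's position within
 * the kernel image for the AT() load address, and VVIRT() gives the
 * kernel-mapping alias used when the kernel itself references the data.
 */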
        . = ALIGN(4096);
        __vsyscall_0 = .;

        . = VSYSCALL_ADDR;
        .vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
                *(.vsyscall_0)
        } :user

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                *(.vsyscall_fn)
        }

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
                *(.vsyscall_gtod_data)
        }
        vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);

        .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
                *(.vsyscall_clock)
        }
        vsyscall_clock = VVIRT(.vsyscall_clock);

        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
        .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
                *(.vsyscall_2)
        }

        .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
                *(.vgetcpu_mode)
        }
        vgetcpu_mode = VVIRT(.vgetcpu_mode);

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .jiffies : AT(VLOAD(.jiffies)) {
                *(.jiffies)
        }
        jiffies = VVIRT(.jiffies);

        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }

        . = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */
        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .; /* paired with __init_end */
        }

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
        PERCPU_VADDR(0, :percpu)
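        /*
         * Linking the per-cpu symbols at virtual address 0 means a symbol's
         * value is simply its offset into the per-cpu area, so it can be
         * addressed directly relative to the %gs segment base on x86-64.
         */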
#endif

        INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
        :init
#endif

        INIT_DATA_SECTION(16)

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

        . = ALIGN(8);
        .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
                __parainstructions = .;
                *(.parainstructions)
                __parainstructions_end = .;
        }

        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }
        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions and .eh_frame
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
        PERCPU(PAGE_SIZE)
#endif

        . = ALIGN(PAGE_SIZE);
        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                __smp_locks_end = .;
                . = ALIGN(PAGE_SIZE);
        }

#ifdef CONFIG_X86_64
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }
#endif

        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss.page_aligned)
                *(.bss)
                . = ALIGN(4);
                __bss_stop = .;
        }

        . = ALIGN(PAGE_SIZE);
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.brk_reservation)     /* areas brk users have reserved */
                __brk_limit = .;
        }
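        /*
         * The .brk area provides boot-time allocation space consumed by
         * extend_brk(); the .brk_reservation input sections are emitted by
         * the RESERVE_BRK() macro in the x86 setup code, and the extra 64k
         * is slop for alignment, as noted above.
         */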
        .end : AT(ADDR(.end) - LOAD_OFFSET) {
                _end = .;
        }

        STABS_DEBUG
        DWARF_DEBUG

        /* Sections to be discarded */
        DISCARDS
        /DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
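/*
 * These init_per_cpu__* aliases point into the initial per-cpu data image
 * at __per_cpu_load, so early boot code can reach the boot CPU's gdt_page
 * and irq_stack_union before the real per-cpu areas have been set up.
 */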
/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif