vmlinux.lds.S

/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
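
/*
 * Note: with the default configuration, LOAD_OFFSET resolves to
 * 0xC0000000 (__PAGE_OFFSET) on 32-bit and to 0xffffffff80000000
 * (__START_KERNEL_map) on 64-bit, so the AT(ADDR(section) - LOAD_OFFSET)
 * directives below compute each output section's physical load address
 * from its virtual address.
 */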
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
 * On 64-bit, align RODATA to 2MB so that, even with CONFIG_DEBUG_RODATA,
 * we retain large page mappings across the boundaries between the kernel
 * text, rodata and data sections.
 *
 * However, the kernel identity mappings need different RWX permissions
 * for the pages mapping the text section and for the padding pages
 * (which are freed) around it, so the identity mappings get broken up
 * into smaller pages anyway. Since on 64-bit the kernel text mapping and
 * the kernel identity mapping are distinct, we can both enable the
 * protection checks that come with CONFIG_DEBUG_RODATA and retain 2MB
 * large page mappings for kernel text.
 */
#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_DEBUG_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END

#endif
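
/*
 * HPAGE_SIZE here is the PMD-level large page size, 2MB on x86-64, so
 * these markers put the rodata boundaries on their own 2MB mappings,
 * separate from text and data.
 */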
PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
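
/*
 * FLAGS() takes the ELF p_flags bits (PF_X = 1, PF_W = 2, PF_R = 4),
 * so FLAGS(5) is R_E, FLAGS(6) is RW_ and FLAGS(7) is RWE, as the
 * comments above spell out. Output sections are assigned to these
 * segments via the ":name" notation below.
 */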
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text..page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
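
	/*
	 * The "= 0x9090" above is the fill pattern for gaps in the :text
	 * segment: 0x90 is the x86 NOP opcode, so any padding between
	 * input sections decodes as harmless NOPs.
	 */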
	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

#if defined(CONFIG_DEBUG_RODATA)
	/* .text should occupy whole number of pages */
	. = ALIGN(PAGE_SIZE);
#endif
	X64_ALIGN_DEBUG_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_DEBUG_RODATA_END
	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
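
	/*
	 * VSYSCALL_ADDR (-10MB) is 0xffffffffff600000, the fixed
	 * user-visible address of the legacy vsyscall page. Since ADDR()
	 * of the sections below is that vsyscall address, VLOAD()
	 * translates it back to the physical load address and VVIRT()
	 * back to the normal kernel-image virtual address, so symbols
	 * such as vsyscall_gtod_data below refer to the kernel mapping
	 * of the data.
	 */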
	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */
	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif
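
	/*
	 * Because the percpu symbols are zero-based, a symbol's
	 * link-time value is simply its offset within a CPU's percpu
	 * area; percpu accesses can then use it directly as a
	 * %gs-relative displacement, along the lines of
	 *
	 *	movq %gs:this_cpu_off, %rax
	 *
	 * (this_cpu_off being one existing percpu variable, used here
	 * purely as an illustration).
	 */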
	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
	/*
	 * Start address and size of operations which at run time
	 * can be patched with virtualization-friendly instructions or
	 * bare-metal native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
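
	/*
	 * Each entry here is a struct paravirt_patch_site; in this era
	 * of paravirt_types.h it looks roughly like:
	 *
	 *	struct paravirt_patch_site {
	 *		u8 *instr;	-- original instructions
	 *		u8 instrtype;	-- type of this instruction
	 *		u8 len;		-- length of original instruction
	 *		u16 clobbers;	-- registers the patch may clobber
	 *	};
	 *
	 * apply_paravirt() walks [__parainstructions,
	 * __parainstructions_end) and patches each site in place.
	 */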
	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
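
	/*
	 * For reference, a struct alt_instr of this vintage is roughly:
	 *
	 *	struct alt_instr {
	 *		u8 *instr;		-- original instruction
	 *		u8 *replacement;
	 *		u8  cpuid;		-- cpuid bit set for replacement
	 *		u8  instrlen;		-- length of original instruction
	 *		u8  replacementlen;	-- length of new instruction
	 *	};
	 *
	 * apply_alternatives() walks [__alt_instructions,
	 * __alt_instructions_end) and patches in the replacements on
	 * CPUs that have the corresponding feature bit set.
	 */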
	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section carries
	 * enough data to get their address and length, so the kernel
	 * can be patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMU entries which is sorted at run time
	 * according to its dependency order. After rootfs_initcall has
	 * completed, this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}
	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(THREAD_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}
	/*
	 * smp_locks might be freed after init
	 * start/end must be page-aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
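
	/*
	 * Each .smp_locks entry locates a LOCK prefix byte in the text.
	 * When booting on a single CPU, the SMP-alternatives code in
	 * arch/x86/kernel/alternative.c rewrites those prefixes to NOPs,
	 * since bus locking is unnecessary there; as noted above, the
	 * section itself might then be freed after init.
	 */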
#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
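
	/*
	 * The .brk range backs a very early boot-time allocator:
	 * extend_brk() hands out memory between __brk_base and
	 * __brk_limit, and RESERVE_BRK() in callers is what populates
	 * .brk_reservation (the initial kernel page tables are one such
	 * user). Whatever remains unused is released once boot is done.
	 */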
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}
#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
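
/*
 * The percpu output section is linked at offset zero, but until
 * setup_per_cpu_areas() runs, the boot CPU uses the initial copy loaded
 * at __per_cpu_load; these aliases give early code (head_64.S, for
 * example) direct symbols into that copy.
 */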
/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif