/* arch/parisc/kernel/vmlinux.lds.S */
/* Kernel link layout for various "sections"
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 * Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
  26. #include <asm-generic/vmlinux.lds.h>
  27. /* needed for the processor specific cache alignment size */
  28. #include <asm/cache.h>
  29. #include <asm/page.h>
  30. #include <asm/asm-offsets.h>
  31. /* ld script to make hppa Linux kernel */
  32. #ifndef CONFIG_64BIT
  33. OUTPUT_FORMAT("elf32-hppa-linux")
  34. OUTPUT_ARCH(hppa)
  35. #else
  36. OUTPUT_FORMAT("elf64-hppa-linux")
  37. OUTPUT_ARCH(hppa:hppa2.0w)
  38. #endif
  39. ENTRY(_stext)
  40. #ifndef CONFIG_64BIT
  41. jiffies = jiffies_64 + 4;
  42. #else
  43. jiffies = jiffies_64;
  44. #endif
  45. SECTIONS
  46. {
  47. . = KERNEL_BINARY_TEXT_START;
  48. _text = .; /* Text and read-only data */
  49. .text ALIGN(16) : {
  50. *(.text)
  51. SCHED_TEXT
  52. LOCK_TEXT
  53. *(.text.do_softirq)
  54. *(.text.sys_exit)
  55. *(.text.do_sigaltstack)
  56. *(.text.do_fork)
  57. *(.text.*)
  58. *(.fixup)
  59. *(.lock.text) /* out-of-line lock text */
  60. *(.gnu.warning)
  61. } = 0
  62. _etext = .; /* End of text section */
  63. RODATA
  64. /* writeable */
  65. . = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so
  66. that we can properly leave these
  67. as writable */
  68. data_start = .;
  69. . = ALIGN(16); /* Exception table */
  70. __start___ex_table = .;
  71. __ex_table : { *(__ex_table) }
  72. __stop___ex_table = .;
  73. __start___unwind = .; /* unwind info */
  74. .PARISC.unwind : { *(.PARISC.unwind) }
  75. __stop___unwind = .;
  76. /* rarely changed data like cpu maps */
  77. . = ALIGN(16);
  78. .data.read_mostly : { *(.data.read_mostly) }
  79. . = ALIGN(L1_CACHE_BYTES);
  80. .data : { /* Data */
  81. *(.data)
  82. CONSTRUCTORS
  83. }
  84. . = ALIGN(L1_CACHE_BYTES);
  85. .data.cacheline_aligned : { *(.data.cacheline_aligned) }
  86. /* PA-RISC locks requires 16-byte alignment */
  87. . = ALIGN(16);
  88. .data.lock_aligned : { *(.data.lock_aligned) }
  89. . = ALIGN(ASM_PAGE_SIZE);
  90. /* nosave data is really only used for software suspend...it's here
  91. * just in case we ever implement it */
  92. __nosave_begin = .;
  93. .data_nosave : { *(.data.nosave) }
  94. . = ALIGN(ASM_PAGE_SIZE);
  95. __nosave_end = .;
  96. _edata = .; /* End of data section */
  97. __bss_start = .; /* BSS */
  98. /* page table entries need to be PAGE_SIZE aligned */
  99. . = ALIGN(ASM_PAGE_SIZE);
  100. .data.vmpages : {
  101. *(.data.vm0.pmd)
  102. *(.data.vm0.pgd)
  103. *(.data.vm0.pte)
  104. }
  105. .bss : { *(.bss) *(COMMON) }
  106. __bss_stop = .;
  107. /* assembler code expects init_task to be 16k aligned */
  108. . = ALIGN(16384); /* init_task */
  109. .data.init_task : { *(.data.init_task) }
  110. /* The interrupt stack is currently partially coded, but not yet
  111. * implemented */
  112. . = ALIGN(16384);
  113. init_istack : { *(init_istack) }
  114. #ifdef CONFIG_64BIT
  115. . = ALIGN(16); /* Linkage tables */
  116. .opd : { *(.opd) } PROVIDE (__gp = .);
  117. .plt : { *(.plt) }
  118. .dlt : { *(.dlt) }
  119. #endif
  120. /* reserve space for interrupt stack by aligning __init* to 16k */
  121. . = ALIGN(16384);
  122. __init_begin = .;
  123. .init.text : {
  124. _sinittext = .;
  125. *(.init.text)
  126. _einittext = .;
  127. }
  128. .init.data : { *(.init.data) }
  129. . = ALIGN(16);
  130. __setup_start = .;
  131. .init.setup : { *(.init.setup) }
  132. __setup_end = .;
  133. __initcall_start = .;
  134. .initcall.init : {
  135. *(.initcall1.init)
  136. *(.initcall2.init)
  137. *(.initcall3.init)
  138. *(.initcall4.init)
  139. *(.initcall5.init)
  140. *(.initcall6.init)
  141. *(.initcall7.init)
  142. }
  143. __initcall_end = .;
  144. __con_initcall_start = .;
  145. .con_initcall.init : { *(.con_initcall.init) }
  146. __con_initcall_end = .;
  147. SECURITY_INIT
  148. /* alternate instruction replacement. This is a mechanism x86 uses
  149. * to detect the CPU type and replace generic instruction sequences
  150. * with CPU specific ones. We don't currently do this in PA, but
  151. * it seems like a good idea... */
  152. . = ALIGN(4);
  153. __alt_instructions = .;
  154. .altinstructions : { *(.altinstructions) }
  155. __alt_instructions_end = .;
  156. .altinstr_replacement : { *(.altinstr_replacement) }
  157. /* .exit.text is discard at runtime, not link time, to deal with references
  158. from .altinstructions and .eh_frame */
  159. .exit.text : { *(.exit.text) }
  160. .exit.data : { *(.exit.data) }
  161. . = ALIGN(ASM_PAGE_SIZE);
  162. __initramfs_start = .;
  163. .init.ramfs : { *(.init.ramfs) }
  164. __initramfs_end = .;
  165. . = ALIGN(32);
  166. __per_cpu_start = .;
  167. .data.percpu : { *(.data.percpu) }
  168. __per_cpu_end = .;
  169. . = ALIGN(ASM_PAGE_SIZE);
  170. __init_end = .;
  171. /* freed after init ends here */
  172. _end = . ;
  173. /* Sections to be discarded */
  174. /DISCARD/ : {
  175. *(.exitcall.exit)
  176. #ifdef CONFIG_64BIT
  177. /* temporary hack until binutils is fixed to not emit these
  178. for static binaries */
  179. *(.interp)
  180. *(.dynsym)
  181. *(.dynstr)
  182. *(.dynamic)
  183. *(.hash)
  184. *(.gnu.hash)
  185. #endif
  186. }
  187. STABS_DEBUG
  188. .note 0 : { *(.note) }
  189. }