/*
 * File: arch/blackfin/kernel/vmlinux.lds.S
 * Based on: none - original work
 * Author:
 *
 * Created: Tue Sep 21 2004
 * Description: Master linker script for blackfin architecture
 *
 * Modified:
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
  29. #define VMLINUX_SYMBOL(_sym_) _##_sym_
  30. #include <asm-generic/vmlinux.lds.h>
  31. #include <asm/mem_map.h>
  32. #include <asm/page.h>
  33. #include <asm/thread_info.h>
  34. OUTPUT_FORMAT("elf32-bfin")
  35. ENTRY(__start)
  36. _jiffies = _jiffies_64;
  37. SECTIONS
  38. {
  39. . = CONFIG_BOOT_LOAD;
  40. /* Neither the text, ro_data or bss section need to be aligned
  41. * So pack them back to back
  42. */
  43. .text :
  44. {
  45. __text = .;
  46. _text = .;
  47. __stext = .;
  48. TEXT_TEXT
  49. #ifndef CONFIG_SCHEDULE_L1
  50. SCHED_TEXT
  51. #endif
  52. LOCK_TEXT
  53. IRQENTRY_TEXT
  54. KPROBES_TEXT
  55. *(.text.*)
  56. *(.fixup)
  57. #if !L1_CODE_LENGTH
  58. *(.l1.text)
  59. #endif
  60. . = ALIGN(16);
  61. ___start___ex_table = .;
  62. *(__ex_table)
  63. ___stop___ex_table = .;
  64. __etext = .;
  65. }
  66. NOTES
  67. /* Just in case the first read only is a 32-bit access */
  68. RO_DATA(4)
  69. .bss :
  70. {
  71. . = ALIGN(4);
  72. ___bss_start = .;
  73. *(.bss .bss.*)
  74. *(COMMON)
  75. #if !L1_DATA_A_LENGTH
  76. *(.l1.bss)
  77. #endif
  78. #if !L1_DATA_B_LENGTH
  79. *(.l1.bss.B)
  80. #endif
  81. . = ALIGN(4);
  82. ___bss_stop = .;
  83. }
  84. .data :
  85. {
  86. __sdata = .;
  87. /* This gets done first, so the glob doesn't suck it in */
  88. . = ALIGN(32);
  89. *(.data.cacheline_aligned)
  90. #if !L1_DATA_A_LENGTH
  91. . = ALIGN(32);
  92. *(.data_l1.cacheline_aligned)
  93. *(.l1.data)
  94. #endif
  95. #if !L1_DATA_B_LENGTH
  96. *(.l1.data.B)
  97. #endif
  98. #if !L2_LENGTH
  99. . = ALIGN(32);
  100. *(.data_l2.cacheline_aligned)
  101. *(.l2.data)
  102. #endif
  103. DATA_DATA
  104. CONSTRUCTORS
  105. /* make sure the init_task is aligned to the
  106. * kernel thread size so we can locate the kernel
  107. * stack properly and quickly.
  108. */
  109. . = ALIGN(THREAD_SIZE);
  110. *(.init_task.data)
  111. __edata = .;
  112. }
  113. /* The init section should be last, so when we free it, it goes into
  114. * the general memory pool, and (hopefully) will decrease fragmentation
  115. * a tiny bit. The init section has a _requirement_ that it be
  116. * PAGE_SIZE aligned
  117. */
  118. . = ALIGN(PAGE_SIZE);
  119. ___init_begin = .;
  120. .init.text :
  121. {
  122. . = ALIGN(PAGE_SIZE);
  123. __sinittext = .;
  124. INIT_TEXT
  125. __einittext = .;
  126. }
  127. .init.data :
  128. {
  129. . = ALIGN(16);
  130. INIT_DATA
  131. }
  132. .init.setup :
  133. {
  134. . = ALIGN(16);
  135. ___setup_start = .;
  136. *(.init.setup)
  137. ___setup_end = .;
  138. }
  139. .initcall.init :
  140. {
  141. ___initcall_start = .;
  142. INITCALLS
  143. ___initcall_end = .;
  144. }
  145. .con_initcall.init :
  146. {
  147. ___con_initcall_start = .;
  148. *(.con_initcall.init)
  149. ___con_initcall_end = .;
  150. }
  151. PERCPU(4)
  152. SECURITY_INIT
  153. /* we have to discard exit text and such at runtime, not link time, to
  154. * handle embedded cross-section references (alt instructions, bug
  155. * table, eh_frame, etc...)
  156. */
  157. .exit.text :
  158. {
  159. EXIT_TEXT
  160. }
  161. .exit.data :
  162. {
  163. EXIT_DATA
  164. }
  165. .init.ramfs :
  166. {
  167. . = ALIGN(4);
  168. ___initramfs_start = .;
  169. *(.init.ramfs)
  170. . = ALIGN(4);
  171. ___initramfs_end = .;
  172. }
  173. __l1_lma_start = .;
  174. .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
  175. {
  176. . = ALIGN(4);
  177. __stext_l1 = .;
  178. *(.l1.text)
  179. #ifdef CONFIG_SCHEDULE_L1
  180. SCHED_TEXT
  181. #endif
  182. . = ALIGN(4);
  183. __etext_l1 = .;
  184. }
  185. ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")
  186. .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
  187. {
  188. . = ALIGN(4);
  189. __sdata_l1 = .;
  190. *(.l1.data)
  191. __edata_l1 = .;
  192. . = ALIGN(32);
  193. *(.data_l1.cacheline_aligned)
  194. . = ALIGN(4);
  195. __sbss_l1 = .;
  196. *(.l1.bss)
  197. . = ALIGN(4);
  198. __ebss_l1 = .;
  199. }
  200. ASSERT (SIZEOF(.data_a_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
  201. .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
  202. {
  203. . = ALIGN(4);
  204. __sdata_b_l1 = .;
  205. *(.l1.data.B)
  206. __edata_b_l1 = .;
  207. . = ALIGN(4);
  208. __sbss_b_l1 = .;
  209. *(.l1.bss.B)
  210. . = ALIGN(4);
  211. __ebss_b_l1 = .;
  212. }
  213. ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
  214. __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
  215. .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
  216. {
  217. . = ALIGN(4);
  218. __stext_l2 = .;
  219. *(.l2.text)
  220. . = ALIGN(4);
  221. __etext_l2 = .;
  222. . = ALIGN(4);
  223. __sdata_l2 = .;
  224. *(.l2.data)
  225. __edata_l2 = .;
  226. . = ALIGN(32);
  227. *(.data_l2.cacheline_aligned)
  228. . = ALIGN(4);
  229. __sbss_l2 = .;
  230. *(.l2.bss)
  231. . = ALIGN(4);
  232. __ebss_l2 = .;
  233. }
  234. ASSERT (SIZEOF(.text_data_l1) <= L2_LENGTH, "L2 overflow!")
  235. /* Force trailing alignment of our init section so that when we
  236. * free our init memory, we don't leave behind a partial page.
  237. */
  238. . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
  239. . = ALIGN(PAGE_SIZE);
  240. ___init_end = .;
  241. __end =.;
  242. STABS_DEBUG
  243. DWARF_DEBUG
  244. /DISCARD/ :
  245. {
  246. *(.exitcall.exit)
  247. }
  248. }