/*
 * File: arch/blackfin/kernel/vmlinux.lds.S
 * Based on: none - original work
 * Author:
 *
 * Created: Tue Sep 21 2004
 * Description: Master linker script for blackfin architecture
 *
 * Modified:
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
  29. #define VMLINUX_SYMBOL(_sym_) _##_sym_
  30. #include <asm-generic/vmlinux.lds.h>
  31. #include <asm/mem_map.h>
  32. #include <asm/page.h>
  33. #include <asm/thread_info.h>
  34. OUTPUT_FORMAT("elf32-bfin")
  35. ENTRY(__start)
  36. _jiffies = _jiffies_64;
  37. SECTIONS
  38. {
  39. . = CONFIG_BOOT_LOAD;
  40. /* Neither the text, ro_data or bss section need to be aligned
  41. * So pack them back to back
  42. */
  43. .text :
  44. {
  45. __text = .;
  46. _text = .;
  47. __stext = .;
  48. TEXT_TEXT
  49. #ifndef CONFIG_SCHEDULE_L1
  50. SCHED_TEXT
  51. #endif
  52. LOCK_TEXT
  53. KPROBES_TEXT
  54. *(.text.*)
  55. *(.fixup)
  56. #if !L1_CODE_LENGTH
  57. *(.l1.text)
  58. #endif
  59. . = ALIGN(16);
  60. ___start___ex_table = .;
  61. *(__ex_table)
  62. ___stop___ex_table = .;
  63. __etext = .;
  64. }
  65. NOTES
  66. /* Just in case the first read only is a 32-bit access */
  67. RO_DATA(4)
  68. .bss :
  69. {
  70. . = ALIGN(4);
  71. ___bss_start = .;
  72. *(.bss .bss.*)
  73. *(COMMON)
  74. #if !L1_DATA_A_LENGTH
  75. *(.l1.bss)
  76. #endif
  77. #if !L1_DATA_B_LENGTH
  78. *(.l1.bss.B)
  79. #endif
  80. . = ALIGN(4);
  81. ___bss_stop = .;
  82. }
  83. .data :
  84. {
  85. __sdata = .;
  86. /* This gets done first, so the glob doesn't suck it in */
  87. . = ALIGN(32);
  88. *(.data.cacheline_aligned)
  89. #if !L1_DATA_A_LENGTH
  90. . = ALIGN(32);
  91. *(.data_l1.cacheline_aligned)
  92. *(.l1.data)
  93. #endif
  94. #if !L1_DATA_B_LENGTH
  95. *(.l1.data.B)
  96. #endif
  97. #if !L2_LENGTH
  98. . = ALIGN(32);
  99. *(.data_l2.cacheline_aligned)
  100. *(.l2.data)
  101. #endif
  102. DATA_DATA
  103. CONSTRUCTORS
  104. /* make sure the init_task is aligned to the
  105. * kernel thread size so we can locate the kernel
  106. * stack properly and quickly.
  107. */
  108. . = ALIGN(THREAD_SIZE);
  109. *(.init_task.data)
  110. __edata = .;
  111. }
  112. /* The init section should be last, so when we free it, it goes into
  113. * the general memory pool, and (hopefully) will decrease fragmentation
  114. * a tiny bit. The init section has a _requirement_ that it be
  115. * PAGE_SIZE aligned
  116. */
  117. . = ALIGN(PAGE_SIZE);
  118. ___init_begin = .;
  119. .init.text :
  120. {
  121. . = ALIGN(PAGE_SIZE);
  122. __sinittext = .;
  123. INIT_TEXT
  124. __einittext = .;
  125. }
  126. .init.data :
  127. {
  128. . = ALIGN(16);
  129. INIT_DATA
  130. }
  131. .init.setup :
  132. {
  133. . = ALIGN(16);
  134. ___setup_start = .;
  135. *(.init.setup)
  136. ___setup_end = .;
  137. }
  138. .initcall.init :
  139. {
  140. ___initcall_start = .;
  141. INITCALLS
  142. ___initcall_end = .;
  143. }
  144. .con_initcall.init :
  145. {
  146. ___con_initcall_start = .;
  147. *(.con_initcall.init)
  148. ___con_initcall_end = .;
  149. }
  150. PERCPU(4)
  151. SECURITY_INIT
  152. .init.ramfs :
  153. {
  154. . = ALIGN(4);
  155. ___initramfs_start = .;
  156. *(.init.ramfs)
  157. . = ALIGN(4);
  158. ___initramfs_end = .;
  159. }
  160. __l1_lma_start = .;
  161. .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
  162. {
  163. . = ALIGN(4);
  164. __stext_l1 = .;
  165. *(.l1.text)
  166. #ifdef CONFIG_SCHEDULE_L1
  167. SCHED_TEXT
  168. #endif
  169. . = ALIGN(4);
  170. __etext_l1 = .;
  171. }
  172. ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")
  173. .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
  174. {
  175. . = ALIGN(4);
  176. __sdata_l1 = .;
  177. *(.l1.data)
  178. __edata_l1 = .;
  179. . = ALIGN(32);
  180. *(.data_l1.cacheline_aligned)
  181. . = ALIGN(4);
  182. __sbss_l1 = .;
  183. *(.l1.bss)
  184. . = ALIGN(4);
  185. __ebss_l1 = .;
  186. }
  187. ASSERT (SIZEOF(.data_a_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
  188. .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
  189. {
  190. . = ALIGN(4);
  191. __sdata_b_l1 = .;
  192. *(.l1.data.B)
  193. __edata_b_l1 = .;
  194. . = ALIGN(4);
  195. __sbss_b_l1 = .;
  196. *(.l1.bss.B)
  197. . = ALIGN(4);
  198. __ebss_b_l1 = .;
  199. }
  200. ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
  201. __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
  202. .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
  203. {
  204. . = ALIGN(4);
  205. __stext_l2 = .;
  206. *(.l2.text)
  207. . = ALIGN(4);
  208. __etext_l2 = .;
  209. . = ALIGN(4);
  210. __sdata_l2 = .;
  211. *(.l2.data)
  212. __edata_l2 = .;
  213. . = ALIGN(32);
  214. *(.data_l2.cacheline_aligned)
  215. . = ALIGN(4);
  216. __sbss_l2 = .;
  217. *(.l2.bss)
  218. . = ALIGN(4);
  219. __ebss_l2 = .;
  220. }
  221. ASSERT (SIZEOF(.text_data_l1) <= L2_LENGTH, "L2 overflow!")
  222. /* Force trailing alignment of our init section so that when we
  223. * free our init memory, we don't leave behind a partial page.
  224. */
  225. . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
  226. . = ALIGN(PAGE_SIZE);
  227. ___init_end = .;
  228. __end =.;
  229. STABS_DEBUG
  230. DWARF_DEBUG
  231. /DISCARD/ :
  232. {
  233. EXIT_TEXT
  234. EXIT_DATA
  235. *(.exitcall.exit)
  236. }
  237. }