vmlinux.lds.S

/*
 * File: arch/blackfin/kernel/vmlinux.lds.S
 * Based on: none - original work
 * Author:
 *
 * Created: Tue Sep 21 2004
 * Description: Master linker script for blackfin architecture
 *
 * Modified:
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
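
/*
 * C symbols carry a leading underscore in the Blackfin ELF symbol table,
 * so redefine VMLINUX_SYMBOL() to add it before pulling in the generic
 * section macros.
 */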
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
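/*
 * Blackfin is 32-bit little-endian, so aliasing jiffies to jiffies_64
 * makes it read the low word of the 64-bit counter.
 */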
_jiffies = _jiffies_64;

SECTIONS
{
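	/*
	 * Start laying out the image at the configured boot load address
	 * (CONFIG_BOOT_LOAD) in external memory.
	 */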
	. = CONFIG_BOOT_LOAD;

	/* Neither the text, ro_data nor bss sections need to be aligned,
	 * so pack them back to back.
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)
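
		/*
		 * With no L1 instruction SRAM configured, code placed in
		 * .l1.text has nowhere special to go, so keep it in the
		 * normal .text output section instead.
		 */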
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif

		. = ALIGN(16);
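		/*
		 * The exception fixup table is emitted here inside .text;
		 * ___start/___stop___ex_table bound the region searched by
		 * the fault handler.
		 */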
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}

	NOTES

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)
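
	/* ___bss_start/___bss_stop bound the region zeroed by the early boot code. */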
	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;

		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

	.data :
	{
		__sdata = .;

		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned.
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
	{
		. = ALIGN(PAGE_SIZE);
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
	}
	.init.data :
	{
		. = ALIGN(16);
		INIT_DATA
	}
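
	/*
	 * Tables built up by the __setup, initcall, and console_initcall
	 * macros; the generic init code walks them in order at boot.
	 */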
	.init.setup :
	{
		. = ALIGN(16);
		___setup_start = .;
		*(.init.setup)
		___setup_end = .;
	}
	.initcall.init :
	{
		___initcall_start = .;
		INITCALLS
		___initcall_end = .;
	}
	.con_initcall.init :
	{
		___con_initcall_start = .;
		*(.con_initcall.init)
		___con_initcall_end = .;
	}
	PERCPU(4)
	SECURITY_INIT
	.init.ramfs :
	{
		. = ALIGN(4);
		___initramfs_start = .;
		*(.init.ramfs)
		. = ALIGN(4);
		___initramfs_end = .;
	}
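
	/*
	 * The on-chip L1/L2 sections below are linked at their SRAM run
	 * addresses but loaded (via AT()) back to back in external memory,
	 * immediately after .init.ramfs. __l1_lma_start marks the start of
	 * that load image so the startup code knows where to copy from.
	 */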
	__l1_lma_start = .;

	.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)
		. = ALIGN(4);
		__etext_l1 = .;
	}
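	/* Fail the link outright if a section does not fit in its SRAM bank. */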
	ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")

	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")

	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
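
	/*
	 * L2 SRAM gets a single combined text/data section, loaded right
	 * after the L1 data B image.
	 */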
	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);

	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
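	/*
	 * Pull the location counter back out of on-chip SRAM to the end of
	 * the L2 load image in external memory, so the symbols below cover
	 * the whole loaded kernel image.
	 */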
	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG
	DWARF_DEBUG
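
	/*
	 * Exit-time code and data are never used by the built-in kernel
	 * image (modules carry their own copies), so discard them.
	 */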
	/DISCARD/ :
	{
		EXIT_TEXT
		EXIT_DATA
		*(.exitcall.exit)
	}
}