/*
 * File:         arch/blackfin/kernel/vmlinux.lds.S
 * Based on:     none - original work
 * Author:
 *
 * Created:      Tue Sep 21 2004
 * Description:  Master linker script for blackfin architecture
 *
 * Modified:
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
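
/*
 * The Blackfin toolchain prefixes C symbols with an underscore, so map the
 * generic VMLINUX_SYMBOL() names onto their underscored equivalents before
 * pulling in asm-generic/vmlinux.lds.h.
 */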
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
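/* On this little-endian part, jiffies can simply alias the low word of jiffies_64. */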
_jiffies = _jiffies_64;

SECTIONS
{
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data nor bss sections need to be aligned,
	 * so pack them back to back.
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)
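		/* With no L1 instruction SRAM configured, code destined for
		 * .l1.text has nowhere special to go, so keep it in the
		 * ordinary .text output section.
		 */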
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif

		. = ALIGN(16);
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}
	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;

		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif

		. = ALIGN(4);
		___bss_stop = .;
	}
	.data :
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		*(.data.*)
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}
	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
	{
		. = ALIGN(PAGE_SIZE);
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
	}
	.init.data :
	{
		. = ALIGN(16);
		INIT_DATA
	}
	.init.setup :
	{
		. = ALIGN(16);
		___setup_start = .;
		*(.init.setup)
		___setup_end = .;
	}
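	/* INITCALLS expands to the per-level initcall tables (early, core,
	 * arch, subsys, ...) which are run in order during boot.
	 */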
	.initcall.init :
	{
		___initcall_start = .;
		INITCALLS
		___initcall_end = .;
	}
	.con_initcall.init :
	{
		___con_initcall_start = .;
		*(.con_initcall.init)
		___con_initcall_end = .;
	}
	SECURITY_INIT
	.init.ramfs :
	{
		. = ALIGN(4);
		___initramfs_start = .;
		*(.init.ramfs)
		___initramfs_end = .;
	}
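	/* The L1 SRAM sections below are linked (VMA) at their on-chip SRAM
	 * addresses but loaded (AT/LMA) back to back after .init.ramfs.  Early
	 * boot code copies them from __l1_lma_start into the real L1 memories,
	 * and since the load-time copies sit inside the init region, that space
	 * is reclaimed when init memory is freed.
	 */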
	__l1_lma_start = .;

	.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)

		. = ALIGN(4);
		__etext_l1 = .;
	}

	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)

		. = ALIGN(4);
		__ebss_l1 = .;
	}

	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)

		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
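	/* L2 SRAM gets the same treatment: linked at L2_START but loaded
	 * right after the L1 images, to be copied into place at boot.
	 */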
	__l2_lma_start = .;

	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
	. = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG

	DWARF_DEBUG
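	/* Exit-path code and data are never needed in the running image,
	 * so discard them at link time rather than carry them around.
	 */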
	/DISCARD/ :
	{
		EXIT_TEXT
		EXIT_DATA
		*(.exitcall.exit)
	}
}