vmlinux.lds.S

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
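/* The C-visible "jiffies" symbol (asm name "_jiffies") aliases the low
 * 32 bits of the 64-bit "jiffies_64" counter; Blackfin is little-endian,
 * so the low word sits at the counter's base address.
 */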
_jiffies = _jiffies_64;

SECTIONS
{
#ifdef CONFIG_RAMKERNEL
    . = CONFIG_BOOT_LOAD;
#else
    . = CONFIG_ROM_BASE;
#endif
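    /* A RAM kernel is loaded into (and runs from) external memory at
     * CONFIG_BOOT_LOAD; a ROM kernel executes its read-only sections in
     * place out of flash starting at CONFIG_ROM_BASE.
     */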
    /* None of the .text, .rodata, or .bss sections needs to be aligned,
     * so pack them back to back.
     */
    .text :
    {
        __text = .;
        _text = .;
        __stext = .;
        TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
        SCHED_TEXT
#endif
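        /* With CONFIG_SCHEDULE_L1 the scheduler runs from on-chip L1
         * instruction SRAM, so SCHED_TEXT is emitted into .text_l1 below
         * instead of here.
         */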
        LOCK_TEXT
        IRQENTRY_TEXT
        KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
        __sinittext = .;
        INIT_TEXT
        __einittext = .;
        EXIT_TEXT
#endif
        *(.text.*)
        *(.fixup)

#if !L1_CODE_LENGTH
        *(.l1.text)
#endif
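        /* The exception table pairs a faulting instruction address with
         * its fixup handler; the kernel's extable lookup walks the region
         * between ___start___ex_table and ___stop___ex_table.
         */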
        . = ALIGN(16);
        ___start___ex_table = .;
        *(__ex_table)
        ___stop___ex_table = .;

        __etext = .;
    }

    NOTES

    /* Just in case the first read-only access is a 32-bit one */
    RO_DATA(4)

    __rodata_end = .;

#ifdef CONFIG_ROMKERNEL
    . = CONFIG_BOOT_LOAD;
    .bss : AT(__rodata_end)
#else
    .bss :
#endif
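    /* For a ROM kernel, writable sections live in RAM at CONFIG_BOOT_LOAD
     * (the location-counter jump above). .bss itself occupies no space in
     * the image; its AT() merely anchors the flash load-address chain so
     * the LOADADDR() arithmetic below stays contiguous.
     */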
    {
        . = ALIGN(4);
        ___bss_start = .;

        *(.bss .bss.*)
        *(COMMON)
#if !L1_DATA_A_LENGTH
        *(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
        *(.l1.bss.B)
#endif
        . = ALIGN(4);
        ___bss_stop = .;
    }

#if defined(CONFIG_ROMKERNEL)
    .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
    .data :
#endif
    {
        __sdata = .;
        /* Placed first, so the DATA_DATA glob below doesn't suck it in */
        CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
        . = ALIGN(32);
        *(.data_l1.cacheline_aligned)
        *(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
        *(.l1.data.B)
#endif

#if !L2_LENGTH
        . = ALIGN(32);
        *(.data_l2.cacheline_aligned)
        *(.l2.data)
#endif

        DATA_DATA
        CONSTRUCTORS

        INIT_TASK_DATA(THREAD_SIZE)

        __edata = .;
    }
    __data_lma = LOADADDR(.data);
    __data_len = SIZEOF(.data);
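    /* For a ROM kernel, the early startup code is expected to copy
     * __data_len bytes from the flash image at __data_lma to __sdata in
     * RAM before any C code touches .data.
     */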
    /* The init section should be last, so when we free it, it goes into
     * the general memory pool, and (hopefully) will decrease fragmentation
     * a tiny bit. The init section has a _requirement_ that it be
     * PAGE_SIZE aligned.
     */
    . = ALIGN(PAGE_SIZE);
    ___init_begin = .;

#ifdef CONFIG_RAMKERNEL
    INIT_TEXT_SECTION(PAGE_SIZE)

    /* We have to discard exit text and such at runtime, not link time, to
     * handle embedded cross-section references (alt instructions, bug
     * table, eh_frame, etc...).  We need all of our .text up front and
     * .data after it for PCREL call issues.
     */
    .exit.text :
    {
        EXIT_TEXT
    }

    . = ALIGN(16);
    INIT_DATA_SECTION(16)
    PERCPU(4)

    .exit.data :
    {
        EXIT_DATA
    }

    .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
    .init.data : AT(__data_lma + __data_len)
    {
        __sinitdata = .;
        INIT_DATA
        INIT_SETUP(16)
        INIT_CALLS
        CON_INITCALL
        SECURITY_INITCALL
        INIT_RAM_FS

        . = ALIGN(4);
        ___per_cpu_load = .;
        ___per_cpu_start = .;
        *(.data.percpu.first)
        *(.data.percpu.page_aligned)
        *(.data.percpu)
        *(.data.percpu.shared_aligned)
        ___per_cpu_end = .;

        EXIT_DATA
        __einitdata = .;
    }
    __init_data_lma = LOADADDR(.init.data);
    __init_data_len = SIZEOF(.init.data);
    __init_data_end = .;
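    /* For a ROM kernel, init data (including the hand-rolled per-cpu area
     * above) likewise sits in flash at __init_data_lma; the startup code
     * is expected to copy it into RAM before initcalls run.
     */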
    .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
    {
        . = ALIGN(4);
        __stext_l1 = .;
        *(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
        SCHED_TEXT
#endif
        . = ALIGN(4);
        __etext_l1 = .;
    }
    __text_l1_lma = LOADADDR(.text_l1);
    __text_l1_len = SIZEOF(.text_l1);
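    /* .text_l1 is a classic VMA/LMA split: it runs from on-chip L1
     * instruction SRAM at L1_CODE_START but is stored after the init data
     * in the image; the boot code uses these symbols to copy
     * __text_l1_len bytes from __text_l1_lma into L1.  The ASSERT below
     * fails the link, rather than silently truncating, if the code does
     * not fit.
     */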
    ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

    .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
    {
        . = ALIGN(4);
        __sdata_l1 = .;
        *(.l1.data)
        __edata_l1 = .;

        . = ALIGN(32);
        *(.data_l1.cacheline_aligned)

        . = ALIGN(4);
        __sbss_l1 = .;
        *(.l1.bss)
        . = ALIGN(4);
        __ebss_l1 = .;
    }
    __data_l1_lma = LOADADDR(.data_l1);
    __data_l1_len = SIZEOF(.data_l1);
    ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

    .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
    {
        . = ALIGN(4);
        __sdata_b_l1 = .;
        *(.l1.data.B)
        __edata_b_l1 = .;

        . = ALIGN(4);
        __sbss_b_l1 = .;
        *(.l1.bss.B)
        . = ALIGN(4);
        __ebss_b_l1 = .;
    }
    __data_b_l1_lma = LOADADDR(.data_b_l1);
    __data_b_l1_len = SIZEOF(.data_b_l1);
    ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

    .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
    {
        . = ALIGN(4);
        __stext_l2 = .;
        *(.l2.text)
        . = ALIGN(4);
        __etext_l2 = .;

        . = ALIGN(4);
        __sdata_l2 = .;
        *(.l2.data)
        __edata_l2 = .;

        . = ALIGN(32);
        *(.data_l2.cacheline_aligned)

        . = ALIGN(4);
        __sbss_l2 = .;
        *(.l2.bss)
        . = ALIGN(4);
        __ebss_l2 = .;
    }
    __l2_lma = LOADADDR(.text_data_l2);
    __l2_len = SIZEOF(.text_data_l2);
    ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
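    /* On-chip L2 SRAM holds code, data, and bss together in one section;
     * it is stored after L1 data B in the image and copied out by the
     * boot code, with the same overflow ASSERT pattern as the L1 banks.
     */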
    /* Force trailing alignment of our init section so that when we
     * free our init memory, we don't leave behind a partial page.
     */
#ifdef CONFIG_RAMKERNEL
    . = __l2_lma + __l2_len;
#else
    . = __init_data_end;
#endif
    . = ALIGN(PAGE_SIZE);
    ___init_end = .;

    __end = .;
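    /* Debug sections are not allocated in memory, and DISCARDS (from
     * asm-generic/vmlinux.lds.h) throws away sections never needed at
     * runtime; input sections already claimed above are unaffected.
     */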
    STABS_DEBUG

    DWARF_DEBUG

    DISCARDS
}