/* head.S — OpenRISC kernel head (extraction artifacts removed) */
  1. /*
  2. * OpenRISC head.S
  3. *
  4. * Linux architectural port borrowing liberally from similar works of
  5. * others. All original copyrights apply as per the original source
  6. * declaration.
  7. *
  8. * Modifications for the OpenRISC architecture:
  9. * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
  10. * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. */
  17. #include <linux/linkage.h>
  18. #include <linux/threads.h>
  19. #include <linux/errno.h>
  20. #include <linux/init.h>
  21. #include <asm/processor.h>
  22. #include <asm/page.h>
  23. #include <asm/mmu.h>
  24. #include <asm/pgtable.h>
  25. #include <asm/cache.h>
  26. #include <asm/spr_defs.h>
  27. #include <asm/asm-offsets.h>
  28. #include <linux/of_fdt.h>
/* Convert the virtual address in 'rs' to a physical address in 'rd'
 * by adding the two's complement of KERNELBASE (i.e. subtracting it).
 * Assumes the low 16 bits of KERNELBASE are zero so hi() suffices. */
#define tophys(rd,rs)			\
	l.movhi	rd,hi(-KERNELBASE)	;\
	l.add	rd,rd,rs

/* Zero a general-purpose register (r0 always reads as zero). */
#define CLEAR_GPR(gpr)			\
	l.or	gpr,r0,r0

/* Load the full 32-bit value of 'symbol' into 'gpr' via a movhi/ori pair. */
#define LOAD_SYMBOL_2_GPR(gpr,symbol)	\
	l.movhi	gpr,hi(symbol)		;\
	l.ori	gpr,gpr,lo(symbol)

/* Physical base address of the early-boot UART (used by _emergency_print). */
#define UART_BASE_ADD	0x90000000

/* SR while handling an exception: data/insn MMUs and caches enabled,
 * supervisor mode; interrupts and tick timer remain disabled. */
#define EXCEPTION_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)

/* SR for syscall handling: as EXCEPTION_SR but with external interrupts
 * (IEE) and tick-timer exceptions (TEE) also enabled. */
#define SYSCALL_SR	(SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
/* ============================================[ tmp store locations ]=== */

/*
 * emergency_print temporary stores
 *
 * Registers are spilled to fixed low physical addresses (offsets from r0,
 * which always reads as zero) because at this point no stack can be trusted.
 */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)

#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)

#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)

#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)

#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)

#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)

/*
 * TLB miss handlers temporary stores
 */
#define EXCEPTION_STORE_GPR9		l.sw    0x10(r0),r9
#define EXCEPTION_LOAD_GPR9		l.lwz   r9,0x10(r0)

#define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)

#define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)

#define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)

#define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)

#define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)

/*
 * EXCEPTION_HANDLE temporary stores
 */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)

#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)

#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)

/*
 * For UNHANDLED_EXCEPTION
 */
#define EXCEPTION_T_STORE_GPR31		l.sw    0x84(r0),r31
#define EXCEPTION_T_LOAD_GPR31(reg)	l.lwz   reg,0x84(r0)
/* =========================================================[ macros ]=== */

/* Load the current kernel page-directory pointer (current_pgd) into 'reg',
 * using 't1' as scratch to hold the physical address of the variable
 * (the MMU may be off when this runs). */
#define GET_CURRENT_PGD(reg,t1)					\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
	tophys  (t1,reg)					;\
	l.lwz   reg,0(t1)

/*
 * DSCR: this is a common hook for handling exceptions. it will save
 *       the needed registers, set up stack and pointer to current
 *       then jump to the handler while enabling MMU
 *
 * PRMS: handler	- a function to jump to. it has to save the
 *			remaining registers to kernel stack, call
 *			appropriate arch-independent exception handler
 *			and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 *	 to the new created exception frame pointed to by r1
 *
 *	 r1  - ksp	pointing to the new (exception) frame
 *	 r4  - EEAR	exception EA
 *	 r10 - current	pointing to current_thread_info struct
 *	 r12 - syscall	0, since we didn't come from syscall
 *	 r13 - temp	it actually contains new SR, not needed anymore
 *	 r31 - handler	address of the handler we'll jump to
 *
 *	 handler has to save remaining registers to the exception
 *	 ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 *	 by processor disabling all exceptions/interrupts when exception
 *	 occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */

#define EXCEPTION_HANDLE(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	l.mfspr r30,r0,SPR_ESR_BASE				;\
	l.andi  r30,r30,SPR_SR_SM				;\
	/* flag set below iff SM==0, i.e. we came from user mode */ ;\
	l.sfeqi r30,0						;\
	EXCEPTION_T_STORE_GPR10					;\
	l.bnf   2f			/* kernel_mode */	;\
	EXCEPTION_T_STORE_SP		/* delay slot */	;\
1: /* user_mode: fetch kernel stack from thread_info */		;\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys  (r30,r1)					;\
	/* r10: current_thread_info  */				;\
	l.lwz   r10,0(r30)					;\
	tophys  (r30,r10)					;\
	l.lwz   r1,(TI_KSP)(r30)				;\
	/* fall through */					;\
2: /* kernel_mode: */						;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
	/* r12: temp, syscall indicator */			;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw    PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw    PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw    PT_SP(r30),r12					;\
	/* save exception r4, set r4 = EA */			;\
	l.sw    PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- turn on MMU: rfe below loads SR from ESR ----- */ ;\
	l.ori	r30,r0,(EXCEPTION_SR)				;\
	l.mtspr	r0,r30,SPR_ESR_BASE				;\
	/* r30: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe
/*
 * this doesn't work
 *
 *
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler)				\
 *	l.ori   r3,r0,0x1					;\
 *	l.mtspr r0,r3,SPR_SR					;\
 *      l.movhi r3,hi(0xf0000100)				;\
 *      l.ori   r3,r3,lo(0xf0000100)				;\
 *	l.jr	r3						;\
 *	l.nop	1
 *
 * #endif
 */

/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (if we have a PT_SP or current pointer
 *       corruption) and set them up from 'current_set'
 *       It first dumps diagnostics via _emergency_print, then builds a
 *       fresh exception frame on a known-good stack and jumps to handler.
 */
#define UNHANDLED_EXCEPTION(handler)				\
	EXCEPTION_T_STORE_GPR31					;\
	EXCEPTION_T_STORE_GPR10					;\
	EXCEPTION_T_STORE_SP					;\
	/* temporary store r3, r9 into r1, r10 */		;\
	l.addi	r1,r3,0x0					;\
	l.addi	r10,r9,0x0					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	l.ori	r3,r0,lo(_string_unhandled_exception)		;\
	l.mfspr	r3,r0,SPR_NPC					;\
	l.jal	_emergency_print_nr				;\
	l.andi	r3,r3,0x1f00					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal	_emergency_print				;\
	l.ori	r3,r0,lo(_string_epc_prefix)			;\
	l.jal	_emergency_print_nr				;\
	l.mfspr	r3,r0,SPR_EPCR_BASE				;\
	l.jal	_emergency_print				;\
	l.ori	r3,r0,lo(_string_nl)				;\
	/* end of printing */					;\
	l.addi	r3,r1,0x0					;\
	l.addi	r9,r10,0x0					;\
	/* extract current, ksp from current_set */		;\
	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
	/* r12: temp, syscall indicator, r13 temp */		;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r31 is __pa(KSP) */			;\
	tophys  (r31,r1)					;\
	l.sw    PT_GPR12(r31),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r31),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r31),r12					;\
	/* save r31 */						;\
	EXCEPTION_T_LOAD_GPR31(r12)				;\
	l.sw	PT_GPR31(r31),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw	PT_GPR10(r31),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw	PT_SP(r31),r12					;\
	l.sw    PT_GPR13(r31),r13				;\
	/* --> */						;\
	/* save exception r4, set r4 = EA */			;\
	l.sw	PT_GPR4(r31),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- play a MMU trick ----- */			;\
	l.ori	r31,r0,(EXCEPTION_SR)				;\
	l.mtspr	r0,r31,SPR_ESR_BASE				;\
	/* r31: EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r31,handler)				;\
	l.mtspr r0,r31,SPR_EPCR_BASE				;\
	l.rfe
/* =====================================================[ exceptions] === */

/* Exception vector table. Each OpenRISC exception vector is 0x100 bytes
 * apart starting at 0x100; the hardware jumps here with exceptions and
 * interrupts disabled. */

/* ---[ 0x100: RESET exception ]----------------------------------------- */
	.org 0x100
	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r4, _start)
	tophys	(r3,r4)			/* MMU disabled */
	l.jr	r3
	l.nop

/* ---[ 0x200: BUS exception ]------------------------------------------- */
	.org 0x200
_dispatch_bus_fault:
	EXCEPTION_HANDLE(_bus_fault_handler)

/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
	.org 0x300
_dispatch_do_dpage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)

/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
	.org 0x400
_dispatch_do_ipage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)

/* ---[ 0x500: Timer exception ]----------------------------------------- */
	.org 0x500
	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]------------------------------------- */
	.org 0x600
	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
	.org 0x700
	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */
	.org 0x800
	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
	.org 0x900
	l.j	boot_dtlb_miss_handler
	l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
	.org 0xa00
	l.j	boot_itlb_miss_handler
	l.nop

/* ---[ 0xb00: Range exception ]----------------------------------------- */
	.org 0xb00
	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
	.org 0xc00
	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Trap exception ]------------------------------------------ */
	.org 0xd00
	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */
	.org 0xe00
//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
	.org 0xf00
	UNHANDLED_EXCEPTION(_vector_0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */
	.org 0x1000
	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */
	.org 0x1100
	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */
	.org 0x1200
	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */
	.org 0x1300
	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */
	.org 0x1400
	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */
	.org 0x1500
	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */
	.org 0x1600
	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */
	.org 0x1700
	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */
	.org 0x1800
	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */
	.org 0x1900
	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
	.org 0x1a00
	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
	.org 0x1b00
	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
	.org 0x1c00
	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
	.org 0x1d00
	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
	.org 0x1e00
	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
	.org 0x1f00
	UNHANDLED_EXCEPTION(_vector_0x1f00)

	/* pad to the end of the vector area */
	.org 0x2000
/* ===================================================[ kernel start ]=== */

/*    .text*/

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't... */

	__HEAD
	.global _start
_start:
	/* save kernel parameters: the boot loader passes the device-tree
	 * blob address in r3; stash it in r25, which is deliberately
	 * skipped by the register clearing below. */
	l.or	r25,r0,r3	/* pointer to fdt */

	/*
	 * ensure a deterministic start
	 */

	/* SR = 0x1: supervisor mode only; MMUs, caches and interrupts off */
	l.ori	r3,r0,0x1
	l.mtspr	r0,r3,SPR_SR

	CLEAR_GPR(r1)
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r10)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	/* r25 intentionally not cleared: it holds the fdt pointer */
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

	/*
	 * set up initial ksp and current
	 */
	/* NOTE(review): 0x2000 is presumably THREAD_SIZE (8 KiB) -- the top
	 * of the init task's stack; confirm against asm/thread_info.h */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000)	// setup kernel stack
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)
	l.sw	TI_KSP(r31), r1

	l.ori	r4,r0,0x0

	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
clear_bss:
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)

	/* MMU is off, so zero through physical addresses */
	tophys(r28,r24)
	tophys(r30,r26)

	CLEAR_GPR(r24)
	CLEAR_GPR(r26)

1:
	l.sw	(0)(r28),r0
	l.sfltu	r28,r30
	l.bf	1b
	l.addi	r28,r28,4	/* delay slot: advance one word */
enable_ic:
	l.jal	_ic_enable	/* invalidate + enable insn cache (if present) */
	l.nop

enable_dc:
	l.jal	_dc_enable	/* invalidate + enable data cache (if present) */
	l.nop

flush_tlb:
	/*
	 *  I N V A L I D A T E   T L B   e n t r i e s
	 *
	 * Write zero to every DTLB/ITLB match register so no stale
	 * translations survive into enable_mmu below.
	 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128	/* Maximum number of sets */
1:
	l.mtspr	r5,r0,0x0
	l.mtspr	r6,r0,0x0

	l.addi	r5,r5,1
	l.addi	r6,r6,1
	l.sfeq	r7,r0
	l.bnf	1b
	l.addi	r7,r7,-1	/* delay slot: decrement set counter */
/* The MMU needs to be enabled before or32_early_setup is called */

enable_mmu:
	/*
	 * enable dmmu & immu by setting SPR_SR_DME and SPR_SR_IME in SR
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	/* NOTE(review): the nop slide presumably drains the pipeline while
	 * the MMU enable takes effect -- confirm against the or1k manual */
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

	// reset the simulation counters
	l.nop 5

	/* check fdt header magic word */
	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or32_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
	l.jalr	r24
	l.nop
clear_regs:
	/*
	 * clear all GPRS to increase determinism
	 * (r1 = kernel stack pointer and r10 = current are kept)
	 */
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

jump_start_kernel:
	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	l.nop
/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	.align 0x2000

/* _ic_enable: invalidate and enable the instruction cache.
 * Returns via r9 (link register). Clobbers r5, r6, r14, r16, r24,
 * r26, r28, r30 and the flag. Skips everything if SPR_UPR says no
 * instruction cache is present. */
_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable IC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_ICE	/* r5 = ~SPR_SR_ICE */
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate IC: write every block address to SPR_ICBIR.
	 * r5 = block_size << log2(nsets) = total cache size in bytes */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
//	l.mul	r5,r14,r16
//	l.trap	1
//	l.addi	r5,r0,IC_SIZE
1:
	l.mtspr	r0,r6,SPR_ICBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14	/* delay slot: next block */
//	l.addi	r6,r6,IC_LINE

	/* Enable IC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_ICE
	l.mtspr	r0,r6,SPR_SR
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
9:
	l.jr	r9
	l.nop
/* _dc_enable: invalidate and enable the data cache.
 * Mirrors _ic_enable but for the DC (SPR_DCCFGR / SPR_DCBIR) and
 * without the post-enable nop slide. Returns via r9 (link register).
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30 and the flag. */
_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable DC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_DCE	/* r5 = ~SPR_SR_DCE */
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate DC: write every block address to SPR_DCBIR */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
1:
	l.mtspr	r0,r6,SPR_DCBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14	/* delay slot: next block */

	/* Enable DC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_DCE
	l.mtspr	r0,r6,SPR_SR
9:
	l.jr	r9
	l.nop
/* ===============================================[ page table masks ]=== */

/* bit 4 is used in hardware as write back cache bit. we never use this bit
 * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
 * writing into hardware pte's
 */

#define DTLB_UP_CONVERT_MASK	0x3fa
#define ITLB_UP_CONVERT_MASK	0x3a

/* for SMP we'd have (this is a bit subtle, CC must be always set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK	0x3fb
#define ITLB_SMP_CONVERT_MASK	0x3b

/* ---[ boot dtlb miss handler ]----------------------------------------- */

/* Early (boot-time) DTLB miss handler: installs a 1:1-ish translation
 * for the faulting EA directly into the DTLB, before the real page
 * tables are in use. EAs above 0xbfffffff (kernel virtual space) are
 * translated via tophys; lower EAs are mapped EA == PA. */
boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK	0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *                            - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK	0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK	0xfffff000
#define PPN_MASK	0xfffff000

	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE	//
	l.andi	r6,r6,SPR_SR_SM		// are we in kernel mode ?
	l.sfeqi	r6,0			// r6 == 0x1 --> SM
	l.bf	exit_with_no_dtranslation	//
	l.nop
#endif

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5

	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA

immediate_translation:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	/* compute the DTLB set index: nsets comes from DMMUCFGR */
	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number DMMU sets
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4		// r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr	r2,r5,SPR_DTLBMR_BASE(0)	// set DTLBMR

	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf	1f			// goto out
	l.and	r3,r4,r4		// delay slot :: r3 <- r4 (if flag==1)

	tophys(r3,r4)			// r3 <- PA

1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(DTLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_TR_MASK)	// r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr	r2,r5,SPR_DTLBTR_BASE(0)	// set DTLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe				// SR <- ESR, PC <- EPC

exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	l.j	_dispatch_bus_fault
  736. /* ---[ boot itlb miss handler ]----------------------------------------- */
  737. boot_itlb_miss_handler:
  738. /* mask for ITLB_MR register: - sets V (valid) bit,
  739. * - sets bits belonging to VPN (15-12)
  740. */
  741. #define ITLB_MR_MASK 0xfffff001
  742. /* mask for ITLB_TR register: - sets A (access) bit,
  743. * - sets SXE (superuser execute) bit
  744. * - sets bits belonging to VPN (15-12)
  745. */
  746. #define ITLB_TR_MASK 0xfffff050
  747. /*
  748. #define VPN_MASK 0xffffe000
  749. #define PPN_MASK 0xffffe000
  750. */
  751. EXCEPTION_STORE_GPR2
  752. EXCEPTION_STORE_GPR3
  753. EXCEPTION_STORE_GPR4
  754. EXCEPTION_STORE_GPR5
  755. EXCEPTION_STORE_GPR6
  756. #if 0
  757. l.mfspr r6,r0,SPR_ESR_BASE //
  758. l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
  759. l.sfeqi r6,0 // r6 == 0x1 --> SM
  760. l.bf exit_with_no_itranslation
  761. l.nop
  762. #endif
  763. l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
  764. earlyearly:
  765. CLEAR_GPR(r6)
  766. l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
  767. l.mfspr r6, r0, SPR_IMMUCFGR
  768. l.andi r6, r6, SPR_IMMUCFGR_NTS
  769. l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
  770. l.ori r5, r0, 0x1
  771. l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
  772. l.addi r6, r5, -1 // r6 = nsets mask
  773. l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
  774. l.or r6,r6,r4 // r6 <- r4
  775. l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
  776. l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
  777. l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
  778. l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
  779. l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
  780. /*
  781. * set up ITLB with no translation for EA <= 0x0fffffff
  782. *
  783. * we need this for head.S mapping (EA = PA). if we move all functions
  784. * which run with mmu enabled into entry.S, we might be able to eliminate this.
  785. *
  786. */
  787. LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
  788. l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xb0ffffff >= EA)
  789. l.bf 1f // goto out
  790. l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1)
  791. tophys(r3,r4) // r3 <- PA
  792. 1:
  793. l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
  794. l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
  795. l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
  796. l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
  797. l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
  798. EXCEPTION_LOAD_GPR6
  799. EXCEPTION_LOAD_GPR5
  800. EXCEPTION_LOAD_GPR4
  801. EXCEPTION_LOAD_GPR3
  802. EXCEPTION_LOAD_GPR2
  803. l.rfe // SR <- ESR, PC <- EPC
  804. exit_with_no_itranslation:
  805. EXCEPTION_LOAD_GPR4
  806. EXCEPTION_LOAD_GPR6
  807. l.j _dispatch_bus_fault
  808. l.nop
  809. /* ====================================================================== */
  810. /*
  811. * Stuff below here shouldn't go into .head section... maybe this stuff
  812. * can be moved to entry.S ???
  813. */
  814. /* ==============================================[ DTLB miss handler ]=== */
  815. /*
  816. * Comments:
  817. * Exception handlers are entered with MMU off so the following handler
  818. * needs to use physical addressing
  819. *
  820. */
  821. .text
  822. ENTRY(dtlb_miss_handler)
  823. EXCEPTION_STORE_GPR2
  824. EXCEPTION_STORE_GPR3
  825. EXCEPTION_STORE_GPR4
  826. EXCEPTION_STORE_GPR5
  827. EXCEPTION_STORE_GPR6
  828. /*
  829. * get EA of the miss
  830. */
  831. l.mfspr r2,r0,SPR_EEAR_BASE
  832. /*
  833. * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
  834. */
  835. GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
  836. l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
  837. l.slli r4,r4,0x2 // to get address << 2
  838. l.add r5,r4,r3 // r4 is pgd_index(daddr)
  839. /*
  840. * if (pmd_none(*pmd))
  841. * goto pmd_none:
  842. */
  843. tophys (r4,r5)
  844. l.lwz r3,0x0(r4) // get *pmd value
  845. l.sfne r3,r0
  846. l.bnf d_pmd_none
  847. l.andi r3,r3,~PAGE_MASK //0x1fff // ~PAGE_MASK
  848. /*
  849. * if (pmd_bad(*pmd))
  850. * pmd_clear(pmd)
  851. * goto pmd_bad:
  852. */
  853. // l.sfeq r3,r0 // check *pmd value
  854. // l.bf d_pmd_good
  855. l.addi r3,r0,0xffffe000 // PAGE_MASK
  856. // l.j d_pmd_bad
  857. // l.sw 0x0(r4),r0 // clear pmd
  858. d_pmd_good:
  859. /*
  860. * pte = *pte_offset(pmd, daddr);
  861. */
  862. l.lwz r4,0x0(r4) // get **pmd value
  863. l.and r4,r4,r3 // & PAGE_MASK
  864. l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
  865. l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
  866. l.slli r3,r3,0x2 // to get address << 2
  867. l.add r3,r3,r4
  868. l.lwz r2,0x0(r3) // this is pte at last
  869. /*
  870. * if (!pte_present(pte))
  871. */
  872. l.andi r4,r2,0x1
  873. l.sfne r4,r0 // is pte present
  874. l.bnf d_pte_not_present
  875. l.addi r3,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
  876. /*
  877. * fill DTLB TR register
  878. */
  879. l.and r4,r2,r3 // apply the mask
  880. // Determine number of DMMU sets
  881. l.mfspr r6, r0, SPR_DMMUCFGR
  882. l.andi r6, r6, SPR_DMMUCFGR_NTS
  883. l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
  884. l.ori r3, r0, 0x1
  885. l.sll r3, r3, r6 // r3 = number DMMU sets DMMUCFGR
  886. l.addi r6, r3, -1 // r6 = nsets mask
  887. l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
  888. //NUM_TLB_ENTRIES
  889. l.mtspr r5,r4,SPR_DTLBTR_BASE(0)
  890. /*
  891. * fill DTLB MR register
  892. */
  893. l.mfspr r2,r0,SPR_EEAR_BASE
  894. l.addi r3,r0,0xffffe000 // PAGE_MASK
  895. l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
  896. l.ori r4,r4,0x1 // set hardware valid bit: DTBL_MR entry
  897. l.mtspr r5,r4,SPR_DTLBMR_BASE(0)
  898. EXCEPTION_LOAD_GPR2
  899. EXCEPTION_LOAD_GPR3
  900. EXCEPTION_LOAD_GPR4
  901. EXCEPTION_LOAD_GPR5
  902. EXCEPTION_LOAD_GPR6
  903. l.rfe
  904. d_pmd_bad:
  905. l.nop 1
  906. EXCEPTION_LOAD_GPR2
  907. EXCEPTION_LOAD_GPR3
  908. EXCEPTION_LOAD_GPR4
  909. EXCEPTION_LOAD_GPR5
  910. EXCEPTION_LOAD_GPR6
  911. l.rfe
  912. d_pmd_none:
  913. d_pte_not_present:
  914. EXCEPTION_LOAD_GPR2
  915. EXCEPTION_LOAD_GPR3
  916. EXCEPTION_LOAD_GPR4
  917. EXCEPTION_LOAD_GPR5
  918. EXCEPTION_LOAD_GPR6
  919. l.j _dispatch_do_dpage_fault
  920. l.nop
  921. /* ==============================================[ ITLB miss handler ]=== */
  922. ENTRY(itlb_miss_handler)
  923. EXCEPTION_STORE_GPR2
  924. EXCEPTION_STORE_GPR3
  925. EXCEPTION_STORE_GPR4
  926. EXCEPTION_STORE_GPR5
  927. EXCEPTION_STORE_GPR6
  928. /*
  929. * get EA of the miss
  930. */
  931. l.mfspr r2,r0,SPR_EEAR_BASE
  932. /*
  933. * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
  934. *
  935. */
  936. GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
  937. l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
  938. l.slli r4,r4,0x2 // to get address << 2
  939. l.add r5,r4,r3 // r4 is pgd_index(daddr)
  940. /*
  941. * if (pmd_none(*pmd))
  942. * goto pmd_none:
  943. */
  944. tophys (r4,r5)
  945. l.lwz r3,0x0(r4) // get *pmd value
  946. l.sfne r3,r0
  947. l.bnf i_pmd_none
  948. l.andi r3,r3,0x1fff // ~PAGE_MASK
  949. /*
  950. * if (pmd_bad(*pmd))
  951. * pmd_clear(pmd)
  952. * goto pmd_bad:
  953. */
  954. // l.sfeq r3,r0 // check *pmd value
  955. // l.bf i_pmd_good
  956. l.addi r3,r0,0xffffe000 // PAGE_MASK
  957. // l.j i_pmd_bad
  958. // l.sw 0x0(r4),r0 // clear pmd
  959. i_pmd_good:
  960. /*
  961. * pte = *pte_offset(pmd, iaddr);
  962. *
  963. */
  964. l.lwz r4,0x0(r4) // get **pmd value
  965. l.and r4,r4,r3 // & PAGE_MASK
  966. l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
  967. l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
  968. l.slli r3,r3,0x2 // to get address << 2
  969. l.add r3,r3,r4
  970. l.lwz r2,0x0(r3) // this is pte at last
  971. /*
  972. * if (!pte_present(pte))
  973. *
  974. */
  975. l.andi r4,r2,0x1
  976. l.sfne r4,r0 // is pte present
  977. l.bnf i_pte_not_present
  978. l.addi r3,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
  979. /*
  980. * fill ITLB TR register
  981. */
  982. l.and r4,r2,r3 // apply the mask
  983. l.andi r3,r2,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
  984. // l.andi r3,r2,0x400 // _PAGE_EXEC
  985. l.sfeq r3,r0
  986. l.bf itlb_tr_fill //_workaround
  987. // Determine number of IMMU sets
  988. l.mfspr r6, r0, SPR_IMMUCFGR
  989. l.andi r6, r6, SPR_IMMUCFGR_NTS
  990. l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
  991. l.ori r3, r0, 0x1
  992. l.sll r3, r3, r6 // r3 = number IMMU sets IMMUCFGR
  993. l.addi r6, r3, -1 // r6 = nsets mask
  994. l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
  995. /*
  996. * __PHX__ :: fixme
  997. * we should not just blindly set executable flags,
  998. * but it does help with ping. the clean way would be to find out
  999. * (and fix it) why stack doesn't have execution permissions
  1000. */
  1001. itlb_tr_fill_workaround:
  1002. l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
  1003. itlb_tr_fill:
  1004. l.mtspr r5,r4,SPR_ITLBTR_BASE(0)
  1005. /*
  1006. * fill DTLB MR register
  1007. */
  1008. l.mfspr r2,r0,SPR_EEAR_BASE
  1009. l.addi r3,r0,0xffffe000 // PAGE_MASK
  1010. l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
  1011. l.ori r4,r4,0x1 // set hardware valid bit: DTBL_MR entry
  1012. l.mtspr r5,r4,SPR_ITLBMR_BASE(0)
  1013. EXCEPTION_LOAD_GPR2
  1014. EXCEPTION_LOAD_GPR3
  1015. EXCEPTION_LOAD_GPR4
  1016. EXCEPTION_LOAD_GPR5
  1017. EXCEPTION_LOAD_GPR6
  1018. l.rfe
  1019. i_pmd_bad:
  1020. l.nop 1
  1021. EXCEPTION_LOAD_GPR2
  1022. EXCEPTION_LOAD_GPR3
  1023. EXCEPTION_LOAD_GPR4
  1024. EXCEPTION_LOAD_GPR5
  1025. EXCEPTION_LOAD_GPR6
  1026. l.rfe
  1027. i_pmd_none:
  1028. i_pte_not_present:
  1029. EXCEPTION_LOAD_GPR2
  1030. EXCEPTION_LOAD_GPR3
  1031. EXCEPTION_LOAD_GPR4
  1032. EXCEPTION_LOAD_GPR5
  1033. EXCEPTION_LOAD_GPR6
  1034. l.j _dispatch_do_ipage_fault
  1035. l.nop
  1036. /* ==============================================[ boot tlb handlers ]=== */
  1037. /* =================================================[ debugging aids ]=== */
  1038. .align 64
  1039. _immu_trampoline:
  1040. .space 64
  1041. _immu_trampoline_top:
  1042. #define TRAMP_SLOT_0 (0x0)
  1043. #define TRAMP_SLOT_1 (0x4)
  1044. #define TRAMP_SLOT_2 (0x8)
  1045. #define TRAMP_SLOT_3 (0xc)
  1046. #define TRAMP_SLOT_4 (0x10)
  1047. #define TRAMP_SLOT_5 (0x14)
  1048. #define TRAMP_FRAME_SIZE (0x18)
  1049. ENTRY(_immu_trampoline_workaround)
  1050. // r2 EEA
  1051. // r6 is physical EEA
  1052. tophys(r6,r2)
  1053. LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
  1054. tophys (r3,r5) // r3 is trampoline (physical)
  1055. LOAD_SYMBOL_2_GPR(r4,0x15000000)
  1056. l.sw TRAMP_SLOT_0(r3),r4
  1057. l.sw TRAMP_SLOT_1(r3),r4
  1058. l.sw TRAMP_SLOT_4(r3),r4
  1059. l.sw TRAMP_SLOT_5(r3),r4
  1060. // EPC = EEA - 0x4
  1061. l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
  1062. l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
  1063. l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
  1064. l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
  1065. l.srli r5,r4,26 // check opcode for write access
  1066. l.sfeqi r5,0 // l.j
  1067. l.bf 0f
  1068. l.sfeqi r5,0x11 // l.jr
  1069. l.bf 1f
  1070. l.sfeqi r5,1 // l.jal
  1071. l.bf 2f
  1072. l.sfeqi r5,0x12 // l.jalr
  1073. l.bf 3f
  1074. l.sfeqi r5,3 // l.bnf
  1075. l.bf 4f
  1076. l.sfeqi r5,4 // l.bf
  1077. l.bf 5f
  1078. 99:
  1079. l.nop
  1080. l.j 99b // should never happen
  1081. l.nop 1
  1082. // r2 is EEA
  1083. // r3 is trampoline address (physical)
  1084. // r4 is instruction
  1085. // r6 is physical(EEA)
  1086. //
  1087. // r5
  1088. 2: // l.jal
  1089. /* 19 20 aa aa l.movhi r9,0xaaaa
  1090. * a9 29 bb bb l.ori r9,0xbbbb
  1091. *
  1092. * where 0xaaaabbbb is EEA + 0x4 shifted right 2
  1093. */
  1094. l.addi r6,r2,0x4 // this is 0xaaaabbbb
  1095. // l.movhi r9,0xaaaa
  1096. l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
  1097. l.sh (TRAMP_SLOT_0+0x0)(r3),r5
  1098. l.srli r5,r6,16
  1099. l.sh (TRAMP_SLOT_0+0x2)(r3),r5
  1100. // l.ori r9,0xbbbb
  1101. l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
  1102. l.sh (TRAMP_SLOT_1+0x0)(r3),r5
  1103. l.andi r5,r6,0xffff
  1104. l.sh (TRAMP_SLOT_1+0x2)(r3),r5
  1105. /* falthrough, need to set up new jump offset */
  1106. 0: // l.j
  1107. l.slli r6,r4,6 // original offset shifted left 6 - 2
  1108. // l.srli r6,r6,6 // original offset shifted right 2
  1109. l.slli r4,r2,4 // old jump position: EEA shifted left 4
  1110. // l.srli r4,r4,6 // old jump position: shifted right 2
  1111. l.addi r5,r3,0xc // new jump position (physical)
  1112. l.slli r5,r5,4 // new jump position: shifted left 4
  1113. // calculate new jump offset
  1114. // new_off = old_off + (old_jump - new_jump)
  1115. l.sub r5,r4,r5 // old_jump - new_jump
  1116. l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
  1117. l.srli r5,r5,6 // new offset shifted right 2
  1118. // r5 is new jump offset
  1119. // l.j has opcode 0x0...
  1120. l.sw TRAMP_SLOT_2(r3),r5 // write it back
  1121. l.j trampoline_out
  1122. l.nop
  1123. /* ----------------------------- */
  1124. 3: // l.jalr
  1125. /* 19 20 aa aa l.movhi r9,0xaaaa
  1126. * a9 29 bb bb l.ori r9,0xbbbb
  1127. *
  1128. * where 0xaaaabbbb is EEA + 0x4 shifted right 2
  1129. */
  1130. l.addi r6,r2,0x4 // this is 0xaaaabbbb
  1131. // l.movhi r9,0xaaaa
  1132. l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
  1133. l.sh (TRAMP_SLOT_0+0x0)(r3),r5
  1134. l.srli r5,r6,16
  1135. l.sh (TRAMP_SLOT_0+0x2)(r3),r5
  1136. // l.ori r9,0xbbbb
  1137. l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
  1138. l.sh (TRAMP_SLOT_1+0x0)(r3),r5
  1139. l.andi r5,r6,0xffff
  1140. l.sh (TRAMP_SLOT_1+0x2)(r3),r5
  1141. l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
  1142. l.andi r5,r5,0x3ff // clear out opcode part
  1143. l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
  1144. l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
  1145. /* falthrough */
  1146. 1: // l.jr
  1147. l.j trampoline_out
  1148. l.nop
  1149. /* ----------------------------- */
  1150. 4: // l.bnf
  1151. 5: // l.bf
  1152. l.slli r6,r4,6 // original offset shifted left 6 - 2
  1153. // l.srli r6,r6,6 // original offset shifted right 2
  1154. l.slli r4,r2,4 // old jump position: EEA shifted left 4
  1155. // l.srli r4,r4,6 // old jump position: shifted right 2
  1156. l.addi r5,r3,0xc // new jump position (physical)
  1157. l.slli r5,r5,4 // new jump position: shifted left 4
  1158. // calculate new jump offset
  1159. // new_off = old_off + (old_jump - new_jump)
  1160. l.add r6,r6,r4 // (orig_off + old_jump)
  1161. l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
  1162. l.srli r6,r6,6 // new offset shifted right 2
  1163. // r6 is new jump offset
  1164. l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
  1165. l.srli r4,r4,16
  1166. l.andi r4,r4,0xfc00 // get opcode part
  1167. l.slli r4,r4,16
  1168. l.or r6,r4,r6 // l.b(n)f new offset
  1169. l.sw TRAMP_SLOT_2(r3),r6 // write it back
  1170. /* we need to add l.j to EEA + 0x8 */
  1171. tophys (r4,r2) // may not be needed (due to shifts down_
  1172. l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
  1173. // jump position = r5 + 0x8 (0x8 compensated)
  1174. l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
  1175. l.slli r4,r4,4 // the amount of info in imediate of jump
  1176. l.srli r4,r4,6 // jump instruction with offset
  1177. l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
  1178. /* fallthrough */
  1179. trampoline_out:
  1180. // set up new EPC to point to our trampoline code
  1181. LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
  1182. l.mtspr r0,r5,SPR_EPCR_BASE
  1183. // immu_trampoline is (4x) CACHE_LINE aligned
  1184. // and only 6 instructions long,
  1185. // so we need to invalidate only 2 lines
  1186. /* Establish cache block size
  1187. If BS=0, 16;
  1188. If BS=1, 32;
  1189. r14 contain block size
  1190. */
  1191. l.mfspr r21,r0,SPR_ICCFGR
  1192. l.andi r21,r21,SPR_ICCFGR_CBS
  1193. l.srli r21,r21,7
  1194. l.ori r23,r0,16
  1195. l.sll r14,r23,r21
  1196. l.mtspr r0,r5,SPR_ICBIR
  1197. l.add r5,r5,r14
  1198. l.mtspr r0,r5,SPR_ICBIR
  1199. l.jr r9
  1200. l.nop
  1201. /*
  1202. * DSCR: prints a string referenced by r3.
  1203. *
  1204. * PRMS: r3 - address of the first character of null
  1205. * terminated string to be printed
  1206. *
  1207. * PREQ: UART at UART_BASE_ADD has to be initialized
  1208. *
  1209. * POST: caller should be aware that r3, r9 are changed
  1210. */
  1211. ENTRY(_emergency_print)
  1212. EMERGENCY_PRINT_STORE_GPR4
  1213. EMERGENCY_PRINT_STORE_GPR5
  1214. EMERGENCY_PRINT_STORE_GPR6
  1215. EMERGENCY_PRINT_STORE_GPR7
  1216. 2:
  1217. l.lbz r7,0(r3)
  1218. l.sfeq r7,r0
  1219. l.bf 9f
  1220. l.nop
  1221. // putc:
  1222. l.movhi r4,hi(UART_BASE_ADD)
  1223. l.addi r6,r0,0x20
  1224. 1: l.lbz r5,5(r4)
  1225. l.andi r5,r5,0x20
  1226. l.sfeq r5,r6
  1227. l.bnf 1b
  1228. l.nop
  1229. l.sb 0(r4),r7
  1230. l.addi r6,r0,0x60
  1231. 1: l.lbz r5,5(r4)
  1232. l.andi r5,r5,0x60
  1233. l.sfeq r5,r6
  1234. l.bnf 1b
  1235. l.nop
  1236. /* next character */
  1237. l.j 2b
  1238. l.addi r3,r3,0x1
  1239. 9:
  1240. EMERGENCY_PRINT_LOAD_GPR7
  1241. EMERGENCY_PRINT_LOAD_GPR6
  1242. EMERGENCY_PRINT_LOAD_GPR5
  1243. EMERGENCY_PRINT_LOAD_GPR4
  1244. l.jr r9
  1245. l.nop
  1246. ENTRY(_emergency_print_nr)
  1247. EMERGENCY_PRINT_STORE_GPR4
  1248. EMERGENCY_PRINT_STORE_GPR5
  1249. EMERGENCY_PRINT_STORE_GPR6
  1250. EMERGENCY_PRINT_STORE_GPR7
  1251. EMERGENCY_PRINT_STORE_GPR8
  1252. l.addi r8,r0,32 // shift register
  1253. 1: /* remove leading zeros */
  1254. l.addi r8,r8,-0x4
  1255. l.srl r7,r3,r8
  1256. l.andi r7,r7,0xf
  1257. /* don't skip the last zero if number == 0x0 */
  1258. l.sfeqi r8,0x4
  1259. l.bf 2f
  1260. l.nop
  1261. l.sfeq r7,r0
  1262. l.bf 1b
  1263. l.nop
  1264. 2:
  1265. l.srl r7,r3,r8
  1266. l.andi r7,r7,0xf
  1267. l.sflts r8,r0
  1268. l.bf 9f
  1269. l.sfgtui r7,0x9
  1270. l.bnf 8f
  1271. l.nop
  1272. l.addi r7,r7,0x27
  1273. 8:
  1274. l.addi r7,r7,0x30
  1275. // putc:
  1276. l.movhi r4,hi(UART_BASE_ADD)
  1277. l.addi r6,r0,0x20
  1278. 1: l.lbz r5,5(r4)
  1279. l.andi r5,r5,0x20
  1280. l.sfeq r5,r6
  1281. l.bnf 1b
  1282. l.nop
  1283. l.sb 0(r4),r7
  1284. l.addi r6,r0,0x60
  1285. 1: l.lbz r5,5(r4)
  1286. l.andi r5,r5,0x60
  1287. l.sfeq r5,r6
  1288. l.bnf 1b
  1289. l.nop
  1290. /* next character */
  1291. l.j 2b
  1292. l.addi r8,r8,-0x4
  1293. 9:
  1294. EMERGENCY_PRINT_LOAD_GPR8
  1295. EMERGENCY_PRINT_LOAD_GPR7
  1296. EMERGENCY_PRINT_LOAD_GPR6
  1297. EMERGENCY_PRINT_LOAD_GPR5
  1298. EMERGENCY_PRINT_LOAD_GPR4
  1299. l.jr r9
  1300. l.nop
  1301. /*
  1302. * This should be used for debugging only.
  1303. * It messes up the Linux early serial output
  1304. * somehow, so use it sparingly and essentially
  1305. * only if you need to debug something that goes wrong
  1306. * before Linux gets the early serial going.
  1307. *
  1308. * Furthermore, you'll have to make sure you set the
  1309. * UART_DEVISOR correctly according to the system
  1310. * clock rate.
  1311. *
  1312. *
  1313. */
  1314. #define SYS_CLK 20000000
  1315. //#define SYS_CLK 1843200
  1316. #define OR32_CONSOLE_BAUD 115200
  1317. #define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD)
  1318. ENTRY(_early_uart_init)
  1319. l.movhi r3,hi(UART_BASE_ADD)
  1320. l.addi r4,r0,0x7
  1321. l.sb 0x2(r3),r4
  1322. l.addi r4,r0,0x0
  1323. l.sb 0x1(r3),r4
  1324. l.addi r4,r0,0x3
  1325. l.sb 0x3(r3),r4
  1326. l.lbz r5,3(r3)
  1327. l.ori r4,r5,0x80
  1328. l.sb 0x3(r3),r4
  1329. l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
  1330. l.sb UART_DLM(r3),r4
  1331. l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
  1332. l.sb UART_DLL(r3),r4
  1333. l.sb 0x3(r3),r5
  1334. l.jr r9
  1335. l.nop
  1336. _string_copying_linux:
  1337. .string "\n\n\n\n\n\rCopying Linux... \0"
  1338. _string_ok_booting:
  1339. .string "Ok, booting the kernel.\n\r\0"
  1340. _string_unhandled_exception:
  1341. .string "\n\rRunarunaround: Unhandled exception 0x\0"
  1342. _string_epc_prefix:
  1343. .string ": EPC=0x\0"
  1344. _string_nl:
  1345. .string "\n\r\0"
  1346. .global _string_esr_irq_bug
  1347. _string_esr_irq_bug:
  1348. .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
  1349. /* ========================================[ page aligned structures ]=== */
  1350. /*
  1351. * .data section should be page aligned
  1352. * (look into arch/or32/kernel/vmlinux.lds)
  1353. */
  1354. .section .data,"aw"
  1355. .align 8192
  1356. .global empty_zero_page
  1357. empty_zero_page:
  1358. .space 8192
  1359. .global swapper_pg_dir
  1360. swapper_pg_dir:
  1361. .space 8192
  1362. .global _unhandled_stack
  1363. _unhandled_stack:
  1364. .space 8192
  1365. _unhandled_stack_top:
  1366. /* ============================================================[ EOF ]=== */