head_64.S

  1. /*
  2. * arch/ppc64/kernel/head.S
  3. *
  4. * PowerPC version
  5. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  6. *
  7. * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
  8. * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
  9. * Adapted for Power Macintosh by Paul Mackerras.
  10. * Low-level exception handlers and MMU support
  11. * rewritten by Paul Mackerras.
  12. * Copyright (C) 1996 Paul Mackerras.
  13. *
  14. * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
  15. * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
  16. *
  17. * This file contains the low-level support and setup for the
  18. * PowerPC-64 platform, including trap and interrupt dispatch.
  19. *
  20. * This program is free software; you can redistribute it and/or
  21. * modify it under the terms of the GNU General Public License
  22. * as published by the Free Software Foundation; either version
  23. * 2 of the License, or (at your option) any later version.
  24. */
  25. #include <linux/config.h>
  26. #include <linux/threads.h>
  27. #include <asm/reg.h>
  28. #include <asm/page.h>
  29. #include <asm/mmu.h>
  30. #include <asm/systemcfg.h>
  31. #include <asm/ppc_asm.h>
  32. #include <asm/asm-offsets.h>
  33. #include <asm/bug.h>
  34. #include <asm/cputable.h>
  35. #include <asm/setup.h>
  36. #include <asm/hvcall.h>
  37. #include <asm/iSeries/LparMap.h>
  38. #ifdef CONFIG_PPC_ISERIES
  39. #define DO_SOFT_DISABLE
  40. #endif
  41. /*
  42. * We lay out physical memory as follows:
  43. * 0x0000 - 0x00ff : Secondary processor spin code
  44. * 0x0100 - 0x2fff : pSeries Interrupt prologs
  45. * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
  46. * 0x6000 - 0x6fff : Initial (CPU0) segment table
  47. * 0x7000 - 0x7fff : FWNMI data area
  48. * 0x8000 - : Early init and support code
  49. */
  50. /*
  51. * SPRG Usage
  52. *
  53. * Register Definition
  54. *
  55. * SPRG0 reserved for hypervisor
  56. * SPRG1 temp - used to save gpr
  57. * SPRG2 temp - used to save gpr
  58. * SPRG3 virt addr of paca
  59. */
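/*
 * SPRG3 is what lets every exception prolog below locate per-cpu state
 * in a single instruction: the first thing each handler does is
 *	mfspr	r13,SPRN_SPRG3
 * and from then on r13 is the paca pointer, the same r13 convention the
 * kernel's C code relies on for local_paca/get_paca().
 */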
  60. /*
  61. * Entering into this code we make the following assumptions:
  62. * For pSeries:
  63. * 1. The MMU is off & Open Firmware is running in real mode.
  64. * 2. The kernel is entered at __start
  65. *
  66. * For iSeries:
  67. * 1. The MMU is on (as it always is for iSeries)
  68. * 2. The kernel is entered at system_reset_iSeries
  69. */
  70. .text
  71. .globl _stext
  72. _stext:
  73. #ifdef CONFIG_PPC_MULTIPLATFORM
  74. _GLOBAL(__start)
  75. /* NOP this out unconditionally */
  76. BEGIN_FTR_SECTION
  77. b .__start_initialization_multiplatform
  78. END_FTR_SECTION(0, 1)
  79. #endif /* CONFIG_PPC_MULTIPLATFORM */
  80. /* Catch branch to 0 in real mode */
  81. trap
  82. #ifdef CONFIG_PPC_ISERIES
  83. /*
  84. * At offset 0x20, there is a pointer to iSeries LPAR data.
  85. * This is required by the hypervisor
  86. */
  87. . = 0x20
  88. .llong hvReleaseData-KERNELBASE
  89. /*
  90. * At offset 0x28 and 0x30 are offsets to the mschunks_map
  91. * array (used by the iSeries LPAR debugger to do translation
  92. * between physical addresses and absolute addresses) and
  93. * to the pidhash table (also used by the debugger)
  94. */
  95. .llong mschunks_map-KERNELBASE
  96. .llong 0 /* pidhash-KERNELBASE SFRXXX */
  97. /* Offset 0x38 - Pointer to start of embedded System.map */
  98. .globl embedded_sysmap_start
  99. embedded_sysmap_start:
  100. .llong 0
  101. /* Offset 0x40 - Pointer to end of embedded System.map */
  102. .globl embedded_sysmap_end
  103. embedded_sysmap_end:
  104. .llong 0
  105. #endif /* CONFIG_PPC_ISERIES */
  106. /* Secondary processors spin on this value until it goes to 1. */
  107. .globl __secondary_hold_spinloop
  108. __secondary_hold_spinloop:
  109. .llong 0x0
  110. /* Secondary processors write this value with their cpu # */
  111. /* after they enter the spin loop immediately below. */
  112. .globl __secondary_hold_acknowledge
  113. __secondary_hold_acknowledge:
  114. .llong 0x0
  115. . = 0x60
  116. /*
  117. * The following code is used on pSeries to hold secondary processors
  118. * in a spin loop after they have been freed from OpenFirmware, but
  119. * before the bulk of the kernel has been relocated. This code
  120. * is relocated to physical address 0x60 before prom_init is run.
  121. * All of it must fit below the first exception vector at 0x100.
  122. */
  123. _GLOBAL(__secondary_hold)
  124. mfmsr r24
  125. ori r24,r24,MSR_RI
  126. mtmsrd r24 /* RI on */
  127. /* Grab our linux cpu number */
  128. mr r24,r3
  129. /* Tell the master cpu we're here */
  130. /* Relocation is off & we are located at an address less */
  131. /* than 0x100, so only need to grab low order offset. */
  132. std r24,__secondary_hold_acknowledge@l(0)
  133. sync
  134. /* All secondary cpus wait here until told to start. */
  135. 100: ld r4,__secondary_hold_spinloop@l(0)
  136. cmpdi 0,r4,1
  137. bne 100b
  138. #ifdef CONFIG_HMT
  139. b .hmt_init
  140. #else
  141. #ifdef CONFIG_SMP
  142. mr r3,r24
  143. b .pSeries_secondary_smp_init
  144. #else
  145. BUG_OPCODE
  146. #endif
  147. #endif
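/*
 * The other half of this handshake happens in C, roughly (a sketch of
 * the protocol, not the actual release code):
 *
 *	// master: see which cpus checked in, then let them all go
 *	cpu = *(u64 *)__secondary_hold_acknowledge;
 *	*(u64 *)__secondary_hold_spinloop = 1;
 *
 * prom_init parks the secondaries here; the kernel releases them once
 * it has been copied down to physical address 0.
 */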
  148. /* This value is used to mark exception frames on the stack. */
  149. .section ".toc","aw"
  150. exception_marker:
  151. .tc ID_72656773_68657265[TC],0x7265677368657265
  152. .text
  153. /*
  154. * The following macros define the code that appears as
  155. * the prologue to each of the exception handlers. They
  156. * are split into two parts to allow a single kernel binary
  157. * to be used for pSeries and iSeries.
  158. * LOL. One day... - paulus
  159. */
  160. /*
  161. * We make as much of the exception code common between native
  162. * exception handlers (including pSeries LPAR) and iSeries LPAR
  163. * implementations as possible.
  164. */
  165. /*
  166. * This is the start of the interrupt handlers for pSeries
  167. * This code runs with relocation off.
  168. */
  169. #define EX_R9 0
  170. #define EX_R10 8
  171. #define EX_R11 16
  172. #define EX_R12 24
  173. #define EX_R13 32
  174. #define EX_SRR0 40
  175. #define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
  176. #define EX_DAR 48
  177. #define EX_LR 48 /* SLB miss saves LR, but not DAR */
  178. #define EX_DSISR 56
  179. #define EX_CCR 60
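/*
 * The EX_* offsets above describe the small per-exception save area in
 * the paca (PACA_EXGEN, PACA_EXMC, PACA_EXSLB).  A rough C view of that
 * layout, as a sketch only (the real storage is just bytes in the paca):
 *
 *	struct ex_save {
 *		u64 r9, r10, r11, r12, r13;	// EX_R9 .. EX_R13
 *		u64 srr0_or_r3;			// EX_SRR0 / EX_R3
 *		u64 dar_or_lr;			// EX_DAR / EX_LR
 *		u32 dsisr;			// EX_DSISR
 *		u32 ccr;			// EX_CCR
 *	};
 */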
  180. #define EXCEPTION_PROLOG_PSERIES(area, label) \
  181. mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
  182. std r9,area+EX_R9(r13); /* save r9 - r12 */ \
  183. std r10,area+EX_R10(r13); \
  184. std r11,area+EX_R11(r13); \
  185. std r12,area+EX_R12(r13); \
  186. mfspr r9,SPRN_SPRG1; \
  187. std r9,area+EX_R13(r13); \
  188. mfcr r9; \
  189. clrrdi r12,r13,32; /* get high part of &label */ \
  190. mfmsr r10; \
  191. mfspr r11,SPRN_SRR0; /* save SRR0 */ \
  192. ori r12,r12,(label)@l; /* virt addr of handler */ \
  193. ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
  194. mtspr SPRN_SRR0,r12; \
  195. mfspr r12,SPRN_SRR1; /* and SRR1 */ \
  196. mtspr SPRN_SRR1,r10; \
  197. rfid; \
  198. b . /* prevent speculative execution */
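/*
 * How the prolog above gets from a real-mode vector into the virtual-
 * mode common handler: r13 holds the paca virtual address, clrrdi keeps
 * only its top 32 bits (the kernel linear-mapping prefix), and ori
 * merges in label@l.  That only works because every *_common handler
 * sits in the first 64KB of the kernel image, so 16 low bits suffice.
 * SRR0/SRR1 are then loaded with that address and an MSR with IR/DR/RI
 * set, and rfid switches translation on and jumps in one go.  Worked
 * sketch, assuming a paca at 0xC000000000050000: clrrdi gives
 * 0xC000000000000000, and ori with data_access_common@l yields the
 * handler's virtual address.
 */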
  199. /*
  200. * This is the start of the interrupt handlers for iSeries
  201. * This code runs with relocation on.
  202. */
  203. #define EXCEPTION_PROLOG_ISERIES_1(area) \
  204. mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
  205. std r9,area+EX_R9(r13); /* save r9 - r12 */ \
  206. std r10,area+EX_R10(r13); \
  207. std r11,area+EX_R11(r13); \
  208. std r12,area+EX_R12(r13); \
  209. mfspr r9,SPRN_SPRG1; \
  210. std r9,area+EX_R13(r13); \
  211. mfcr r9
  212. #define EXCEPTION_PROLOG_ISERIES_2 \
  213. mfmsr r10; \
  214. ld r11,PACALPPACA+LPPACASRR0(r13); \
  215. ld r12,PACALPPACA+LPPACASRR1(r13); \
  216. ori r10,r10,MSR_RI; \
  217. mtmsrd r10,1
  218. /*
  219. * The common exception prolog is used for all except a few exceptions
  220. * such as a segment miss on a kernel address. We have to be prepared
  221. * to take another exception from the point where we first touch the
  222. * kernel stack onwards.
  223. *
  224. * On entry r13 points to the paca, r9-r13 are saved in the paca,
  225. * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
  226. * SRR1, and relocation is on.
  227. */
  228. #define EXCEPTION_PROLOG_COMMON(n, area) \
  229. andi. r10,r12,MSR_PR; /* See if coming from user */ \
  230. mr r10,r1; /* Save r1 */ \
  231. subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
  232. beq- 1f; \
  233. ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
  234. 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
  235. bge- cr1,bad_stack; /* abort if it is */ \
  236. std r9,_CCR(r1); /* save CR in stackframe */ \
  237. std r11,_NIP(r1); /* save SRR0 in stackframe */ \
  238. std r12,_MSR(r1); /* save SRR1 in stackframe */ \
  239. std r10,0(r1); /* make stack chain pointer */ \
  240. std r0,GPR0(r1); /* save r0 in stackframe */ \
  241. std r10,GPR1(r1); /* save r1 in stackframe */ \
  242. std r2,GPR2(r1); /* save r2 in stackframe */ \
  243. SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
  244. SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
  245. ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
  246. ld r10,area+EX_R10(r13); \
  247. std r9,GPR9(r1); \
  248. std r10,GPR10(r1); \
  249. ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
  250. ld r10,area+EX_R12(r13); \
  251. ld r11,area+EX_R13(r13); \
  252. std r9,GPR11(r1); \
  253. std r10,GPR12(r1); \
  254. std r11,GPR13(r1); \
  255. ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
  256. mflr r9; /* save LR in stackframe */ \
  257. std r9,_LINK(r1); \
  258. mfctr r10; /* save CTR in stackframe */ \
  259. std r10,_CTR(r1); \
  260. mfspr r11,SPRN_XER; /* save XER in stackframe */ \
  261. std r11,_XER(r1); \
  262. li r9,(n)+1; \
  263. std r9,_TRAP(r1); /* set trap number */ \
  264. li r10,0; \
  265. ld r11,exception_marker@toc(r2); \
  266. std r10,RESULT(r1); /* clear regs->result */ \
  267. std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
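/*
 * EXCEPTION_PROLOG_COMMON builds a struct pt_regs frame on the kernel
 * stack: gpr[0..13], nip (from SRR0), msr (from SRR1), ccr, link, ctr,
 * xer, trap and result are all filled in here, and the callers then do
 * addi r3,r1,STACK_FRAME_OVERHEAD so the C handler sees it.  A sketch
 * of the C side, assuming a handler of the usual shape:
 *
 *	void unknown_exception(struct pt_regs *regs)
 *	{
 *		// regs->nip, regs->msr, regs->trap are exactly what
 *		// the prolog stored above
 *	}
 */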
  268. /*
  269. * Exception vectors.
  270. */
  271. #define STD_EXCEPTION_PSERIES(n, label) \
  272. . = n; \
  273. .globl label##_pSeries; \
  274. label##_pSeries: \
  275. HMT_MEDIUM; \
  276. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  277. RUNLATCH_ON(r13); \
  278. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
  279. #define STD_EXCEPTION_ISERIES(n, label, area) \
  280. .globl label##_iSeries; \
  281. label##_iSeries: \
  282. HMT_MEDIUM; \
  283. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  284. RUNLATCH_ON(r13); \
  285. EXCEPTION_PROLOG_ISERIES_1(area); \
  286. EXCEPTION_PROLOG_ISERIES_2; \
  287. b label##_common
  288. #define MASKABLE_EXCEPTION_ISERIES(n, label) \
  289. .globl label##_iSeries; \
  290. label##_iSeries: \
  291. HMT_MEDIUM; \
  292. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  293. RUNLATCH_ON(r13); \
  294. EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
  295. lbz r10,PACAPROCENABLED(r13); \
  296. cmpwi 0,r10,0; \
  297. beq- label##_iSeries_masked; \
  298. EXCEPTION_PROLOG_ISERIES_2; \
  299. b label##_common;
  300. #ifdef DO_SOFT_DISABLE
  301. #define DISABLE_INTS \
  302. lbz r10,PACAPROCENABLED(r13); \
  303. li r11,0; \
  304. std r10,SOFTE(r1); \
  305. mfmsr r10; \
  306. stb r11,PACAPROCENABLED(r13); \
  307. ori r10,r10,MSR_EE; \
  308. mtmsrd r10,1
  309. #define ENABLE_INTS \
  310. lbz r10,PACAPROCENABLED(r13); \
  311. mfmsr r11; \
  312. std r10,SOFTE(r1); \
  313. ori r11,r11,MSR_EE; \
  314. mtmsrd r11,1
  315. #else /* hard enable/disable interrupts */
  316. #define DISABLE_INTS
  317. #define ENABLE_INTS \
  318. ld r12,_MSR(r1); \
  319. mfmsr r11; \
  320. rlwimi r11,r12,0,MSR_EE; \
  321. mtmsrd r11,1
  322. #endif
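/*
 * With DO_SOFT_DISABLE (iSeries), "disabling" interrupts only clears
 * the PACAPROCENABLED byte and records the previous state in the
 * frame's SOFTE slot; MSR_EE is deliberately left on (the ori/mtmsrd
 * above even turns it on), and the maskable vectors check
 * PACAPROCENABLED and divert to *_iSeries_masked instead of
 * dispatching.  Without soft disable, ENABLE_INTS simply copies MSR_EE
 * from the interrupted context's saved MSR back into the live MSR.
 */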
  323. #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
  324. .align 7; \
  325. .globl label##_common; \
  326. label##_common: \
  327. EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
  328. DISABLE_INTS; \
  329. bl .save_nvgprs; \
  330. addi r3,r1,STACK_FRAME_OVERHEAD; \
  331. bl hdlr; \
  332. b .ret_from_except
  333. #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
  334. .align 7; \
  335. .globl label##_common; \
  336. label##_common: \
  337. EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
  338. DISABLE_INTS; \
  339. addi r3,r1,STACK_FRAME_OVERHEAD; \
  340. bl hdlr; \
  341. b .ret_from_except_lite
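/*
 * Example of what these macros produce; e.g.
 * STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) expands to
 * (roughly):
 *
 *	trap_0b_common:
 *		EXCEPTION_PROLOG_COMMON(0xb00, PACA_EXGEN)
 *		DISABLE_INTS
 *		bl	.save_nvgprs
 *		addi	r3,r1,STACK_FRAME_OVERHEAD
 *		bl	.unknown_exception
 *		b	.ret_from_except
 */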
  342. /*
  343. * Start of pSeries system interrupt routines
  344. */
  345. . = 0x100
  346. .globl __start_interrupts
  347. __start_interrupts:
  348. STD_EXCEPTION_PSERIES(0x100, system_reset)
  349. . = 0x200
  350. _machine_check_pSeries:
  351. HMT_MEDIUM
  352. mtspr SPRN_SPRG1,r13 /* save r13 */
  353. RUNLATCH_ON(r13)
  354. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  355. . = 0x300
  356. .globl data_access_pSeries
  357. data_access_pSeries:
  358. HMT_MEDIUM
  359. mtspr SPRN_SPRG1,r13
  360. BEGIN_FTR_SECTION
  361. mtspr SPRN_SPRG2,r12
  362. mfspr r13,SPRN_DAR
  363. mfspr r12,SPRN_DSISR
  364. srdi r13,r13,60
  365. rlwimi r13,r12,16,0x20
  366. mfcr r12
  367. cmpwi r13,0x2c
  368. beq .do_stab_bolted_pSeries
  369. mtcrf 0x80,r12
  370. mfspr r12,SPRN_SPRG2
  371. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  372. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
  373. . = 0x380
  374. .globl data_access_slb_pSeries
  375. data_access_slb_pSeries:
  376. HMT_MEDIUM
  377. mtspr SPRN_SPRG1,r13
  378. RUNLATCH_ON(r13)
  379. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  380. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  381. std r10,PACA_EXSLB+EX_R10(r13)
  382. std r11,PACA_EXSLB+EX_R11(r13)
  383. std r12,PACA_EXSLB+EX_R12(r13)
  384. std r3,PACA_EXSLB+EX_R3(r13)
  385. mfspr r9,SPRN_SPRG1
  386. std r9,PACA_EXSLB+EX_R13(r13)
  387. mfcr r9
  388. mfspr r12,SPRN_SRR1 /* and SRR1 */
  389. mfspr r3,SPRN_DAR
  390. b .do_slb_miss /* Rel. branch works in real mode */
  391. STD_EXCEPTION_PSERIES(0x400, instruction_access)
  392. . = 0x480
  393. .globl instruction_access_slb_pSeries
  394. instruction_access_slb_pSeries:
  395. HMT_MEDIUM
  396. mtspr SPRN_SPRG1,r13
  397. RUNLATCH_ON(r13)
  398. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  399. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  400. std r10,PACA_EXSLB+EX_R10(r13)
  401. std r11,PACA_EXSLB+EX_R11(r13)
  402. std r12,PACA_EXSLB+EX_R12(r13)
  403. std r3,PACA_EXSLB+EX_R3(r13)
  404. mfspr r9,SPRN_SPRG1
  405. std r9,PACA_EXSLB+EX_R13(r13)
  406. mfcr r9
  407. mfspr r12,SPRN_SRR1 /* and SRR1 */
  408. mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
  409. b .do_slb_miss /* Rel. branch works in real mode */
  410. STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
  411. STD_EXCEPTION_PSERIES(0x600, alignment)
  412. STD_EXCEPTION_PSERIES(0x700, program_check)
  413. STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
  414. STD_EXCEPTION_PSERIES(0x900, decrementer)
  415. STD_EXCEPTION_PSERIES(0xa00, trap_0a)
  416. STD_EXCEPTION_PSERIES(0xb00, trap_0b)
  417. . = 0xc00
  418. .globl system_call_pSeries
  419. system_call_pSeries:
  420. HMT_MEDIUM
  421. RUNLATCH_ON(r9)
  422. mr r9,r13
  423. mfmsr r10
  424. mfspr r13,SPRN_SPRG3
  425. mfspr r11,SPRN_SRR0
  426. clrrdi r12,r13,32
  427. oris r12,r12,system_call_common@h
  428. ori r12,r12,system_call_common@l
  429. mtspr SPRN_SRR0,r12
  430. ori r10,r10,MSR_IR|MSR_DR|MSR_RI
  431. mfspr r12,SPRN_SRR1
  432. mtspr SPRN_SRR1,r10
  433. rfid
  434. b . /* prevent speculative execution */
  435. STD_EXCEPTION_PSERIES(0xd00, single_step)
  436. STD_EXCEPTION_PSERIES(0xe00, trap_0e)
  437. /* We need to deal with the Altivec unavailable exception
  438. * here which is at 0xf20, thus in the middle of the
  439. * prolog code of the PerformanceMonitor one. A little
  440. * trickery is thus necessary
  441. */
  442. . = 0xf00
  443. b performance_monitor_pSeries
  444. STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
  445. STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
  446. STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
  447. . = 0x3000
  448. /*** pSeries interrupt support ***/
  449. /* moved from 0xf00 */
  450. STD_EXCEPTION_PSERIES(., performance_monitor)
  451. .align 7
  452. _GLOBAL(do_stab_bolted_pSeries)
  453. mtcrf 0x80,r12
  454. mfspr r12,SPRN_SPRG2
  455. EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
  456. /*
  457. * Vectors for the FWNMI option. Share common code.
  458. */
  459. .globl system_reset_fwnmi
  460. system_reset_fwnmi:
  461. HMT_MEDIUM
  462. mtspr SPRN_SPRG1,r13 /* save r13 */
  463. RUNLATCH_ON(r13)
  464. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
  465. .globl machine_check_fwnmi
  466. machine_check_fwnmi:
  467. HMT_MEDIUM
  468. mtspr SPRN_SPRG1,r13 /* save r13 */
  469. RUNLATCH_ON(r13)
  470. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  471. #ifdef CONFIG_PPC_ISERIES
  472. /*** ISeries-LPAR interrupt handlers ***/
  473. STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
  474. .globl data_access_iSeries
  475. data_access_iSeries:
  476. mtspr SPRN_SPRG1,r13
  477. BEGIN_FTR_SECTION
  478. mtspr SPRN_SPRG2,r12
  479. mfspr r13,SPRN_DAR
  480. mfspr r12,SPRN_DSISR
  481. srdi r13,r13,60
  482. rlwimi r13,r12,16,0x20
  483. mfcr r12
  484. cmpwi r13,0x2c
  485. beq .do_stab_bolted_iSeries
  486. mtcrf 0x80,r12
  487. mfspr r12,SPRN_SPRG2
  488. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  489. EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
  490. EXCEPTION_PROLOG_ISERIES_2
  491. b data_access_common
  492. .do_stab_bolted_iSeries:
  493. mtcrf 0x80,r12
  494. mfspr r12,SPRN_SPRG2
  495. EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
  496. EXCEPTION_PROLOG_ISERIES_2
  497. b .do_stab_bolted
  498. .globl data_access_slb_iSeries
  499. data_access_slb_iSeries:
  500. mtspr SPRN_SPRG1,r13 /* save r13 */
  501. EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
  502. std r3,PACA_EXSLB+EX_R3(r13)
  503. ld r12,PACALPPACA+LPPACASRR1(r13)
  504. mfspr r3,SPRN_DAR
  505. b .do_slb_miss
  506. STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
  507. .globl instruction_access_slb_iSeries
  508. instruction_access_slb_iSeries:
  509. mtspr SPRN_SPRG1,r13 /* save r13 */
  510. EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
  511. std r3,PACA_EXSLB+EX_R3(r13)
  512. ld r12,PACALPPACA+LPPACASRR1(r13)
  513. ld r3,PACALPPACA+LPPACASRR0(r13)
  514. b .do_slb_miss
  515. MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
  516. STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
  517. STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
  518. STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
  519. MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
  520. STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
  521. STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
  522. .globl system_call_iSeries
  523. system_call_iSeries:
  524. mr r9,r13
  525. mfspr r13,SPRN_SPRG3
  526. EXCEPTION_PROLOG_ISERIES_2
  527. b system_call_common
  528. STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
  529. STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
  530. STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
  531. .globl system_reset_iSeries
  532. system_reset_iSeries:
  533. mfspr r13,SPRN_SPRG3 /* Get paca address */
  534. mfmsr r24
  535. ori r24,r24,MSR_RI
  536. mtmsrd r24 /* RI on */
  537. lhz r24,PACAPACAINDEX(r13) /* Get processor # */
  538. cmpwi 0,r24,0 /* Are we processor 0? */
  539. beq .__start_initialization_iSeries /* Start up the first processor */
  540. mfspr r4,SPRN_CTRLF
  541. li r5,CTRL_RUNLATCH /* Turn off the run light */
  542. andc r4,r4,r5
  543. mtspr SPRN_CTRLT,r4
  544. 1:
  545. HMT_LOW
  546. #ifdef CONFIG_SMP
  547. lbz r23,PACAPROCSTART(r13) /* Test if this processor
  548. * should start */
  549. sync
  550. LOADADDR(r3,current_set)
  551. sldi r28,r24,3 /* get current_set[cpu#] */
  552. ldx r3,r3,r28
  553. addi r1,r3,THREAD_SIZE
  554. subi r1,r1,STACK_FRAME_OVERHEAD
  555. cmpwi 0,r23,0
  556. beq iSeries_secondary_smp_loop /* Loop until told to go */
  557. bne .__secondary_start /* Told to go: start this cpu */
  558. iSeries_secondary_smp_loop:
  559. /* Let the Hypervisor know we are alive */
  560. /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
  561. lis r3,0x8002
  562. rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
  563. #else /* CONFIG_SMP */
  564. /* Yield the processor. This is required for non-SMP kernels
  565. which are running on multi-threaded machines. */
  566. lis r3,0x8000
  567. rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
  568. addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
  569. li r4,0 /* "yield timed" */
  570. li r5,-1 /* "yield forever" */
  571. #endif /* CONFIG_SMP */
  572. li r0,-1 /* r0=-1 indicates a Hypervisor call */
  573. sc /* Invoke the hypervisor via a system call */
  574. mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
  575. b 1b /* If SMP not configured, secondaries
  576. * loop forever */
  577. .globl decrementer_iSeries_masked
  578. decrementer_iSeries_masked:
  579. li r11,1
  580. stb r11,PACALPPACA+LPPACADECRINT(r13)
  581. lwz r12,PACADEFAULTDECR(r13)
  582. mtspr SPRN_DEC,r12
  583. /* fall through */
  584. .globl hardware_interrupt_iSeries_masked
  585. hardware_interrupt_iSeries_masked:
  586. mtcrf 0x80,r9 /* Restore regs */
  587. ld r11,PACALPPACA+LPPACASRR0(r13)
  588. ld r12,PACALPPACA+LPPACASRR1(r13)
  589. mtspr SPRN_SRR0,r11
  590. mtspr SPRN_SRR1,r12
  591. ld r9,PACA_EXGEN+EX_R9(r13)
  592. ld r10,PACA_EXGEN+EX_R10(r13)
  593. ld r11,PACA_EXGEN+EX_R11(r13)
  594. ld r12,PACA_EXGEN+EX_R12(r13)
  595. ld r13,PACA_EXGEN+EX_R13(r13)
  596. rfid
  597. b . /* prevent speculative execution */
  598. #endif /* CONFIG_PPC_ISERIES */
  599. /*** Common interrupt handlers ***/
  600. STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
  601. /*
  602. * Machine check is different because we use a different
  603. * save area: PACA_EXMC instead of PACA_EXGEN.
  604. */
  605. .align 7
  606. .globl machine_check_common
  607. machine_check_common:
  608. EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
  609. DISABLE_INTS
  610. bl .save_nvgprs
  611. addi r3,r1,STACK_FRAME_OVERHEAD
  612. bl .machine_check_exception
  613. b .ret_from_except
  614. STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
  615. STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
  616. STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
  617. STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
  618. STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
  619. STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
  620. STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
  621. #ifdef CONFIG_ALTIVEC
  622. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
  623. #else
  624. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
  625. #endif
  626. /*
  627. * Here we have detected that the kernel stack pointer is bad.
  628. * R9 contains the saved CR, r13 points to the paca,
  629. * r10 contains the (bad) kernel stack pointer,
  630. * r11 and r12 contain the saved SRR0 and SRR1.
  631. * We switch to using an emergency stack, save the registers there,
  632. * and call kernel_bad_stack(), which panics.
  633. */
  634. bad_stack:
  635. ld r1,PACAEMERGSP(r13)
  636. subi r1,r1,64+INT_FRAME_SIZE
  637. std r9,_CCR(r1)
  638. std r10,GPR1(r1)
  639. std r11,_NIP(r1)
  640. std r12,_MSR(r1)
  641. mfspr r11,SPRN_DAR
  642. mfspr r12,SPRN_DSISR
  643. std r11,_DAR(r1)
  644. std r12,_DSISR(r1)
  645. mflr r10
  646. mfctr r11
  647. mfxer r12
  648. std r10,_LINK(r1)
  649. std r11,_CTR(r1)
  650. std r12,_XER(r1)
  651. SAVE_GPR(0,r1)
  652. SAVE_GPR(2,r1)
  653. SAVE_4GPRS(3,r1)
  654. SAVE_2GPRS(7,r1)
  655. SAVE_10GPRS(12,r1)
  656. SAVE_10GPRS(22,r1)
  657. addi r11,r1,INT_FRAME_SIZE
  658. std r11,0(r1)
  659. li r12,0
  660. std r12,0(r11)
  661. ld r2,PACATOC(r13)
  662. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  663. bl .kernel_bad_stack
  664. b 1b
  665. /*
  666. * Return from an exception with minimal checks.
  667. * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
  668. * If interrupts have been enabled, or anything has been
  669. * done that might have changed the scheduling status of
  670. * any task or sent any task a signal, you should use
  671. * ret_from_except or ret_from_except_lite instead of this.
  672. */
  673. fast_exception_return:
  674. ld r12,_MSR(r1)
  675. ld r11,_NIP(r1)
  676. andi. r3,r12,MSR_RI /* check if RI is set */
  677. beq- unrecov_fer
  678. ld r3,_CCR(r1)
  679. ld r4,_LINK(r1)
  680. ld r5,_CTR(r1)
  681. ld r6,_XER(r1)
  682. mtcr r3
  683. mtlr r4
  684. mtctr r5
  685. mtxer r6
  686. REST_GPR(0, r1)
  687. REST_8GPRS(2, r1)
  688. mfmsr r10
  689. clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
  690. mtmsrd r10,1
  691. mtspr SPRN_SRR1,r12
  692. mtspr SPRN_SRR0,r11
  693. REST_4GPRS(10, r1)
  694. ld r1,GPR1(r1)
  695. rfid
  696. b . /* prevent speculative execution */
  697. unrecov_fer:
  698. bl .save_nvgprs
  699. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  700. bl .unrecoverable_exception
  701. b 1b
  702. /*
  703. * Here r13 points to the paca, r9 contains the saved CR,
  704. * SRR0 and SRR1 are saved in r11 and r12,
  705. * r9 - r13 are saved in paca->exgen.
  706. */
  707. .align 7
  708. .globl data_access_common
  709. data_access_common:
  710. RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
  711. mfspr r10,SPRN_DAR
  712. std r10,PACA_EXGEN+EX_DAR(r13)
  713. mfspr r10,SPRN_DSISR
  714. stw r10,PACA_EXGEN+EX_DSISR(r13)
  715. EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
  716. ld r3,PACA_EXGEN+EX_DAR(r13)
  717. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  718. li r5,0x300
  719. b .do_hash_page /* Try to handle as hpte fault */
  720. .align 7
  721. .globl instruction_access_common
  722. instruction_access_common:
  723. EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
  724. ld r3,_NIP(r1)
  725. andis. r4,r12,0x5820
  726. li r5,0x400
  727. b .do_hash_page /* Try to handle as hpte fault */
  728. .align 7
  729. .globl hardware_interrupt_common
  730. .globl hardware_interrupt_entry
  731. hardware_interrupt_common:
  732. EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
  733. hardware_interrupt_entry:
  734. DISABLE_INTS
  735. addi r3,r1,STACK_FRAME_OVERHEAD
  736. bl .do_IRQ
  737. b .ret_from_except_lite
  738. .align 7
  739. .globl alignment_common
  740. alignment_common:
  741. mfspr r10,SPRN_DAR
  742. std r10,PACA_EXGEN+EX_DAR(r13)
  743. mfspr r10,SPRN_DSISR
  744. stw r10,PACA_EXGEN+EX_DSISR(r13)
  745. EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
  746. ld r3,PACA_EXGEN+EX_DAR(r13)
  747. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  748. std r3,_DAR(r1)
  749. std r4,_DSISR(r1)
  750. bl .save_nvgprs
  751. addi r3,r1,STACK_FRAME_OVERHEAD
  752. ENABLE_INTS
  753. bl .alignment_exception
  754. b .ret_from_except
  755. .align 7
  756. .globl program_check_common
  757. program_check_common:
  758. EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
  759. bl .save_nvgprs
  760. addi r3,r1,STACK_FRAME_OVERHEAD
  761. ENABLE_INTS
  762. bl .program_check_exception
  763. b .ret_from_except
  764. .align 7
  765. .globl fp_unavailable_common
  766. fp_unavailable_common:
  767. EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
  768. bne .load_up_fpu /* if from user, just load it up */
  769. bl .save_nvgprs
  770. addi r3,r1,STACK_FRAME_OVERHEAD
  771. ENABLE_INTS
  772. bl .kernel_fp_unavailable_exception
  773. BUG_OPCODE
  774. .align 7
  775. .globl altivec_unavailable_common
  776. altivec_unavailable_common:
  777. EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
  778. #ifdef CONFIG_ALTIVEC
  779. BEGIN_FTR_SECTION
  780. bne .load_up_altivec /* if from user, just load it up */
  781. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  782. #endif
  783. bl .save_nvgprs
  784. addi r3,r1,STACK_FRAME_OVERHEAD
  785. ENABLE_INTS
  786. bl .altivec_unavailable_exception
  787. b .ret_from_except
  788. #ifdef CONFIG_ALTIVEC
  789. /*
  790. * load_up_altivec(unused, unused, tsk)
  791. * Disable VMX for the task which had it previously,
  792. * and save its vector registers in its thread_struct.
  793. * Enables the VMX for use in the kernel on return.
  794. * On SMP we know the VMX is free, since we give it up every
  795. * switch (ie, no lazy save of the vector registers).
  796. * On entry: r13 == 'current' && last_task_used_altivec != 'current'
  797. */
  798. _STATIC(load_up_altivec)
  799. mfmsr r5 /* grab the current MSR */
  800. oris r5,r5,MSR_VEC@h
  801. mtmsrd r5 /* enable use of VMX now */
  802. isync
  803. /*
  804. * For SMP, we don't do lazy VMX switching because it just gets too
  805. * horrendously complex, especially when a task switches from one CPU
  806. * to another. Instead we call giveup_altivec in switch_to.
  807. * VRSAVE isn't dealt with here, that is done in the normal context
  808. * switch code. Note that we could rely on vrsave value to eventually
  809. * avoid saving all of the VREGs here...
  810. */
  811. #ifndef CONFIG_SMP
  812. ld r3,last_task_used_altivec@got(r2)
  813. ld r4,0(r3)
  814. cmpdi 0,r4,0
  815. beq 1f
  816. /* Save VMX state to last_task_used_altivec's THREAD struct */
  817. addi r4,r4,THREAD
  818. SAVE_32VRS(0,r5,r4)
  819. mfvscr vr0
  820. li r10,THREAD_VSCR
  821. stvx vr0,r10,r4
  822. /* Disable VMX for last_task_used_altivec */
  823. ld r5,PT_REGS(r4)
  824. ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  825. lis r6,MSR_VEC@h
  826. andc r4,r4,r6
  827. std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  828. 1:
  829. #endif /* CONFIG_SMP */
  830. /* Hack: if we get an altivec unavailable trap with VRSAVE
  831. * set to all zeros, we assume this is a broken application
  832. * that fails to set it properly, and thus we switch it to
  833. * all 1's
  834. */
  835. mfspr r4,SPRN_VRSAVE
  836. cmpdi 0,r4,0
  837. bne+ 1f
  838. li r4,-1
  839. mtspr SPRN_VRSAVE,r4
  840. 1:
  841. /* enable use of VMX after return */
  842. ld r4,PACACURRENT(r13)
  843. addi r5,r4,THREAD /* Get THREAD */
  844. oris r12,r12,MSR_VEC@h
  845. std r12,_MSR(r1)
  846. li r4,1
  847. li r10,THREAD_VSCR
  848. stw r4,THREAD_USED_VR(r5)
  849. lvx vr0,r10,r5
  850. mtvscr vr0
  851. REST_32VRS(0,r4,r5)
  852. #ifndef CONFIG_SMP
  853. /* Update last_task_used_altivec to 'current' */
  854. subi r4,r5,THREAD /* Back to 'current' */
  855. std r4,0(r3)
  856. #endif /* CONFIG_SMP */
  857. /* restore registers and return */
  858. b fast_exception_return
  859. #endif /* CONFIG_ALTIVEC */
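/*
 * The non-SMP path above is the usual lazy-state dance, roughly:
 *
 *	if (last_task_used_altivec) {
 *		save VRs/VSCR into last_task_used_altivec->thread;
 *		clear MSR_VEC in its saved user MSR;
 *	}
 *	load VSCR and the VRs from current->thread;
 *	set MSR_VEC in the MSR we will return to the caller with;
 *	last_task_used_altivec = current;
 *
 * A sketch only; on SMP the vector state is always saved at context
 * switch, so the last_task_used_altivec bookkeeping is compiled out.
 */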
  860. /*
  861. * Hash table stuff
  862. */
  863. .align 7
  864. _GLOBAL(do_hash_page)
  865. std r3,_DAR(r1)
  866. std r4,_DSISR(r1)
  867. andis. r0,r4,0xa450 /* weird error? */
  868. bne- .handle_page_fault /* if so, skip the HPTE insertion */
  869. BEGIN_FTR_SECTION
  870. andis. r0,r4,0x0020 /* Is it a segment table fault? */
  871. bne- .do_ste_alloc /* If so handle it */
  872. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  873. /*
  874. * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
  875. * accessing a userspace segment (even from the kernel). We assume
  876. * kernel addresses always have the high bit set.
  877. */
  878. rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
  879. rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
  880. orc r0,r12,r0 /* MSR_PR | ~high_bit */
  881. rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
  882. ori r4,r4,1 /* add _PAGE_PRESENT */
  883. rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
  884. /*
  885. * On iSeries, we soft-disable interrupts here, then
  886. * hard-enable interrupts so that the hash_page code can spin on
  887. * the hash_table_lock without problems on a shared processor.
  888. */
  889. DISABLE_INTS
  890. /*
  891. * r3 contains the faulting address
  892. * r4 contains the required access permissions
  893. * r5 contains the trap number
  894. *
  895. * at return r3 = 0 for success
  896. */
  897. bl .hash_page /* build HPTE if possible */
  898. cmpdi r3,0 /* see if hash_page succeeded */
  899. #ifdef DO_SOFT_DISABLE
  900. /*
  901. * If we had interrupts soft-enabled at the point where the
  902. * DSI/ISI occurred, and an interrupt came in during hash_page,
  903. * handle it now.
  904. * We jump to ret_from_except_lite rather than fast_exception_return
  905. * because ret_from_except_lite will check for and handle pending
  906. * interrupts if necessary.
  907. */
  908. beq .ret_from_except_lite
  909. /* For a hash failure, we don't bother re-enabling interrupts */
  910. ble- 12f
  911. /*
  912. * hash_page couldn't handle it, set soft interrupt enable back
  913. * to what it was before the trap. Note that .local_irq_restore
  914. * handles any interrupts pending at this point.
  915. */
  916. ld r3,SOFTE(r1)
  917. bl .local_irq_restore
  918. b 11f
  919. #else
  920. beq fast_exception_return /* Return from exception on success */
  921. ble- 12f /* Failure return from hash_page */
  922. /* fall through */
  923. #endif
  924. /* Here we have a page fault that hash_page can't handle. */
  925. _GLOBAL(handle_page_fault)
  926. ENABLE_INTS
  927. 11: ld r4,_DAR(r1)
  928. ld r5,_DSISR(r1)
  929. addi r3,r1,STACK_FRAME_OVERHEAD
  930. bl .do_page_fault
  931. cmpdi r3,0
  932. beq+ .ret_from_except_lite
  933. bl .save_nvgprs
  934. mr r5,r3
  935. addi r3,r1,STACK_FRAME_OVERHEAD
  936. lwz r4,_DAR(r1)
  937. bl .bad_page_fault
  938. b .ret_from_except
  939. /* We have a page fault that hash_page could handle but HV refused
  940. * the PTE insertion
  941. */
  942. 12: bl .save_nvgprs
  943. addi r3,r1,STACK_FRAME_OVERHEAD
  944. lwz r4,_DAR(r1)
  945. bl .low_hash_fault
  946. b .ret_from_except
  947. /* here we have a segment miss */
  948. _GLOBAL(do_ste_alloc)
  949. bl .ste_allocate /* try to insert stab entry */
  950. cmpdi r3,0
  951. beq+ fast_exception_return
  952. b .handle_page_fault
  953. /*
  954. * r13 points to the PACA, r9 contains the saved CR,
  955. * r11 and r12 contain the saved SRR0 and SRR1.
  956. * r9 - r13 are saved in paca->exslb.
  957. * We assume we aren't going to take any exceptions during this procedure.
  958. * We assume (DAR >> 60) == 0xc.
  959. */
  960. .align 7
  961. _GLOBAL(do_stab_bolted)
  962. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  963. std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
  964. /* Hash to the primary group */
  965. ld r10,PACASTABVIRT(r13)
  966. mfspr r11,SPRN_DAR
  967. srdi r11,r11,28
  968. rldimi r10,r11,7,52 /* r10 = first ste of the group */
  969. /* Calculate VSID */
  970. /* This is a kernel address, so protovsid = ESID */
  971. ASM_VSID_SCRAMBLE(r11, r9)
  972. rldic r9,r11,12,16 /* r9 = vsid << 12 */
  973. /* Search the primary group for a free entry */
  974. 1: ld r11,0(r10) /* Test valid bit of the current ste */
  975. andi. r11,r11,0x80
  976. beq 2f
  977. addi r10,r10,16
  978. andi. r11,r10,0x70
  979. bne 1b
  980. /* Stick to searching only the primary group for now. */
  981. /* At least for now, we use a very simple random castout scheme */
  982. /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
  983. mftb r11
  984. rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
  985. ori r11,r11,0x10
  986. /* r10 currently points to an ste one past the group of interest */
  987. /* make it point to the randomly selected entry */
  988. subi r10,r10,128
  989. or r10,r10,r11 /* r10 is the entry to invalidate */
  990. isync /* mark the entry invalid */
  991. ld r11,0(r10)
  992. rldicl r11,r11,56,1 /* clear the valid bit */
  993. rotldi r11,r11,8
  994. std r11,0(r10)
  995. sync
  996. clrrdi r11,r11,28 /* Get the esid part of the ste */
  997. slbie r11
  998. 2: std r9,8(r10) /* Store the vsid part of the ste */
  999. eieio
  1000. mfspr r11,SPRN_DAR /* Get the new esid */
  1001. clrrdi r11,r11,28 /* Permits a full 32b of ESID */
  1002. ori r11,r11,0x90 /* Turn on valid and kp */
  1003. std r11,0(r10) /* Put new entry back into the stab */
  1004. sync
  1005. /* All done -- return from exception. */
  1006. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
  1007. ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
  1008. andi. r10,r12,MSR_RI
  1009. beq- unrecov_slb
  1010. mtcrf 0x80,r9 /* restore CR */
  1011. mfmsr r10
  1012. clrrdi r10,r10,2
  1013. mtmsrd r10,1
  1014. mtspr SPRN_SRR0,r11
  1015. mtspr SPRN_SRR1,r12
  1016. ld r9,PACA_EXSLB+EX_R9(r13)
  1017. ld r10,PACA_EXSLB+EX_R10(r13)
  1018. ld r11,PACA_EXSLB+EX_R11(r13)
  1019. ld r12,PACA_EXSLB+EX_R12(r13)
  1020. ld r13,PACA_EXSLB+EX_R13(r13)
  1021. rfid
  1022. b . /* prevent speculative execution */
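/*
 * Segment table entry format as used above (inferred from the bits this
 * code touches): dword 0 holds the ESID in its upper bits plus the
 * valid bit (0x80) and Kp (0x10); dword 1 holds the VSID shifted left
 * by 12.  So "ori r11,r11,0x90" marks the new entry valid with kernel
 * protection, and the castout path clears the valid bit and slbie's the
 * old ESID before reusing the slot.
 */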
  1023. /*
  1024. * r13 points to the PACA, r9 contains the saved CR,
  1025. * r11 and r12 contain the saved SRR0 and SRR1.
  1026. * r3 has the faulting address
  1027. * r9 - r13 are saved in paca->exslb.
  1028. * r3 is saved in paca->slb_r3
  1029. * We assume we aren't going to take any exceptions during this procedure.
  1030. */
  1031. _GLOBAL(do_slb_miss)
  1032. mflr r10
  1033. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  1034. std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
  1035. bl .slb_allocate /* handle it */
  1036. /* All done -- return from exception. */
  1037. ld r10,PACA_EXSLB+EX_LR(r13)
  1038. ld r3,PACA_EXSLB+EX_R3(r13)
  1039. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
  1040. #ifdef CONFIG_PPC_ISERIES
  1041. ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
  1042. #endif /* CONFIG_PPC_ISERIES */
  1043. mtlr r10
  1044. andi. r10,r12,MSR_RI /* check for unrecoverable exception */
  1045. beq- unrecov_slb
  1046. .machine push
  1047. .machine "power4"
  1048. mtcrf 0x80,r9
  1049. mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
  1050. .machine pop
  1051. #ifdef CONFIG_PPC_ISERIES
  1052. mtspr SPRN_SRR0,r11
  1053. mtspr SPRN_SRR1,r12
  1054. #endif /* CONFIG_PPC_ISERIES */
  1055. ld r9,PACA_EXSLB+EX_R9(r13)
  1056. ld r10,PACA_EXSLB+EX_R10(r13)
  1057. ld r11,PACA_EXSLB+EX_R11(r13)
  1058. ld r12,PACA_EXSLB+EX_R12(r13)
  1059. ld r13,PACA_EXSLB+EX_R13(r13)
  1060. rfid
  1061. b . /* prevent speculative execution */
  1062. unrecov_slb:
  1063. EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
  1064. DISABLE_INTS
  1065. bl .save_nvgprs
  1066. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  1067. bl .unrecoverable_exception
  1068. b 1b
  1069. /*
  1070. * Space for CPU0's segment table.
  1071. *
  1072. * On iSeries, the hypervisor must fill in at least one entry before
  1073. * we get control (with relocation on). The address is given to the hv
  1074. * as a page number (see xLparMap in lpardata.c), so this must be at a
  1075. * fixed address (the linker can't compute (u64)&initial_stab >>
  1076. * PAGE_SHIFT).
  1077. */
  1078. . = STAB0_PHYS_ADDR /* 0x6000 */
  1079. .globl initial_stab
  1080. initial_stab:
  1081. .space 4096
  1082. /*
  1083. * Data area reserved for FWNMI option.
  1084. * This address (0x7000) is fixed by the RPA.
  1085. */
  1086. . = 0x7000
  1087. .globl fwnmi_data_area
  1088. fwnmi_data_area:
  1089. /* iSeries does not use the FWNMI stuff, so it is safe to put
  1090. * this here, even if we later allow kernels that will boot on
  1091. * both pSeries and iSeries */
  1092. #ifdef CONFIG_PPC_ISERIES
  1093. . = LPARMAP_PHYS
  1094. #include "lparmap.s"
  1095. /*
  1096. * This ".text" is here for old compilers that generate a trailing
  1097. * .note section when compiling .c files to .s
  1098. */
  1099. .text
  1100. #endif /* CONFIG_PPC_ISERIES */
  1101. . = 0x8000
  1102. /*
  1103. * On pSeries, secondary processors spin in the following code.
  1104. * At entry, r3 = this processor's number (physical cpu id)
  1105. */
  1106. _GLOBAL(pSeries_secondary_smp_init)
  1107. mr r24,r3
  1108. /* turn on 64-bit mode */
  1109. bl .enable_64b_mode
  1110. isync
  1111. /* Copy some CPU settings from CPU 0 */
  1112. bl .__restore_cpu_setup
  1113. /* Set up a paca value for this processor. Since we have the
  1114. * physical cpu id in r24, we need to search the pacas to find
  1115. * which logical id maps to our physical one.
  1116. */
  1117. LOADADDR(r13, paca) /* Get base vaddr of paca array */
  1118. li r5,0 /* logical cpu id */
  1119. 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
  1120. cmpw r6,r24 /* Compare to our id */
  1121. beq 2f
  1122. addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
  1123. addi r5,r5,1
  1124. cmpwi r5,NR_CPUS
  1125. blt 1b
  1126. mr r3,r24 /* not found, copy phys to r3 */
  1127. b .kexec_wait /* next kernel might do better */
  1128. 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
  1129. /* From now on, r24 is expected to be logical cpuid */
  1130. mr r24,r5
  1131. 3: HMT_LOW
  1132. lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
  1133. /* start. */
  1134. sync
  1135. /* Create a temp kernel stack for use before relocation is on. */
  1136. ld r1,PACAEMERGSP(r13)
  1137. subi r1,r1,STACK_FRAME_OVERHEAD
  1138. cmpwi 0,r23,0
  1139. #ifdef CONFIG_SMP
  1140. bne .__secondary_start
  1141. #endif
  1142. b 3b /* Loop until told to go */
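/*
 * The paca search above is, in rough C terms:
 *
 *	for (i = 0; i < NR_CPUS; i++)
 *		if (paca[i].hw_cpu_id == phys_id)
 *			break;			// r13 = &paca[i], r24 = i
 *	if (i == NR_CPUS)
 *		kexec_wait(phys_id);		// no match, give up
 *
 * A sketch only; the field name behind PACAHWCPUID is an assumption.
 */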
  1143. #ifdef CONFIG_PPC_ISERIES
  1144. _STATIC(__start_initialization_iSeries)
  1145. /* Clear out the BSS */
  1146. LOADADDR(r11,__bss_stop)
  1147. LOADADDR(r8,__bss_start)
  1148. sub r11,r11,r8 /* bss size */
  1149. addi r11,r11,7 /* round up to an even double word */
  1150. rldicl. r11,r11,61,3 /* shift right by 3 */
  1151. beq 4f
  1152. addi r8,r8,-8
  1153. li r0,0
  1154. mtctr r11 /* zero this many doublewords */
  1155. 3: stdu r0,8(r8)
  1156. bdnz 3b
  1157. 4:
  1158. LOADADDR(r1,init_thread_union)
  1159. addi r1,r1,THREAD_SIZE
  1160. li r0,0
  1161. stdu r0,-STACK_FRAME_OVERHEAD(r1)
  1162. LOADADDR(r3,cpu_specs)
  1163. LOADADDR(r4,cur_cpu_spec)
  1164. li r5,0
  1165. bl .identify_cpu
  1166. LOADADDR(r2,__toc_start)
  1167. addi r2,r2,0x4000
  1168. addi r2,r2,0x4000
  1169. bl .iSeries_early_setup
  1170. bl .early_setup
  1171. /* relocation is on at this point */
  1172. b .start_here_common
  1173. #endif /* CONFIG_PPC_ISERIES */
  1174. #ifdef CONFIG_PPC_MULTIPLATFORM
  1175. _STATIC(__mmu_off)
  1176. mfmsr r3
  1177. andi. r0,r3,MSR_IR|MSR_DR
  1178. beqlr
  1179. andc r3,r3,r0
  1180. mtspr SPRN_SRR0,r4
  1181. mtspr SPRN_SRR1,r3
  1182. sync
  1183. rfid
  1184. b . /* prevent speculative execution */
  1185. /*
  1186. * Here is our main kernel entry point. We currently support two kinds of entry
  1187. * depending on the value of r5.
  1188. *
  1189. * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
  1190. * in r3...r7
  1191. *
  1192. * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
  1193. * DT block, r4 is a physical pointer to the kernel itself
  1194. *
  1195. */
  1196. _GLOBAL(__start_initialization_multiplatform)
  1197. /*
  1198. * Are we booted from a PROM OF-type client interface?
  1199. */
  1200. cmpldi cr0,r5,0
  1201. bne .__boot_from_prom /* yes -> prom */
  1202. /* Save parameters */
  1203. mr r31,r3
  1204. mr r30,r4
  1205. /* Make sure we are running in 64-bit mode */
  1206. bl .enable_64b_mode
  1207. /* Setup some critical 970 SPRs before switching MMU off */
  1208. bl .__970_cpu_preinit
  1209. /* cpu # */
  1210. li r24,0
  1211. /* Switch off MMU if not already */
  1212. LOADADDR(r4, .__after_prom_start - KERNELBASE)
  1213. add r4,r4,r30
  1214. bl .__mmu_off
  1215. b .__after_prom_start
  1216. _STATIC(__boot_from_prom)
  1217. /* Save parameters */
  1218. mr r31,r3
  1219. mr r30,r4
  1220. mr r29,r5
  1221. mr r28,r6
  1222. mr r27,r7
  1223. /* Make sure we are running in 64-bit mode */
  1224. bl .enable_64b_mode
  1225. /* put a relocation offset into r3 */
  1226. bl .reloc_offset
  1227. LOADADDR(r2,__toc_start)
  1228. addi r2,r2,0x4000
  1229. addi r2,r2,0x4000
  1230. /* Relocate the TOC from a virt addr to a real addr */
  1231. sub r2,r2,r3
  1232. /* Restore parameters */
  1233. mr r3,r31
  1234. mr r4,r30
  1235. mr r5,r29
  1236. mr r6,r28
  1237. mr r7,r27
  1238. /* Do all of the interaction with OF client interface */
  1239. bl .prom_init
  1240. /* We never return */
  1241. trap
  1242. /*
  1243. * At this point, r3 contains the physical address we are running at,
  1244. * returned by prom_init()
  1245. */
  1246. _STATIC(__after_prom_start)
  1247. /*
  1248. * We need to run with __start at physical address 0.
  1249. * This will leave some code in the first 256B of
  1250. * real memory, which is reserved for software use.
  1251. * The remainder of the first page is loaded with the fixed
  1252. * interrupt vectors. The next two pages are filled with
  1253. * unknown exception placeholders.
  1254. *
  1255. * Note: This process overwrites the OF exception vectors.
  1256. * r26 == relocation offset
  1257. * r27 == KERNELBASE
  1258. */
  1259. bl .reloc_offset
  1260. mr r26,r3
  1261. SET_REG_TO_CONST(r27,KERNELBASE)
  1262. li r3,0 /* target addr */
  1263. // XXX FIXME: Use phys returned by OF (r30)
  1264. sub r4,r27,r26 /* source addr */
  1265. /* current address of _start */
  1266. /* i.e. where we are running */
  1267. /* the source addr */
  1268. LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
  1269. sub r5,r5,r27
  1270. li r6,0x100 /* Start offset, the first 0x100 */
  1271. /* bytes were copied earlier. */
  1272. bl .copy_and_flush /* copy the first n bytes */
  1273. /* this includes the code being */
  1274. /* executed here. */
  1275. LOADADDR(r0, 4f) /* Jump to the copy of this code */
  1276. mtctr r0 /* that we just made/relocated */
  1277. bctr
  1278. 4: LOADADDR(r5,klimit)
  1279. sub r5,r5,r26
  1280. ld r5,0(r5) /* get the value of klimit */
  1281. sub r5,r5,r27
  1282. bl .copy_and_flush /* copy the rest */
  1283. b .start_here_multiplatform
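/*
 * Net effect of the sequence above, roughly: copy the kernel image from
 * wherever firmware left it down to physical address 0 (the first 0x100
 * bytes are skipped, having been handled earlier), jump into that low
 * copy once everything up to copy_to_here has been moved, and then copy
 * the remainder up to klimit from the relocated code.  The OF exception
 * vectors are overwritten in the process, as noted above.
 */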
  1284. #endif /* CONFIG_PPC_MULTIPLATFORM */
  1285. /*
  1286. * Copy routine used to copy the kernel to start at physical address 0
  1287. * and flush and invalidate the caches as needed.
  1288. * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
  1289. * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
  1290. *
  1291. * Note: this routine *only* clobbers r0, r6 and lr
  1292. */
  1293. _GLOBAL(copy_and_flush)
  1294. addi r5,r5,-8
  1295. addi r6,r6,-8
  1296. 4: li r0,16 /* Use the least common */
  1297. /* denominator cache line */
  1298. /* size. This results in */
  1299. /* extra cache line flushes */
  1300. /* but operation is correct. */
  1301. /* Can't get cache line size */
  1302. /* from NACA as it is being */
  1303. /* moved too. */
  1304. mtctr r0 /* put # words/line in ctr */
  1305. 3: addi r6,r6,8 /* copy a cache line */
  1306. ldx r0,r6,r4
  1307. stdx r0,r6,r3
  1308. bdnz 3b
  1309. dcbst r6,r3 /* write it to memory */
  1310. sync
  1311. icbi r6,r3 /* flush the icache line */
  1312. cmpld 0,r6,r5
  1313. blt 4b
  1314. sync
  1315. addi r5,r5,8
  1316. addi r6,r6,8
  1317. blr
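/*
 * Rough C equivalent of the loop above (a sketch only; dcbst/icbi/sync
 * stand in for the cache-management instructions):
 *
 *	while (offset < limit) {
 *		for (i = 0; i < 16; i++) {	// one 128-byte chunk
 *			offset += 8;
 *			*(u64 *)(dest + offset) = *(u64 *)(src + offset);
 *		}
 *		dcbst(dest + offset);		// push copied data to memory
 *		sync();
 *		icbi(dest + offset);		// toss the stale icache line
 *	}
 *	sync();
 */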
  1318. .align 8
  1319. copy_to_here:
  1320. #ifdef CONFIG_SMP
  1321. #ifdef CONFIG_PPC_PMAC
  1322. /*
  1323. * On PowerMac, secondary processors start from the reset vector, which
  1324. * is temporarily turned into a call to one of the functions below.
  1325. */
  1326. .section ".text";
  1327. .align 2 ;
  1328. .globl pmac_secondary_start_1
  1329. pmac_secondary_start_1:
  1330. li r24, 1
  1331. b .pmac_secondary_start
  1332. .globl pmac_secondary_start_2
  1333. pmac_secondary_start_2:
  1334. li r24, 2
  1335. b .pmac_secondary_start
  1336. .globl pmac_secondary_start_3
  1337. pmac_secondary_start_3:
  1338. li r24, 3
  1339. b .pmac_secondary_start
  1340. _GLOBAL(pmac_secondary_start)
  1341. /* turn on 64-bit mode */
  1342. bl .enable_64b_mode
  1343. isync
  1344. /* Copy some CPU settings from CPU 0 */
  1345. bl .__restore_cpu_setup
  1346. /* pSeries does that early, though I don't think we really need it */
  1347. mfmsr r3
  1348. ori r3,r3,MSR_RI
  1349. mtmsrd r3 /* RI on */
  1350. /* Set up a paca value for this processor. */
  1351. LOADADDR(r4, paca) /* Get base vaddr of paca array */
  1352. mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
  1353. add r13,r13,r4 /* for this processor. */
  1354. mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
  1355. /* Create a temp kernel stack for use before relocation is on. */
  1356. ld r1,PACAEMERGSP(r13)
  1357. subi r1,r1,STACK_FRAME_OVERHEAD
  1358. b .__secondary_start
  1359. #endif /* CONFIG_PPC_PMAC */
  1360. /*
  1361. * This function is called after the master CPU has released the
  1362. * secondary processors. The execution environment is relocation off.
  1363. * The paca for this processor has the following fields initialized at
  1364. * this point:
  1365. * 1. Processor number
  1366. * 2. Segment table pointer (virtual address)
  1367. * On entry the following are set:
  1368. * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
  1369. * r24 = cpu# (in Linux terms)
  1370. * r13 = paca virtual address
  1371. * SPRG3 = paca virtual address
  1372. */
_GLOBAL(__secondary_start)
	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SPRN_SDR1,r6		/* set the htab location */
#endif
	/* Initialize the first segment table (or SLB) entry */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries. */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table */
	ori	r4,r3,1			/* turn on valid bit */
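
	/* Point the ASR at this cpu's segment table.  On iSeries this is
	 * done through HvCall_setASR, on an RPA LPAR running on one of the
	 * *Star processors through the H_SET_ASR hcall, and directly with
	 * mtasr everywhere else (hence the PVR checks below).
	 */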
#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
	beq	98f			/* branch if result is 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar */
	beq	97f
	cmpwi	r3,0x36			/* IStar */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
#endif
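	/* Zero LR so the C code we rfid to below sees a NULL return
	 * address and backtraces terminate here. */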
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.			/* prevent speculative execution */
/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer */
	bl	.start_secondary
#endif /* CONFIG_SMP */
/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF (64-bit mode) */
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF (exceptions in 64-bit mode) */
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS.  It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely.  Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords */
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
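	/* HMT is the hardware multithreading found on the old "star"
	 * processors; the PVR checks below limit this path to Pulsar,
	 * Icestar and SStar parts. */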
	/* Start up the second thread on cpu 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif
	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000		/* TOC ptr conventionally points 0x8000 past __toc_start; */
	addi	r2,r2,0x4000		/* split into two adds since addi takes a signed 16-bit immediate */
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu
	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor. */
	sub	r13,r13,r26		/* convert to physical addr */
	mtspr	SPRN_SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation. */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup
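
	/* Same platform check as on the secondary startup path above: under
	 * an RPA hypervisor on a *Star CPU the ASR is set with the
	 * H_SET_ASR hcall, otherwise with mtasr. */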
	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
	beq	98f			/* branch if result is 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar */
	beq	97f
	cmpwi	r3,0x36			/* IStar */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	ld	r3,0(r3)
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,PLATFORM_LPAR
	bne	98f			/* branch if result is !0 */
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SPRN_SDR1,r6		/* set the htab location */
98:
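	/* Turn relocation on: rfid with MSR_KERNEL in SRR1 continues at
	 * start_here_common with translation enabled. */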
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.			/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */
	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor. */
	mtspr	SPRN_SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel
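	/* Not reached: .start_kernel does not return */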
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,SPRN_PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar */
	beq	91f
	b	101f

90:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,SPRN_PIR
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f
103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b
104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
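	/* Hand off to the common pSeries secondary entry with
	 * r3 = cpu number (r24) */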
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
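	/* The SPRs touched below (NIADORM, MSRDORM, TSC, TST, HID0, CTRL)
	 * appear to be the *Star HMT controls: point the dormant thread at
	 * __hmt_secondary_hold, then enable and wake it. */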
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	SPRN_NIADORM, r4
	mfspr	r4, SPRN_MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	SPRN_MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	SPRN_TSC, r4
	li	r4,0x1f4
	mtspr	SPRN_TST, r4
	mfspr	r4, SPRN_HID0
	ori	r4, r4, 0x1
	mtspr	SPRN_HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif
#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 * XXX This does nothing useful on iSeries, secondaries are
	 * already waiting on their paca.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE