head_64.S

  1. /*
  2. * arch/ppc64/kernel/head.S
  3. *
  4. * PowerPC version
  5. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  6. *
  7. * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
  8. * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
  9. * Adapted for Power Macintosh by Paul Mackerras.
  10. * Low-level exception handlers and MMU support
  11. * rewritten by Paul Mackerras.
  12. * Copyright (C) 1996 Paul Mackerras.
  13. *
  14. * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
  15. * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
  16. *
  17. * This file contains the low-level support and setup for the
  18. * PowerPC-64 platform, including trap and interrupt dispatch.
  19. *
  20. * This program is free software; you can redistribute it and/or
  21. * modify it under the terms of the GNU General Public License
  22. * as published by the Free Software Foundation; either version
  23. * 2 of the License, or (at your option) any later version.
  24. */
  25. #include <linux/config.h>
  26. #include <linux/threads.h>
  27. #include <asm/reg.h>
  28. #include <asm/page.h>
  29. #include <asm/mmu.h>
  30. #include <asm/ppc_asm.h>
  31. #include <asm/asm-offsets.h>
  32. #include <asm/bug.h>
  33. #include <asm/cputable.h>
  34. #include <asm/setup.h>
  35. #include <asm/hvcall.h>
  36. #include <asm/iseries/lpar_map.h>
  37. #include <asm/thread_info.h>
  38. #ifdef CONFIG_PPC_ISERIES
  39. #define DO_SOFT_DISABLE
  40. #endif
  41. /*
  42. * We lay out physical memory as follows:
  43. * 0x0000 - 0x00ff : Secondary processor spin code
  44. * 0x0100 - 0x2fff : pSeries Interrupt prologs
  45. * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
  46. * 0x6000 - 0x6fff : Initial (CPU0) segment table
  47. * 0x7000 - 0x7fff : FWNMI data area
  48. * 0x8000 - : Early init and support code
  49. */
  50. /*
  51. * SPRG Usage
  52. *
  53. * Register Definition
  54. *
  55. * SPRG0 reserved for hypervisor
  56. * SPRG1 temp - used to save gpr
  57. * SPRG2 temp - used to save gpr
  58. * SPRG3 virt addr of paca
  59. */
  60. /*
  61. * Entering into this code we make the following assumptions:
  62. * For pSeries:
  63. * 1. The MMU is off & open firmware is running in real mode.
  64. * 2. The kernel is entered at __start
  65. *
  66. * For iSeries:
  67. * 1. The MMU is on (as it always is for iSeries)
  68. * 2. The kernel is entered at system_reset_iSeries
  69. */
  70. .text
  71. .globl _stext
  72. _stext:
  73. #ifdef CONFIG_PPC_MULTIPLATFORM
  74. _GLOBAL(__start)
  75. /* NOP this out unconditionally */
  76. BEGIN_FTR_SECTION
  77. b .__start_initialization_multiplatform
  78. END_FTR_SECTION(0, 1)
  79. #endif /* CONFIG_PPC_MULTIPLATFORM */
  80. /* Catch branch to 0 in real mode */
  81. trap
  82. #ifdef CONFIG_PPC_ISERIES
  83. /*
  84. * At offset 0x20, there is a pointer to iSeries LPAR data.
  85. * This is required by the hypervisor
  86. */
  87. . = 0x20
  88. .llong hvReleaseData-KERNELBASE
  89. /*
  90. * At offset 0x28 and 0x30 are offsets to the mschunks_map
  91. * array (used by the iSeries LPAR debugger to do translation
  92. * between physical addresses and absolute addresses) and
  93. * to the pidhash table (also used by the debugger)
  94. */
  95. .llong mschunks_map-KERNELBASE
  96. .llong 0 /* pidhash-KERNELBASE SFRXXX */
  97. /* Offset 0x38 - Pointer to start of embedded System.map */
  98. .globl embedded_sysmap_start
  99. embedded_sysmap_start:
  100. .llong 0
  101. /* Offset 0x40 - Pointer to end of embedded System.map */
  102. .globl embedded_sysmap_end
  103. embedded_sysmap_end:
  104. .llong 0
  105. #endif /* CONFIG_PPC_ISERIES */
  106. /* Secondary processors spin on this value until it goes to 1. */
  107. .globl __secondary_hold_spinloop
  108. __secondary_hold_spinloop:
  109. .llong 0x0
  110. /* Secondary processors write this value with their cpu # */
  111. /* after they enter the spin loop immediately below. */
  112. .globl __secondary_hold_acknowledge
  113. __secondary_hold_acknowledge:
  114. .llong 0x0
  115. . = 0x60
  116. /*
  117. * The following code is used on pSeries to hold secondary processors
  118. * in a spin loop after they have been freed from OpenFirmware, but
  119. * before the bulk of the kernel has been relocated. This code
  120. * is relocated to physical address 0x60 before prom_init is run.
  121. * All of it must fit below the first exception vector at 0x100.
  122. */
  123. _GLOBAL(__secondary_hold)
  124. mfmsr r24
  125. ori r24,r24,MSR_RI
  126. mtmsrd r24 /* RI on */
  127. /* Grab our linux cpu number */
  128. mr r24,r3
  129. /* Tell the master cpu we're here */
  130. /* Relocation is off & we are located at an address less */
  131. /* than 0x100, so only need to grab low order offset. */
  132. std r24,__secondary_hold_acknowledge@l(0)
  133. sync
  134. /* All secondary cpus wait here until told to start. */
  135. 100: ld r4,__secondary_hold_spinloop@l(0)
  136. cmpdi 0,r4,1
  137. bne 100b
  138. #ifdef CONFIG_HMT
  139. b .hmt_init
  140. #else
  141. #ifdef CONFIG_SMP
  142. mr r3,r24
  143. b .pSeries_secondary_smp_init
  144. #else
  145. BUG_OPCODE
  146. #endif
  147. #endif
  148. /* This value is used to mark exception frames on the stack. */
  149. .section ".toc","aw"
  150. exception_marker:
  151. .tc ID_72656773_68657265[TC],0x7265677368657265
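/* Note: 0x7265677368657265 is ASCII "regshere"; EXCEPTION_PROLOG_COMMON
 * stores this value just below the pt_regs area so that stack walkers can
 * recognise an exception frame. */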
  152. .text
  153. /*
  154. * The following macros define the code that appears as
  155. * the prologue to each of the exception handlers. They
  156. * are split into two parts to allow a single kernel binary
  157. * to be used for pSeries and iSeries.
  158. * LOL. One day... - paulus
  159. */
  160. /*
  161. * We make as much of the exception code common between native
  162. * exception handlers (including pSeries LPAR) and iSeries LPAR
  163. * implementations as possible.
  164. */
  165. /*
  166. * This is the start of the interrupt handlers for pSeries
  167. * This code runs with relocation off.
  168. */
  169. #define EX_R9 0
  170. #define EX_R10 8
  171. #define EX_R11 16
  172. #define EX_R12 24
  173. #define EX_R13 32
  174. #define EX_SRR0 40
  175. #define EX_DAR 48
  176. #define EX_DSISR 56
  177. #define EX_CCR 60
  178. #define EX_R3 64
  179. #define EX_LR 72
  180. #define EXCEPTION_PROLOG_PSERIES(area, label) \
  181. mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
  182. std r9,area+EX_R9(r13); /* save r9 - r12 */ \
  183. std r10,area+EX_R10(r13); \
  184. std r11,area+EX_R11(r13); \
  185. std r12,area+EX_R12(r13); \
  186. mfspr r9,SPRN_SPRG1; \
  187. std r9,area+EX_R13(r13); \
  188. mfcr r9; \
  189. clrrdi r12,r13,32; /* get high part of &label */ \
  190. mfmsr r10; \
  191. mfspr r11,SPRN_SRR0; /* save SRR0 */ \
  192. ori r12,r12,(label)@l; /* virt addr of handler */ \
  193. ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
  194. mtspr SPRN_SRR0,r12; \
  195. mfspr r12,SPRN_SRR1; /* and SRR1 */ \
  196. mtspr SPRN_SRR1,r10; \
  197. rfid; \
  198. b . /* prevent speculative execution */
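/* On exit from EXCEPTION_PROLOG_PSERIES: r9 holds the saved CR, r11 the saved
 * SRR0, r12 the saved SRR1, the original r9-r13 are saved in the paca save
 * area, and the rfid above turns relocation (MSR_IR/MSR_DR) back on and jumps
 * to label##_common in virtual mode. */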
  199. /*
  200. * This is the start of the interrupt handlers for iSeries
  201. * This code runs with relocation on.
  202. */
  203. #define EXCEPTION_PROLOG_ISERIES_1(area) \
  204. mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
  205. std r9,area+EX_R9(r13); /* save r9 - r12 */ \
  206. std r10,area+EX_R10(r13); \
  207. std r11,area+EX_R11(r13); \
  208. std r12,area+EX_R12(r13); \
  209. mfspr r9,SPRN_SPRG1; \
  210. std r9,area+EX_R13(r13); \
  211. mfcr r9
  212. #define EXCEPTION_PROLOG_ISERIES_2 \
  213. mfmsr r10; \
  214. ld r11,PACALPPACA+LPPACASRR0(r13); \
  215. ld r12,PACALPPACA+LPPACASRR1(r13); \
  216. ori r10,r10,MSR_RI; \
  217. mtmsrd r10,1
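/* On iSeries the interrupted SRR0/SRR1 are found in the lppaca rather than in
 * the SRRs themselves, so EXCEPTION_PROLOG_ISERIES_2 loads them into r11/r12
 * from there and then sets MSR_RI; relocation is already on, hence the plain
 * mtmsrd rather than an rfid. */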
  218. /*
  219. * The common exception prolog is used for all except a few exceptions
  220. * such as a segment miss on a kernel address. We have to be prepared
  221. * to take another exception from the point where we first touch the
  222. * kernel stack onwards.
  223. *
  224. * On entry r13 points to the paca, r9-r13 are saved in the paca,
  225. * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
  226. * SRR1, and relocation is on.
  227. */
  228. #define EXCEPTION_PROLOG_COMMON(n, area) \
  229. andi. r10,r12,MSR_PR; /* See if coming from user */ \
  230. mr r10,r1; /* Save r1 */ \
  231. subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
  232. beq- 1f; \
  233. ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
  234. 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
  235. bge- cr1,bad_stack; /* abort if it is */ \
  236. std r9,_CCR(r1); /* save CR in stackframe */ \
  237. std r11,_NIP(r1); /* save SRR0 in stackframe */ \
  238. std r12,_MSR(r1); /* save SRR1 in stackframe */ \
  239. std r10,0(r1); /* make stack chain pointer */ \
  240. std r0,GPR0(r1); /* save r0 in stackframe */ \
  241. std r10,GPR1(r1); /* save r1 in stackframe */ \
  242. std r2,GPR2(r1); /* save r2 in stackframe */ \
  243. SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
  244. SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
  245. ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
  246. ld r10,area+EX_R10(r13); \
  247. std r9,GPR9(r1); \
  248. std r10,GPR10(r1); \
  249. ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
  250. ld r10,area+EX_R12(r13); \
  251. ld r11,area+EX_R13(r13); \
  252. std r9,GPR11(r1); \
  253. std r10,GPR12(r1); \
  254. std r11,GPR13(r1); \
  255. ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
  256. mflr r9; /* save LR in stackframe */ \
  257. std r9,_LINK(r1); \
  258. mfctr r10; /* save CTR in stackframe */ \
  259. std r10,_CTR(r1); \
  260. mfspr r11,SPRN_XER; /* save XER in stackframe */ \
  261. std r11,_XER(r1); \
  262. li r9,(n)+1; \
  263. std r9,_TRAP(r1); /* set trap number */ \
  264. li r10,0; \
  265. ld r11,exception_marker@toc(r2); \
  266. std r10,RESULT(r1); /* clear regs->result */ \
  267. std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
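/* The trap value stored above is (n)+1: the low bit flags that the
 * non-volatile GPRs have not yet been saved in the frame; .save_nvgprs
 * clears it once they have been. */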
  268. /*
  269. * Exception vectors.
  270. */
  271. #define STD_EXCEPTION_PSERIES(n, label) \
  272. . = n; \
  273. .globl label##_pSeries; \
  274. label##_pSeries: \
  275. HMT_MEDIUM; \
  276. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  277. RUNLATCH_ON(r13); \
  278. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
  279. #define STD_EXCEPTION_ISERIES(n, label, area) \
  280. .globl label##_iSeries; \
  281. label##_iSeries: \
  282. HMT_MEDIUM; \
  283. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  284. RUNLATCH_ON(r13); \
  285. EXCEPTION_PROLOG_ISERIES_1(area); \
  286. EXCEPTION_PROLOG_ISERIES_2; \
  287. b label##_common
  288. #define MASKABLE_EXCEPTION_ISERIES(n, label) \
  289. .globl label##_iSeries; \
  290. label##_iSeries: \
  291. HMT_MEDIUM; \
  292. mtspr SPRN_SPRG1,r13; /* save r13 */ \
  293. RUNLATCH_ON(r13); \
  294. EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
  295. lbz r10,PACAPROCENABLED(r13); \
  296. cmpwi 0,r10,0; \
  297. beq- label##_iSeries_masked; \
  298. EXCEPTION_PROLOG_ISERIES_2; \
  299. b label##_common;
  300. #ifdef DO_SOFT_DISABLE
  301. #define DISABLE_INTS \
  302. lbz r10,PACAPROCENABLED(r13); \
  303. li r11,0; \
  304. std r10,SOFTE(r1); \
  305. mfmsr r10; \
  306. stb r11,PACAPROCENABLED(r13); \
  307. ori r10,r10,MSR_EE; \
  308. mtmsrd r10,1
  309. #define ENABLE_INTS \
  310. lbz r10,PACAPROCENABLED(r13); \
  311. mfmsr r11; \
  312. std r10,SOFTE(r1); \
  313. ori r11,r11,MSR_EE; \
  314. mtmsrd r11,1
  315. #else /* hard enable/disable interrupts */
  316. #define DISABLE_INTS
  317. #define ENABLE_INTS \
  318. ld r12,_MSR(r1); \
  319. mfmsr r11; \
  320. rlwimi r11,r12,0,MSR_EE; \
  321. mtmsrd r11,1
  322. #endif
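/* With DO_SOFT_DISABLE (iSeries), DISABLE_INTS only disables interrupts in
 * software: it records the previous soft-enable state in SOFTE(r1), clears
 * PACAPROCENABLED, and actually sets MSR_EE so that the masked handlers below
 * can note any decrementer/external interrupt that arrives while we are
 * "disabled" on a shared processor. */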
  323. #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
  324. .align 7; \
  325. .globl label##_common; \
  326. label##_common: \
  327. EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
  328. DISABLE_INTS; \
  329. bl .save_nvgprs; \
  330. addi r3,r1,STACK_FRAME_OVERHEAD; \
  331. bl hdlr; \
  332. b .ret_from_except
  333. #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
  334. .align 7; \
  335. .globl label##_common; \
  336. label##_common: \
  337. EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
  338. DISABLE_INTS; \
  339. addi r3,r1,STACK_FRAME_OVERHEAD; \
  340. bl hdlr; \
  341. b .ret_from_except_lite
  342. /*
  343. * Start of pSeries system interrupt routines
  344. */
  345. . = 0x100
  346. .globl __start_interrupts
  347. __start_interrupts:
  348. STD_EXCEPTION_PSERIES(0x100, system_reset)
  349. . = 0x200
  350. _machine_check_pSeries:
  351. HMT_MEDIUM
  352. mtspr SPRN_SPRG1,r13 /* save r13 */
  353. RUNLATCH_ON(r13)
  354. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  355. . = 0x300
  356. .globl data_access_pSeries
  357. data_access_pSeries:
  358. HMT_MEDIUM
  359. mtspr SPRN_SPRG1,r13
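/* On CPUs without an SLB the feature section below tests for a segment table
 * miss on a kernel address: r13 = (DAR >> 60) with what looks like the DSISR
 * segment-table-miss bit (0x00200000) folded in as 0x20, so 0x2c means an STE
 * miss on a 0xC... (kernel) address, handled directly by do_stab_bolted. */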
  360. BEGIN_FTR_SECTION
  361. mtspr SPRN_SPRG2,r12
  362. mfspr r13,SPRN_DAR
  363. mfspr r12,SPRN_DSISR
  364. srdi r13,r13,60
  365. rlwimi r13,r12,16,0x20
  366. mfcr r12
  367. cmpwi r13,0x2c
  368. beq .do_stab_bolted_pSeries
  369. mtcrf 0x80,r12
  370. mfspr r12,SPRN_SPRG2
  371. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  372. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
  373. . = 0x380
  374. .globl data_access_slb_pSeries
  375. data_access_slb_pSeries:
  376. HMT_MEDIUM
  377. mtspr SPRN_SPRG1,r13
  378. RUNLATCH_ON(r13)
  379. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  380. std r3,PACA_EXSLB+EX_R3(r13)
  381. mfspr r3,SPRN_DAR
  382. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  383. mfcr r9
  384. #ifdef __DISABLED__
  385. /* Keep that around for when we re-implement dynamic VSIDs */
  386. cmpdi r3,0
  387. bge slb_miss_user_pseries
  388. #endif /* __DISABLED__ */
  389. std r10,PACA_EXSLB+EX_R10(r13)
  390. std r11,PACA_EXSLB+EX_R11(r13)
  391. std r12,PACA_EXSLB+EX_R12(r13)
  392. mfspr r10,SPRN_SPRG1
  393. std r10,PACA_EXSLB+EX_R13(r13)
  394. mfspr r12,SPRN_SRR1 /* and SRR1 */
  395. b .slb_miss_realmode /* Rel. branch works in real mode */
  396. STD_EXCEPTION_PSERIES(0x400, instruction_access)
  397. . = 0x480
  398. .globl instruction_access_slb_pSeries
  399. instruction_access_slb_pSeries:
  400. HMT_MEDIUM
  401. mtspr SPRN_SPRG1,r13
  402. RUNLATCH_ON(r13)
  403. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  404. std r3,PACA_EXSLB+EX_R3(r13)
  405. mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
  406. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  407. mfcr r9
  408. #ifdef __DISABLED__
  409. /* Keep that around for when we re-implement dynamic VSIDs */
  410. cmpdi r3,0
  411. bge slb_miss_user_pseries
  412. #endif /* __DISABLED__ */
  413. std r10,PACA_EXSLB+EX_R10(r13)
  414. std r11,PACA_EXSLB+EX_R11(r13)
  415. std r12,PACA_EXSLB+EX_R12(r13)
  416. mfspr r10,SPRN_SPRG1
  417. std r10,PACA_EXSLB+EX_R13(r13)
  418. mfspr r12,SPRN_SRR1 /* and SRR1 */
  419. b .slb_miss_realmode /* Rel. branch works in real mode */
  420. STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
  421. STD_EXCEPTION_PSERIES(0x600, alignment)
  422. STD_EXCEPTION_PSERIES(0x700, program_check)
  423. STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
  424. STD_EXCEPTION_PSERIES(0x900, decrementer)
  425. STD_EXCEPTION_PSERIES(0xa00, trap_0a)
  426. STD_EXCEPTION_PSERIES(0xb00, trap_0b)
  427. . = 0xc00
  428. .globl system_call_pSeries
  429. system_call_pSeries:
  430. HMT_MEDIUM
  431. RUNLATCH_ON(r9)
  432. mr r9,r13
  433. mfmsr r10
  434. mfspr r13,SPRN_SPRG3
  435. mfspr r11,SPRN_SRR0
  436. clrrdi r12,r13,32
  437. oris r12,r12,system_call_common@h
  438. ori r12,r12,system_call_common@l
  439. mtspr SPRN_SRR0,r12
  440. ori r10,r10,MSR_IR|MSR_DR|MSR_RI
  441. mfspr r12,SPRN_SRR1
  442. mtspr SPRN_SRR1,r10
  443. rfid
  444. b . /* prevent speculative execution */
  445. STD_EXCEPTION_PSERIES(0xd00, single_step)
  446. STD_EXCEPTION_PSERIES(0xe00, trap_0e)
  447. /* We need to deal with the Altivec unavailable exception
  448. * here which is at 0xf20, thus in the middle of the
  449. * prolog code of the PerformanceMonitor one. A little
  450. * trickery is thus necessary
  451. */
  452. . = 0xf00
  453. b performance_monitor_pSeries
  454. STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
  455. STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
  456. STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
  457. . = 0x3000
  458. /*** pSeries interrupt support ***/
  459. /* moved from 0xf00 */
  460. STD_EXCEPTION_PSERIES(., performance_monitor)
  461. .align 7
  462. _GLOBAL(do_stab_bolted_pSeries)
  463. mtcrf 0x80,r12
  464. mfspr r12,SPRN_SPRG2
  465. EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
  466. /*
  467. * We have some room here, so we use it to put the pSeries SLB miss user
  468. * trampoline code, keeping it reasonably far away from slb_miss_user_common
  469. * to avoid problems with rfid.
  470. *
  471. * This is used when the SLB miss handler has to go virtual, which doesn't
  472. * happen at the moment but will once we re-implement dynamic VSIDs for
  473. * shared page tables.
  474. */
  475. #ifdef __DISABLED__
  476. slb_miss_user_pseries:
  477. std r10,PACA_EXGEN+EX_R10(r13)
  478. std r11,PACA_EXGEN+EX_R11(r13)
  479. std r12,PACA_EXGEN+EX_R12(r13)
  480. mfspr r10,SPRG1
  481. ld r11,PACA_EXSLB+EX_R9(r13)
  482. ld r12,PACA_EXSLB+EX_R3(r13)
  483. std r10,PACA_EXGEN+EX_R13(r13)
  484. std r11,PACA_EXGEN+EX_R9(r13)
  485. std r12,PACA_EXGEN+EX_R3(r13)
  486. clrrdi r12,r13,32
  487. mfmsr r10
  488. mfspr r11,SRR0 /* save SRR0 */
  489. ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
  490. ori r10,r10,MSR_IR|MSR_DR|MSR_RI
  491. mtspr SRR0,r12
  492. mfspr r12,SRR1 /* and SRR1 */
  493. mtspr SRR1,r10
  494. rfid
  495. b . /* prevent spec. execution */
  496. #endif /* __DISABLED__ */
  497. /*
  498. * Vectors for the FWNMI option. Share common code.
  499. */
  500. .globl system_reset_fwnmi
  501. .align 7
  502. system_reset_fwnmi:
  503. HMT_MEDIUM
  504. mtspr SPRN_SPRG1,r13 /* save r13 */
  505. RUNLATCH_ON(r13)
  506. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
  507. .globl machine_check_fwnmi
  508. .align 7
  509. machine_check_fwnmi:
  510. HMT_MEDIUM
  511. mtspr SPRN_SPRG1,r13 /* save r13 */
  512. RUNLATCH_ON(r13)
  513. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  514. #ifdef CONFIG_PPC_ISERIES
  515. /*** ISeries-LPAR interrupt handlers ***/
  516. STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
  517. .globl data_access_iSeries
  518. data_access_iSeries:
  519. mtspr SPRN_SPRG1,r13
  520. BEGIN_FTR_SECTION
  521. mtspr SPRN_SPRG2,r12
  522. mfspr r13,SPRN_DAR
  523. mfspr r12,SPRN_DSISR
  524. srdi r13,r13,60
  525. rlwimi r13,r12,16,0x20
  526. mfcr r12
  527. cmpwi r13,0x2c
  528. beq .do_stab_bolted_iSeries
  529. mtcrf 0x80,r12
  530. mfspr r12,SPRN_SPRG2
  531. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  532. EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
  533. EXCEPTION_PROLOG_ISERIES_2
  534. b data_access_common
  535. .do_stab_bolted_iSeries:
  536. mtcrf 0x80,r12
  537. mfspr r12,SPRN_SPRG2
  538. EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
  539. EXCEPTION_PROLOG_ISERIES_2
  540. b .do_stab_bolted
  541. .globl data_access_slb_iSeries
  542. data_access_slb_iSeries:
  543. mtspr SPRN_SPRG1,r13 /* save r13 */
  544. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  545. std r3,PACA_EXSLB+EX_R3(r13)
  546. mfspr r3,SPRN_DAR
  547. std r9,PACA_EXSLB+EX_R9(r13)
  548. mfcr r9
  549. #ifdef __DISABLED__
  550. cmpdi r3,0
  551. bge slb_miss_user_iseries
  552. #endif
  553. std r10,PACA_EXSLB+EX_R10(r13)
  554. std r11,PACA_EXSLB+EX_R11(r13)
  555. std r12,PACA_EXSLB+EX_R12(r13)
  556. mfspr r10,SPRN_SPRG1
  557. std r10,PACA_EXSLB+EX_R13(r13)
  558. ld r12,PACALPPACA+LPPACASRR1(r13);
  559. b .slb_miss_realmode
  560. STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
  561. .globl instruction_access_slb_iSeries
  562. instruction_access_slb_iSeries:
  563. mtspr SPRN_SPRG1,r13 /* save r13 */
  564. mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
  565. std r3,PACA_EXSLB+EX_R3(r13)
  566. ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
  567. std r9,PACA_EXSLB+EX_R9(r13)
  568. mfcr r9
  569. #ifdef __DISABLED__
  570. cmpdi r3,0
  571. bge .slb_miss_user_iseries
  572. #endif
  573. std r10,PACA_EXSLB+EX_R10(r13)
  574. std r11,PACA_EXSLB+EX_R11(r13)
  575. std r12,PACA_EXSLB+EX_R12(r13)
  576. mfspr r10,SPRN_SPRG1
  577. std r10,PACA_EXSLB+EX_R13(r13)
  578. ld r12,PACALPPACA+LPPACASRR1(r13);
  579. b .slb_miss_realmode
  580. #ifdef __DISABLED__
  581. slb_miss_user_iseries:
  582. std r10,PACA_EXGEN+EX_R10(r13)
  583. std r11,PACA_EXGEN+EX_R11(r13)
  584. std r12,PACA_EXGEN+EX_R12(r13)
  585. mfspr r10,SPRG1
  586. ld r11,PACA_EXSLB+EX_R9(r13)
  587. ld r12,PACA_EXSLB+EX_R3(r13)
  588. std r10,PACA_EXGEN+EX_R13(r13)
  589. std r11,PACA_EXGEN+EX_R9(r13)
  590. std r12,PACA_EXGEN+EX_R3(r13)
  591. EXCEPTION_PROLOG_ISERIES_2
  592. b slb_miss_user_common
  593. #endif
  594. MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
  595. STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
  596. STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
  597. STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
  598. MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
  599. STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
  600. STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
  601. .globl system_call_iSeries
  602. system_call_iSeries:
  603. mr r9,r13
  604. mfspr r13,SPRN_SPRG3
  605. EXCEPTION_PROLOG_ISERIES_2
  606. b system_call_common
  607. STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
  608. STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
  609. STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
  610. .globl system_reset_iSeries
  611. system_reset_iSeries:
  612. mfspr r13,SPRN_SPRG3 /* Get paca address */
  613. mfmsr r24
  614. ori r24,r24,MSR_RI
  615. mtmsrd r24 /* RI on */
  616. lhz r24,PACAPACAINDEX(r13) /* Get processor # */
  617. cmpwi 0,r24,0 /* Are we processor 0? */
  618. beq .__start_initialization_iSeries /* Start up the first processor */
  619. mfspr r4,SPRN_CTRLF
  620. li r5,CTRL_RUNLATCH /* Turn off the run light */
  621. andc r4,r4,r5
  622. mtspr SPRN_CTRLT,r4
  623. 1:
  624. HMT_LOW
  625. #ifdef CONFIG_SMP
  626. lbz r23,PACAPROCSTART(r13) /* Test if this processor
  627. * should start */
  628. sync
  629. LOADADDR(r3,current_set)
  630. sldi r28,r24,3 /* get current_set[cpu#] */
  631. ldx r3,r3,r28
  632. addi r1,r3,THREAD_SIZE
  633. subi r1,r1,STACK_FRAME_OVERHEAD
  634. cmpwi 0,r23,0
  635. beq iSeries_secondary_smp_loop /* Loop until told to go */
  636. bne .__secondary_start /* Told to go: start this cpu */
  637. iSeries_secondary_smp_loop:
  638. /* Let the Hypervisor know we are alive */
  639. /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
  640. lis r3,0x8002
  641. rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
  642. #else /* CONFIG_SMP */
  643. /* Yield the processor. This is required for non-SMP kernels
  644. which are running on multi-threaded machines. */
  645. lis r3,0x8000
  646. rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
  647. addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
  648. li r4,0 /* "yield timed" */
  649. li r5,-1 /* "yield forever" */
  650. #endif /* CONFIG_SMP */
  651. li r0,-1 /* r0=-1 indicates a Hypervisor call */
  652. sc /* Invoke the hypervisor via a system call */
  653. mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
  654. b 1b /* If SMP not configured, secondaries
  655. * loop forever */
  656. .globl decrementer_iSeries_masked
  657. decrementer_iSeries_masked:
  658. li r11,1
  659. stb r11,PACALPPACA+LPPACADECRINT(r13)
  660. LOADBASE(r12,tb_ticks_per_jiffy)
  661. lwz r12,OFF(tb_ticks_per_jiffy)(r12)
  662. mtspr SPRN_DEC,r12
  663. /* fall through */
  664. .globl hardware_interrupt_iSeries_masked
  665. hardware_interrupt_iSeries_masked:
  666. mtcrf 0x80,r9 /* Restore regs */
  667. ld r11,PACALPPACA+LPPACASRR0(r13)
  668. ld r12,PACALPPACA+LPPACASRR1(r13)
  669. mtspr SPRN_SRR0,r11
  670. mtspr SPRN_SRR1,r12
  671. ld r9,PACA_EXGEN+EX_R9(r13)
  672. ld r10,PACA_EXGEN+EX_R10(r13)
  673. ld r11,PACA_EXGEN+EX_R11(r13)
  674. ld r12,PACA_EXGEN+EX_R12(r13)
  675. ld r13,PACA_EXGEN+EX_R13(r13)
  676. rfid
  677. b . /* prevent speculative execution */
  678. #endif /* CONFIG_PPC_ISERIES */
  679. /*** Common interrupt handlers ***/
  680. STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
  681. /*
  682. * Machine check is different because we use a different
  683. * save area: PACA_EXMC instead of PACA_EXGEN.
  684. */
  685. .align 7
  686. .globl machine_check_common
  687. machine_check_common:
  688. EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
  689. DISABLE_INTS
  690. bl .save_nvgprs
  691. addi r3,r1,STACK_FRAME_OVERHEAD
  692. bl .machine_check_exception
  693. b .ret_from_except
  694. STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
  695. STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
  696. STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
  697. STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
  698. STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
  699. STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
  700. STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
  701. #ifdef CONFIG_ALTIVEC
  702. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
  703. #else
  704. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
  705. #endif
  706. /*
  707. * Here we have detected that the kernel stack pointer is bad.
  708. * R9 contains the saved CR, r13 points to the paca,
  709. * r10 contains the (bad) kernel stack pointer,
  710. * r11 and r12 contain the saved SRR0 and SRR1.
  711. * We switch to using an emergency stack, save the registers there,
  712. * and call kernel_bad_stack(), which panics.
  713. */
  714. bad_stack:
  715. ld r1,PACAEMERGSP(r13)
  716. subi r1,r1,64+INT_FRAME_SIZE
  717. std r9,_CCR(r1)
  718. std r10,GPR1(r1)
  719. std r11,_NIP(r1)
  720. std r12,_MSR(r1)
  721. mfspr r11,SPRN_DAR
  722. mfspr r12,SPRN_DSISR
  723. std r11,_DAR(r1)
  724. std r12,_DSISR(r1)
  725. mflr r10
  726. mfctr r11
  727. mfxer r12
  728. std r10,_LINK(r1)
  729. std r11,_CTR(r1)
  730. std r12,_XER(r1)
  731. SAVE_GPR(0,r1)
  732. SAVE_GPR(2,r1)
  733. SAVE_4GPRS(3,r1)
  734. SAVE_2GPRS(7,r1)
  735. SAVE_10GPRS(12,r1)
  736. SAVE_10GPRS(22,r1)
  737. addi r11,r1,INT_FRAME_SIZE
  738. std r11,0(r1)
  739. li r12,0
  740. std r12,0(r11)
  741. ld r2,PACATOC(r13)
  742. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  743. bl .kernel_bad_stack
  744. b 1b
  745. /*
  746. * Return from an exception with minimal checks.
  747. * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
  748. * If interrupts have been enabled, or anything has been
  749. * done that might have changed the scheduling status of
  750. * any task or sent any task a signal, you should use
  751. * ret_from_except or ret_from_except_lite instead of this.
  752. */
  753. .globl fast_exception_return
  754. fast_exception_return:
  755. ld r12,_MSR(r1)
  756. ld r11,_NIP(r1)
  757. andi. r3,r12,MSR_RI /* check if RI is set */
  758. beq- unrecov_fer
  759. ld r3,_CCR(r1)
  760. ld r4,_LINK(r1)
  761. ld r5,_CTR(r1)
  762. ld r6,_XER(r1)
  763. mtcr r3
  764. mtlr r4
  765. mtctr r5
  766. mtxer r6
  767. REST_GPR(0, r1)
  768. REST_8GPRS(2, r1)
  769. mfmsr r10
  770. clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
  771. mtmsrd r10,1
  772. mtspr SPRN_SRR1,r12
  773. mtspr SPRN_SRR0,r11
  774. REST_4GPRS(10, r1)
  775. ld r1,GPR1(r1)
  776. rfid
  777. b . /* prevent speculative execution */
  778. unrecov_fer:
  779. bl .save_nvgprs
  780. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  781. bl .unrecoverable_exception
  782. b 1b
  783. /*
  784. * Here r13 points to the paca, r9 contains the saved CR,
  785. * SRR0 and SRR1 are saved in r11 and r12,
  786. * r9 - r13 are saved in paca->exgen.
  787. */
  788. .align 7
  789. .globl data_access_common
  790. data_access_common:
  791. RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
  792. mfspr r10,SPRN_DAR
  793. std r10,PACA_EXGEN+EX_DAR(r13)
  794. mfspr r10,SPRN_DSISR
  795. stw r10,PACA_EXGEN+EX_DSISR(r13)
  796. EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
  797. ld r3,PACA_EXGEN+EX_DAR(r13)
  798. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  799. li r5,0x300
  800. b .do_hash_page /* Try to handle as hpte fault */
  801. .align 7
  802. .globl instruction_access_common
  803. instruction_access_common:
  804. EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
  805. ld r3,_NIP(r1)
  806. andis. r4,r12,0x5820
  807. li r5,0x400
  808. b .do_hash_page /* Try to handle as hpte fault */
  809. /*
  810. * Here is the common SLB miss user handler. It is used when going to virtual
  811. * mode for SLB misses; it is currently not used.
  812. */
  813. #ifdef __DISABLED__
  814. .align 7
  815. .globl slb_miss_user_common
  816. slb_miss_user_common:
  817. mflr r10
  818. std r3,PACA_EXGEN+EX_DAR(r13)
  819. stw r9,PACA_EXGEN+EX_CCR(r13)
  820. std r10,PACA_EXGEN+EX_LR(r13)
  821. std r11,PACA_EXGEN+EX_SRR0(r13)
  822. bl .slb_allocate_user
  823. ld r10,PACA_EXGEN+EX_LR(r13)
  824. ld r3,PACA_EXGEN+EX_R3(r13)
  825. lwz r9,PACA_EXGEN+EX_CCR(r13)
  826. ld r11,PACA_EXGEN+EX_SRR0(r13)
  827. mtlr r10
  828. beq- slb_miss_fault
  829. andi. r10,r12,MSR_RI /* check for unrecoverable exception */
  830. beq- unrecov_user_slb
  831. mfmsr r10
  832. .machine push
  833. .machine "power4"
  834. mtcrf 0x80,r9
  835. .machine pop
  836. clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
  837. mtmsrd r10,1
  838. mtspr SRR0,r11
  839. mtspr SRR1,r12
  840. ld r9,PACA_EXGEN+EX_R9(r13)
  841. ld r10,PACA_EXGEN+EX_R10(r13)
  842. ld r11,PACA_EXGEN+EX_R11(r13)
  843. ld r12,PACA_EXGEN+EX_R12(r13)
  844. ld r13,PACA_EXGEN+EX_R13(r13)
  845. rfid
  846. b .
  847. slb_miss_fault:
  848. EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
  849. ld r4,PACA_EXGEN+EX_DAR(r13)
  850. li r5,0
  851. std r4,_DAR(r1)
  852. std r5,_DSISR(r1)
  853. b .handle_page_fault
  854. unrecov_user_slb:
  855. EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
  856. DISABLE_INTS
  857. bl .save_nvgprs
  858. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  859. bl .unrecoverable_exception
  860. b 1b
  861. #endif /* __DISABLED__ */
  862. /*
  863. * r13 points to the PACA, r9 contains the saved CR,
  864. * r12 contains the saved SRR1; SRR0 is still ready for return,
  865. * r3 has the faulting address
  866. * r9 - r13 are saved in paca->exslb.
  867. * r3 is saved in paca->slb_r3
  868. * We assume we aren't going to take any exceptions during this procedure.
  869. */
  870. _GLOBAL(slb_miss_realmode)
  871. mflr r10
  872. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  873. std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
  874. bl .slb_allocate_realmode
  875. /* All done -- return from exception. */
  876. ld r10,PACA_EXSLB+EX_LR(r13)
  877. ld r3,PACA_EXSLB+EX_R3(r13)
  878. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
  879. #ifdef CONFIG_PPC_ISERIES
  880. ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
  881. #endif /* CONFIG_PPC_ISERIES */
  882. mtlr r10
  883. andi. r10,r12,MSR_RI /* check for unrecoverable exception */
  884. beq- unrecov_slb
  885. .machine push
  886. .machine "power4"
  887. mtcrf 0x80,r9
  888. mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
  889. .machine pop
  890. #ifdef CONFIG_PPC_ISERIES
  891. mtspr SPRN_SRR0,r11
  892. mtspr SPRN_SRR1,r12
  893. #endif /* CONFIG_PPC_ISERIES */
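/* On pSeries we have stayed in real mode and taken no other exception, so
 * SRR0/SRR1 still hold the interrupted context and need no reload here. */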
  894. ld r9,PACA_EXSLB+EX_R9(r13)
  895. ld r10,PACA_EXSLB+EX_R10(r13)
  896. ld r11,PACA_EXSLB+EX_R11(r13)
  897. ld r12,PACA_EXSLB+EX_R12(r13)
  898. ld r13,PACA_EXSLB+EX_R13(r13)
  899. rfid
  900. b . /* prevent speculative execution */
  901. unrecov_slb:
  902. EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
  903. DISABLE_INTS
  904. bl .save_nvgprs
  905. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  906. bl .unrecoverable_exception
  907. b 1b
  908. .align 7
  909. .globl hardware_interrupt_common
  910. .globl hardware_interrupt_entry
  911. hardware_interrupt_common:
  912. EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
  913. hardware_interrupt_entry:
  914. DISABLE_INTS
  915. addi r3,r1,STACK_FRAME_OVERHEAD
  916. bl .do_IRQ
  917. b .ret_from_except_lite
  918. .align 7
  919. .globl alignment_common
  920. alignment_common:
  921. mfspr r10,SPRN_DAR
  922. std r10,PACA_EXGEN+EX_DAR(r13)
  923. mfspr r10,SPRN_DSISR
  924. stw r10,PACA_EXGEN+EX_DSISR(r13)
  925. EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
  926. ld r3,PACA_EXGEN+EX_DAR(r13)
  927. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  928. std r3,_DAR(r1)
  929. std r4,_DSISR(r1)
  930. bl .save_nvgprs
  931. addi r3,r1,STACK_FRAME_OVERHEAD
  932. ENABLE_INTS
  933. bl .alignment_exception
  934. b .ret_from_except
  935. .align 7
  936. .globl program_check_common
  937. program_check_common:
  938. EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
  939. bl .save_nvgprs
  940. addi r3,r1,STACK_FRAME_OVERHEAD
  941. ENABLE_INTS
  942. bl .program_check_exception
  943. b .ret_from_except
  944. .align 7
  945. .globl fp_unavailable_common
  946. fp_unavailable_common:
  947. EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
  948. bne .load_up_fpu /* if from user, just load it up */
  949. bl .save_nvgprs
  950. addi r3,r1,STACK_FRAME_OVERHEAD
  951. ENABLE_INTS
  952. bl .kernel_fp_unavailable_exception
  953. BUG_OPCODE
  954. .align 7
  955. .globl altivec_unavailable_common
  956. altivec_unavailable_common:
  957. EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
  958. #ifdef CONFIG_ALTIVEC
  959. BEGIN_FTR_SECTION
  960. bne .load_up_altivec /* if from user, just load it up */
  961. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  962. #endif
  963. bl .save_nvgprs
  964. addi r3,r1,STACK_FRAME_OVERHEAD
  965. ENABLE_INTS
  966. bl .altivec_unavailable_exception
  967. b .ret_from_except
  968. #ifdef CONFIG_ALTIVEC
  969. /*
  970. * load_up_altivec(unused, unused, tsk)
  971. * Disable VMX for the task which had it previously,
  972. * and save its vector registers in its thread_struct.
  973. * Enables the VMX for use in the kernel on return.
  974. * On SMP we know the VMX is free, since we give it up every
  975. * switch (ie, no lazy save of the vector registers).
  976. * On entry: r13 == 'current' && last_task_used_altivec != 'current'
  977. */
  978. _STATIC(load_up_altivec)
  979. mfmsr r5 /* grab the current MSR */
  980. oris r5,r5,MSR_VEC@h
  981. mtmsrd r5 /* enable use of VMX now */
  982. isync
  983. /*
  984. * For SMP, we don't do lazy VMX switching because it just gets too
  985. * horrendously complex, especially when a task switches from one CPU
  986. * to another. Instead we call giveup_altivec in switch_to.
  987. * VRSAVE isn't dealt with here, that is done in the normal context
  988. * switch code. Note that we could rely on vrsave value to eventually
  989. * avoid saving all of the VREGs here...
  990. */
  991. #ifndef CONFIG_SMP
  992. ld r3,last_task_used_altivec@got(r2)
  993. ld r4,0(r3)
  994. cmpdi 0,r4,0
  995. beq 1f
  996. /* Save VMX state to last_task_used_altivec's THREAD struct */
  997. addi r4,r4,THREAD
  998. SAVE_32VRS(0,r5,r4)
  999. mfvscr vr0
  1000. li r10,THREAD_VSCR
  1001. stvx vr0,r10,r4
  1002. /* Disable VMX for last_task_used_altivec */
  1003. ld r5,PT_REGS(r4)
  1004. ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  1005. lis r6,MSR_VEC@h
  1006. andc r4,r4,r6
  1007. std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  1008. 1:
  1009. #endif /* CONFIG_SMP */
  1010. /* Hack: if we get an altivec unavailable trap with VRSAVE
  1011. * set to all zeros, we assume this is a broken application
  1012. * that fails to set it properly, and thus we switch it to
  1013. * all 1's
  1014. */
  1015. mfspr r4,SPRN_VRSAVE
  1016. cmpdi 0,r4,0
  1017. bne+ 1f
  1018. li r4,-1
  1019. mtspr SPRN_VRSAVE,r4
  1020. 1:
  1021. /* enable use of VMX after return */
  1022. ld r4,PACACURRENT(r13)
  1023. addi r5,r4,THREAD /* Get THREAD */
  1024. oris r12,r12,MSR_VEC@h
  1025. std r12,_MSR(r1)
  1026. li r4,1
  1027. li r10,THREAD_VSCR
  1028. stw r4,THREAD_USED_VR(r5)
  1029. lvx vr0,r10,r5
  1030. mtvscr vr0
  1031. REST_32VRS(0,r4,r5)
  1032. #ifndef CONFIG_SMP
  1033. /* Update last_task_used_altivec to 'current' */
  1034. subi r4,r5,THREAD /* Back to 'current' */
  1035. std r4,0(r3)
  1036. #endif /* CONFIG_SMP */
  1037. /* restore registers and return */
  1038. b fast_exception_return
  1039. #endif /* CONFIG_ALTIVEC */
  1040. /*
  1041. * Hash table stuff
  1042. */
  1043. .align 7
  1044. _GLOBAL(do_hash_page)
  1045. std r3,_DAR(r1)
  1046. std r4,_DSISR(r1)
  1047. andis. r0,r4,0xa450 /* weird error? */
  1048. bne- .handle_page_fault /* weird error: don't try a HPTE, take the fault path */
  1049. BEGIN_FTR_SECTION
  1050. andis. r0,r4,0x0020 /* Is it a segment table fault? */
  1051. bne- .do_ste_alloc /* If so handle it */
  1052. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  1053. /*
  1054. * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
  1055. * accessing a userspace segment (even from the kernel). We assume
  1056. * kernel addresses always have the high bit set.
  1057. */
  1058. rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
  1059. rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
  1060. orc r0,r12,r0 /* MSR_PR | ~high_bit */
  1061. rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
  1062. ori r4,r4,1 /* add _PAGE_PRESENT */
  1063. rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
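/* r4 now holds the access flags passed to hash_page: _PAGE_PRESENT, plus
 * _PAGE_RW for stores, _PAGE_USER for user-mode or user-segment accesses,
 * and _PAGE_EXEC when the trap was the 0x400 instruction access fault. */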
  1064. /*
  1065. * On iSeries, we soft-disable interrupts here, then
  1066. * hard-enable interrupts so that the hash_page code can spin on
  1067. * the hash_table_lock without problems on a shared processor.
  1068. */
  1069. DISABLE_INTS
  1070. /*
  1071. * r3 contains the faulting address
  1072. * r4 contains the required access permissions
  1073. * r5 contains the trap number
  1074. *
  1075. * at return r3 = 0 for success
  1076. */
  1077. bl .hash_page /* build HPTE if possible */
  1078. cmpdi r3,0 /* see if hash_page succeeded */
  1079. #ifdef DO_SOFT_DISABLE
  1080. /*
  1081. * If we had interrupts soft-enabled at the point where the
  1082. * DSI/ISI occurred, and an interrupt came in during hash_page,
  1083. * handle it now.
  1084. * We jump to ret_from_except_lite rather than fast_exception_return
  1085. * because ret_from_except_lite will check for and handle pending
  1086. * interrupts if necessary.
  1087. */
  1088. beq .ret_from_except_lite
  1089. /* For a hash failure, we don't bother re-enabling interrupts */
  1090. ble- 12f
  1091. /*
  1092. * hash_page couldn't handle it, set soft interrupt enable back
  1093. * to what it was before the trap. Note that .local_irq_restore
  1094. * handles any interrupts pending at this point.
  1095. */
  1096. ld r3,SOFTE(r1)
  1097. bl .local_irq_restore
  1098. b 11f
  1099. #else
  1100. beq fast_exception_return /* Return from exception on success */
  1101. ble- 12f /* Failure return from hash_page */
  1102. /* fall through */
  1103. #endif
  1104. /* Here we have a page fault that hash_page can't handle. */
  1105. _GLOBAL(handle_page_fault)
  1106. ENABLE_INTS
  1107. 11: ld r4,_DAR(r1)
  1108. ld r5,_DSISR(r1)
  1109. addi r3,r1,STACK_FRAME_OVERHEAD
  1110. bl .do_page_fault
  1111. cmpdi r3,0
  1112. beq+ .ret_from_except_lite
  1113. bl .save_nvgprs
  1114. mr r5,r3
  1115. addi r3,r1,STACK_FRAME_OVERHEAD
  1116. lwz r4,_DAR(r1)
  1117. bl .bad_page_fault
  1118. b .ret_from_except
  1119. /* We have a page fault that hash_page could handle but HV refused
  1120. * the PTE insertion
  1121. */
  1122. 12: bl .save_nvgprs
  1123. addi r3,r1,STACK_FRAME_OVERHEAD
  1124. lwz r4,_DAR(r1)
  1125. bl .low_hash_fault
  1126. b .ret_from_except
  1127. /* here we have a segment miss */
  1128. _GLOBAL(do_ste_alloc)
  1129. bl .ste_allocate /* try to insert stab entry */
  1130. cmpdi r3,0
  1131. beq+ fast_exception_return
  1132. b .handle_page_fault
  1133. /*
  1134. * r13 points to the PACA, r9 contains the saved CR,
  1135. * r11 and r12 contain the saved SRR0 and SRR1.
  1136. * r9 - r13 are saved in paca->exslb.
  1137. * We assume we aren't going to take any exceptions during this procedure.
  1138. * We assume (DAR >> 60) == 0xc.
  1139. */
  1140. .align 7
  1141. _GLOBAL(do_stab_bolted)
  1142. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  1143. std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
  1144. /* Hash to the primary group */
  1145. ld r10,PACASTABVIRT(r13)
  1146. mfspr r11,SPRN_DAR
  1147. srdi r11,r11,28
  1148. rldimi r10,r11,7,52 /* r10 = first ste of the group */
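/* i.e. group offset = (ESID & 0x1f) * 128 bytes: the 4k stab is organised
 * as 32 groups of 8 16-byte segment table entries. */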
  1149. /* Calculate VSID */
  1150. /* This is a kernel address, so protovsid = ESID */
  1151. ASM_VSID_SCRAMBLE(r11, r9)
  1152. rldic r9,r11,12,16 /* r9 = vsid << 12 */
  1153. /* Search the primary group for a free entry */
  1154. 1: ld r11,0(r10) /* Test valid bit of the current ste */
  1155. andi. r11,r11,0x80
  1156. beq 2f
  1157. addi r10,r10,16
  1158. andi. r11,r10,0x70
  1159. bne 1b
  1160. /* For now, stick to searching only the primary group. */
  1161. /* At least for now, we use a very simple random castout scheme */
  1162. /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
  1163. mftb r11
  1164. rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
  1165. ori r11,r11,0x10
  1166. /* r10 currently points to an ste one past the group of interest */
  1167. /* make it point to the randomly selected entry */
  1168. subi r10,r10,128
  1169. or r10,r10,r11 /* r10 is the entry to invalidate */
  1170. isync /* mark the entry invalid */
  1171. ld r11,0(r10)
  1172. rldicl r11,r11,56,1 /* clear the valid bit */
  1173. rotldi r11,r11,8
  1174. std r11,0(r10)
  1175. sync
  1176. clrrdi r11,r11,28 /* Get the esid part of the ste */
  1177. slbie r11
  1178. 2: std r9,8(r10) /* Store the vsid part of the ste */
  1179. eieio
  1180. mfspr r11,SPRN_DAR /* Get the new esid */
  1181. clrrdi r11,r11,28 /* Permits a full 32b of ESID */
  1182. ori r11,r11,0x90 /* Turn on valid and kp */
  1183. std r11,0(r10) /* Put new entry back into the stab */
  1184. sync
  1185. /* All done -- return from exception. */
  1186. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
  1187. ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
  1188. andi. r10,r12,MSR_RI
  1189. beq- unrecov_slb
  1190. mtcrf 0x80,r9 /* restore CR */
  1191. mfmsr r10
  1192. clrrdi r10,r10,2
  1193. mtmsrd r10,1
  1194. mtspr SPRN_SRR0,r11
  1195. mtspr SPRN_SRR1,r12
  1196. ld r9,PACA_EXSLB+EX_R9(r13)
  1197. ld r10,PACA_EXSLB+EX_R10(r13)
  1198. ld r11,PACA_EXSLB+EX_R11(r13)
  1199. ld r12,PACA_EXSLB+EX_R12(r13)
  1200. ld r13,PACA_EXSLB+EX_R13(r13)
  1201. rfid
  1202. b . /* prevent speculative execution */
  1203. /*
  1204. * Space for CPU0's segment table.
  1205. *
  1206. * On iSeries, the hypervisor must fill in at least one entry before
  1207. * we get control (with relocate on). The address is given to the hv
  1208. * as a page number (see xLparMap in lpardata.c), so this must be at a
  1209. * fixed address (the linker can't compute (u64)&initial_stab >>
  1210. * PAGE_SHIFT).
  1211. */
  1212. . = STAB0_PHYS_ADDR /* 0x6000 */
  1213. .globl initial_stab
  1214. initial_stab:
  1215. .space 4096
  1216. /*
  1217. * Data area reserved for FWNMI option.
  1218. * This address (0x7000) is fixed by the RPA.
  1219. */
  1220. . = 0x7000
  1221. .globl fwnmi_data_area
  1222. fwnmi_data_area:
  1223. /* iSeries does not use the FWNMI stuff, so it is safe to put
  1224. * this here, even if we later allow kernels that will boot on
  1225. * both pSeries and iSeries */
  1226. #ifdef CONFIG_PPC_ISERIES
  1227. . = LPARMAP_PHYS
  1228. #include "lparmap.s"
  1229. /*
  1230. * This ".text" is here for old compilers that generate a trailing
  1231. * .note section when compiling .c files to .s
  1232. */
  1233. .text
  1234. #endif /* CONFIG_PPC_ISERIES */
  1235. . = 0x8000
  1236. /*
  1237. * On pSeries, secondary processors spin in the following code.
  1238. * At entry, r3 = this processor's number (physical cpu id)
  1239. */
  1240. _GLOBAL(pSeries_secondary_smp_init)
  1241. mr r24,r3
  1242. /* turn on 64-bit mode */
  1243. bl .enable_64b_mode
  1244. isync
  1245. /* Copy some CPU settings from CPU 0 */
  1246. bl .__restore_cpu_setup
  1247. /* Set up a paca value for this processor. Since we have the
  1248. * physical cpu id in r24, we need to search the pacas to find
  1249. * which logical id maps to our physical one.
  1250. */
  1251. LOADADDR(r13, paca) /* Get base vaddr of paca array */
  1252. li r5,0 /* logical cpu id */
  1253. 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
  1254. cmpw r6,r24 /* Compare to our id */
  1255. beq 2f
  1256. addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
  1257. addi r5,r5,1
  1258. cmpwi r5,NR_CPUS
  1259. blt 1b
  1260. mr r3,r24 /* not found, copy phys to r3 */
  1261. b .kexec_wait /* next kernel might do better */
  1262. 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
  1263. /* From now on, r24 is expected to be logical cpuid */
  1264. mr r24,r5
  1265. 3: HMT_LOW
  1266. lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
  1267. /* start. */
  1268. sync
  1269. /* Create a temp kernel stack for use before relocation is on. */
  1270. ld r1,PACAEMERGSP(r13)
  1271. subi r1,r1,STACK_FRAME_OVERHEAD
  1272. cmpwi 0,r23,0
  1273. #ifdef CONFIG_SMP
  1274. bne .__secondary_start
  1275. #endif
  1276. b 3b /* Loop until told to go */
  1277. #ifdef CONFIG_PPC_ISERIES
  1278. _STATIC(__start_initialization_iSeries)
  1279. /* Clear out the BSS */
  1280. LOADADDR(r11,__bss_stop)
  1281. LOADADDR(r8,__bss_start)
  1282. sub r11,r11,r8 /* bss size */
  1283. addi r11,r11,7 /* round up to an even double word */
  1284. rldicl. r11,r11,61,3 /* shift right by 3 */
  1285. beq 4f
  1286. addi r8,r8,-8
  1287. li r0,0
  1288. mtctr r11 /* zero this many doublewords */
  1289. 3: stdu r0,8(r8)
  1290. bdnz 3b
  1291. 4:
  1292. LOADADDR(r1,init_thread_union)
  1293. addi r1,r1,THREAD_SIZE
  1294. li r0,0
  1295. stdu r0,-STACK_FRAME_OVERHEAD(r1)
  1296. LOADADDR(r3,cpu_specs)
  1297. LOADADDR(r4,cur_cpu_spec)
  1298. li r5,0
  1299. bl .identify_cpu
  1300. LOADADDR(r2,__toc_start)
  1301. addi r2,r2,0x4000
  1302. addi r2,r2,0x4000
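/* r2 = __toc_start + 0x8000: TOC entries are reached with signed 16-bit
 * offsets, so the TOC pointer points at the middle of the 64k TOC window. */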
  1303. bl .iSeries_early_setup
  1304. bl .early_setup
  1305. /* relocation is on at this point */
  1306. b .start_here_common
  1307. #endif /* CONFIG_PPC_ISERIES */
  1308. #ifdef CONFIG_PPC_MULTIPLATFORM
  1309. _STATIC(__mmu_off)
  1310. mfmsr r3
  1311. andi. r0,r3,MSR_IR|MSR_DR
  1312. beqlr
  1313. andc r3,r3,r0
  1314. mtspr SPRN_SRR0,r4
  1315. mtspr SPRN_SRR1,r3
  1316. sync
  1317. rfid
  1318. b . /* prevent speculative execution */
  1319. /*
  1320. * Here is our main kernel entry point. We currently support 2 kinds of entries
  1321. * depending on the value of r5.
  1322. *
  1323. * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
  1324. * in r3...r7
  1325. *
  1326. * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
  1327. * DT block, r4 is a physical pointer to the kernel itself
  1328. *
  1329. */
  1330. _GLOBAL(__start_initialization_multiplatform)
  1331. /*
  1332. * Are we booted from a PROM OF-type client interface?
  1333. */
  1334. cmpldi cr0,r5,0
  1335. bne .__boot_from_prom /* yes -> prom */
  1336. /* Save parameters */
  1337. mr r31,r3
  1338. mr r30,r4
  1339. /* Make sure we are running in 64-bit mode */
  1340. bl .enable_64b_mode
  1341. /* Setup some critical 970 SPRs before switching MMU off */
  1342. bl .__970_cpu_preinit
  1343. /* cpu # */
  1344. li r24,0
  1345. /* Switch off MMU if not already */
  1346. LOADADDR(r4, .__after_prom_start - KERNELBASE)
  1347. add r4,r4,r30
  1348. bl .__mmu_off
  1349. b .__after_prom_start
  1350. _STATIC(__boot_from_prom)
  1351. /* Save parameters */
  1352. mr r31,r3
  1353. mr r30,r4
  1354. mr r29,r5
  1355. mr r28,r6
  1356. mr r27,r7
  1357. /* Make sure we are running in 64-bit mode */
  1358. bl .enable_64b_mode
  1359. /* put a relocation offset into r3 */
  1360. bl .reloc_offset
  1361. LOADADDR(r2,__toc_start)
  1362. addi r2,r2,0x4000
  1363. addi r2,r2,0x4000
  1364. /* Relocate the TOC from a virt addr to a real addr */
  1365. add r2,r2,r3
  1366. /* Restore parameters */
  1367. mr r3,r31
  1368. mr r4,r30
  1369. mr r5,r29
  1370. mr r6,r28
  1371. mr r7,r27
  1372. /* Do all of the interaction with OF client interface */
  1373. bl .prom_init
  1374. /* We never return */
  1375. trap
  1376. /*
  1377. * At this point, r3 contains the physical address we are running at,
  1378. * returned by prom_init()
  1379. */
  1380. _STATIC(__after_prom_start)
  1381. /*
  1382. * We need to run with __start at physical address 0.
  1383. * This will leave some code in the first 256B of
  1384. * real memory, which are reserved for software use.
  1385. * The remainder of the first page is loaded with the fixed
  1386. * interrupt vectors. The next two pages are filled with
  1387. * unknown exception placeholders.
  1388. *
  1389. * Note: This process overwrites the OF exception vectors.
  1390. * r26 == relocation offset
  1391. * r27 == KERNELBASE
  1392. */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	add	r4,r27,r26		/* source addr			*/
					/* current address of _start	*/
					/*   i.e. where we are running	*/
					/*	the source addr		*/

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	*/
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	*/

	bl	.copy_and_flush		/* copy the first n bytes	*/
					/* this includes the code being	*/
					/* executed here.		*/

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	*/
	bctr

4:	LOADADDR(r5,klimit)
	add	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit	*/
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest		*/
	b	.start_here_multiplatform
#endif /* CONFIG_PPC_MULTIPLATFORM */
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
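
/*
 * Note: because r6 is left >= r5 on return, the second call above
 * (copying up to klimit) resumes exactly where the first call
 * (copying up to copy_to_here) left off.
 */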
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

	.align 8
copy_to_here:
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
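	/*
	 * Each li/b pair below is exactly 8 bytes, which presumably lets
	 * the platform SMP code point the patched reset vector at
	 * __secondary_start_pmac_0 + 8 * cpu to hand each secondary its
	 * CPU number.
	 */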
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does this early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on. */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start
#endif /* CONFIG_PPC_PMAC */
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1    = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Load TOC */
	ld	r2,PACATOC(r13)

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries. */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.			/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif
/*
 * Enable 64-bit mode by setting MSR_SF and MSR_ISF.
 * This subroutine clobbers r11 and r12.
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF mask */
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF mask */
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS.  It may have been done in prom_init already,
	 * but that's irrelevant since prom_init will soon be detached from
	 * the kernel completely.  Besides, we need to clear it now for
	 * kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8		/* back up so the first stdu hits __bss_start */
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif
	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	add	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	add	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	add	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	add	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	add	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	add	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRN_SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation. */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.			/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */
/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRN_SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,SPRN_PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f
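
/*
 * Secondary hardware thread spins here: scan hmt_thread_data[] for the
 * entry matching our (masked) PIR, then pick up what appears to be the
 * corresponding Linux cpu number from the adjacent word into r24.
 */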
__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,SPRN_PIR
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34			/* Pulsar? */
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3ff
103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b
104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#ifdef CONFIG_HMT
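/*
 * Point the dormant second hardware thread at __hmt_secondary_hold and
 * bring it out of dormancy.  NIADORM/MSRDORM appear to hold the NIA/MSR
 * the dormant thread resumes with; the TSC/TST, HID0 and CTRL writes
 * below are the (seemingly undocumented) magic that actually starts it.
 */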
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	SPRN_NIADORM, r4
	mfspr	r4, SPRN_MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	SPRN_MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	SPRN_TSC, r4
	li	r4,0x1f4
	mtspr	SPRN_TST, r4
	mfspr	r4, SPRN_HID0
	ori	r4, r4, 0x1
	mtspr	SPRN_HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE