/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>

#define DO_SOFT_DISABLE
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */
	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our physical cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
	mtctr	r4
	mr	r3,r24
	bctr
#else
	BUG_OPCODE
#endif
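/*
 * Illustrative master-side sketch (an assumption, not code from this
 * file): once the kernel has been relocated, the boot cpu releases the
 * secondaries spinning above by storing a non-zero value to the
 * spinloop word, roughly:
 *
 *	LOAD_REG_IMMEDIATE(r5, __secondary_hold_spinloop)
 *	li	r6,1
 *	std	r6,0(r5)
 *	sync
 */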
	/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
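/*
 * The TC entry above is the ASCII string "regshere" (0x72 'r', 0x65 'e',
 * 0x67 'g', 0x73 's', 0x68 'h', 0x65 'e', 0x72 'r', 0x65 'e'), which
 * EXCEPTION_PROLOG_COMMON below stores at STACK_FRAME_OVERHEAD-16 of each
 * exception frame, so frames can be recognized on the stack.
 */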
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_R3		64
#define EX_LR		72

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
 * low halfword of the address, but for Kdump we need the whole low
 * word.
 */
#ifdef CONFIG_CRASH_DUMP
#define LOAD_HANDLER(reg, label)					\
	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
	ori	reg,reg,(label)@l;	/* .. and the rest */
#else
#define LOAD_HANDLER(reg, label)					\
	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
#endif
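/*
 * Worked example (the address is illustrative): for a handler at virtual
 * address 0xc000000000004e00, (label)@h is 0x0000 and (label)@l is 0x4e00.
 * The prologs build the 0xc000000000000000 part with "clrrdi r12,r13,32"
 * from the paca pointer, so the normal case only needs the @l halfword.
 * A kdump kernel runs at a different offset, so the label's low 32 bits
 * can exceed 16 bits and both halves are needed.
 */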
/*
 * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
 * The firmware calls the registered system_reset_fwnmi and
 * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
 * a 32bit application at the time of the event.
 * This firmware bug is present on POWER4 and JS20.
 */
#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)		\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	/* force 64bit mode */						\
	li	r11,5;			/* MSR_SF_LG|MSR_ISF_LG */	\
	rldimi	r10,r11,61,0;		/* insert into top 3 bits */	\
	/* done 64bit mode */						\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
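/*
 * Bit-level sketch of the "force 64bit mode" step above: r11 = 5 = 0b101,
 * i.e. MSR_SF (bit 0 from the MSB) and MSR_ISF (bit 2) with bit 1 clear.
 * "rldimi r10,r11,61,0" rotates r11 left by 61 and inserts bits 0-2 into
 * the MSR image in r10, so SF/ISF are forced on regardless of the mode
 * the firmware left the cpu in when it called the fwnmi handler.
 */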
#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r12,PACALPPACAPTR(r13);					\
	ld	r11,LPPACASRR0(r12);					\
	ld	r12,LPPACASRR1(r12);					\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	bge-	cr1,bad_stack;		/* abort if it is		*/ \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	ACCOUNT_CPU_USER_ENTRY(r9, r10);				\
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						\
	lbz	r10,PACASOFTIRQEN(r13);					\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r10,SOFTE(r1);						\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define HSTD_EXCEPTION_PSERIES(n, label)			\
	. = n;							\
	.globl label##_pSeries;					\
label##_pSeries:						\
	HMT_MEDIUM;						\
	mtspr	SPRN_SPRG1,r20;	/* save r20 */			\
	mfspr	r20,SPRN_HSRR0;	/* copy HSRR0 to SRR0 */	\
	mtspr	SPRN_SRR0,r20;					\
	mfspr	r20,SPRN_HSRR1;	/* copy HSRR1 to SRR1 */	\
	mtspr	SPRN_SRR1,r20;					\
	mfspr	r20,SPRN_SPRG1;	/* restore r20 */		\
	mtspr	SPRN_SPRG1,r13;	/* save r13 */			\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define MASKABLE_EXCEPTION_PSERIES(n, label)				\
	. = n;								\
	.globl label##_pSeries;						\
label##_pSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,PACA_EXGEN+EX_R9(r13);	/* save r9, r10 */	\
	std	r10,PACA_EXGEN+EX_R10(r13);				\
	lbz	r10,PACASOFTIRQEN(r13);					\
	mfcr	r9;							\
	cmpwi	r10,0;							\
	beq	masked_interrupt;					\
	mfspr	r10,SPRN_SPRG1;						\
	std	r10,PACA_EXGEN+EX_R13(r13);				\
	std	r11,PACA_EXGEN+EX_R11(r13);				\
	std	r12,PACA_EXGEN+EX_R12(r13);				\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	LOAD_HANDLER(r12,label##_common)				\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common
#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACASOFTIRQEN(r13);			\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common
#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS				\
	li	r11,0;				\
	stb	r11,PACASOFTIRQEN(r13);		\
BEGIN_FW_FTR_SECTION;				\
	stb	r11,PACAHARDIRQEN(r13);		\
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);	\
BEGIN_FW_FTR_SECTION;				\
	mfmsr	r10;				\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1;				\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#else
#define DISABLE_INTS				\
	li	r11,0;				\
	stb	r11,PACASOFTIRQEN(r13);		\
	stb	r11,PACAHARDIRQEN(r13)
#endif /* CONFIG_PPC_ISERIES */
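/*
 * Note on DISABLE_INTS above (a reading of the macros, hedged): interrupts
 * are disabled lazily.  Both variants clear paca->soft_enabled, which is
 * what the masked-interrupt paths test.  Off iSeries we also record
 * hard_enabled = 0; on iSeries firmware the macro instead turns MSR_EE
 * back on, so that platform appears to rely on the soft-disable flag alone.
 */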
#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling.
 */
#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	FINISH_NAP;					\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	FINISH_NAP;					\
	DISABLE_INTS;					\
	bl	.ppc64_runlatch_on;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	clrrdi	r11,r1,THREAD_SHIFT;		\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif
/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	b	.slb_miss_realmode	/* Rel. branch works in real mode */

	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SPRN_SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	MASKABLE_EXCEPTION_PSERIES(., performance_monitor)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG1
	rfid
	b	.
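/*
 * How the rldicl/rotldi pair above clears MSR_EE without a scratch mask:
 * MSR_EE is bit 48 counting from the MSB.  "rldicl r10,r10,48,1" rotates
 * left by 48 so EE lands in bit 0, and the mb=1 mask clears that bit;
 * "rotldi r10,r10,16" supplies the remaining 16 steps of a full 64-bit
 * rotation, putting every other bit back where it started.
 */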
	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code, reasonably far away from slb_miss_user_common, to
 * avoid problems with rfid.
 *
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't currently happen, but will once we re-implement
 * dynamic VSIDs for shared page tables.
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0		/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1		/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.			/* prevent spec. execution */
#endif /* __DISABLED__ */

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACAPTR(r13)
	ld	r12,LPPACASRR1(r12)
	b	.slb_miss_realmode

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACASRR0(r3)	/* get SRR0 value */
	std	r9,PACA_EXSLB+EX_R9(r13)
	mfcr	r9
#ifdef __DISABLED__
	cmpdi	r3,0
	bge	slb_miss_user_iseries
#endif
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG1
	std	r10,PACA_EXSLB+EX_R13(r13)
	ld	r12,PACALPPACAPTR(r13)
	ld	r12,LPPACASRR1(r12)
	b	.slb_miss_realmode

#ifdef __DISABLED__
slb_miss_user_iseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG1
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	EXCEPTION_PROLOG_ISERIES_2
	b	slb_miss_user_common
#endif

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRN_SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRN_SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOAD_REG_IMMEDIATE(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* We're told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */
	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	/* We may not have a valid TOC pointer in here. */
	li	r11,1
	ld	r12,PACALPPACAPTR(r13)
	stb	r11,LPPACADECRINT(r12)
	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
	lwz	r12,0(r12)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r12,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r12)
	ld	r12,LPPACASRR1(r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	ld	r12,_MSR(r1)
	stb	r3,PACASOFTIRQEN(r13)	/* restore paca->soft_enabled */
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */
/*
 * Here is the common SLB miss-user handler, used when going to virtual
 * mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	.handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
	bl	.ppc64_runlatch_on
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if so, don't try to insert an HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
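	/*
	 * Worked sketch of the _PAGE_USER computation above: rotldi moves
	 * the address's top bit into bit 49 from the MSB, which is the
	 * MSR_PR position; "orc r0,r12,r0" gives MSR | ~high_bit, so the
	 * bit at the PR position is 1 when MSR_PR is set or the top
	 * address bit is 0 (a user segment); the rlwimi then deposits
	 * that single bit as _PAGE_USER (0x0002).  Net effect: _PAGE_USER
	 * is requested for user-mode accesses and for kernel accesses to
	 * user addresses, matching the comment above.
	 */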
	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
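	/*
	 * Sketch of the hash above: each STE is 16 bytes and a group is
	 * 8 STEs (128 bytes), so the 4K segment table holds 32 groups.
	 * The rldimi inserts the low 5 bits of the ESID, shifted left 7,
	 * into the stab base, i.e. group = stab_base + (esid & 0x1f) * 128.
	 */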
	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b
	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl	initial_stab
initial_stab:
	.space	4096
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl	fwnmi_data_area
fwnmi_data_area:

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
	.text
#endif /* CONFIG_PPC_ISERIES */
	. = 0x8000

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_IMMEDIATE(r13, paca)	/* Get base vaddr of paca array	*/
	li	r5,0			/* logical cpu id		*/
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	*/
	cmpw	r6,r24			/* Compare to our id		*/
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	*/
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	*/
	b	.kexec_wait		/* next kernel might do better	*/

2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should start */
	sync

#ifndef CONFIG_SMP
	b	3b			/* Never go on non-SMP		*/
#else
	cmpwi	0,r23,0
	beq	3b			/* Loop until told to go	*/

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0
	beq	4f
	ld	r23,0(r23)
	mtctr	r23
	bctrl

4:	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start
#endif
#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOAD_REG_IMMEDIATE(r11,__bss_stop)
	LOAD_REG_IMMEDIATE(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
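	/*
	 * r8 is biased down by 8 because stdu pre-increments its base
	 * register: the first store in the loop below then lands
	 * exactly on __bss_start.
	 */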
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b

4:
	LOAD_REG_IMMEDIATE(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOAD_REG_IMMEDIATE(r3,cpu_specs)
	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu
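	/*
	 * The TOC pointer lives 0x8000 bytes past __toc_start so that
	 * 16-bit signed offsets can reach either side of it; addi's
	 * immediate is limited to 0x7fff, hence the two 0x4000 steps.
	 */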
	LOAD_REG_IMMEDIATE(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	bl	.iSeries_early_setup
	bl	.early_setup

	/* relocation is on at this point */
	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__mmu_off)
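	/*
	 * Turn the MMU off: r4 holds the (real) address to continue at.
	 * If IR/DR are already clear we simply return; otherwise we
	 * clear them in a copy of the MSR and rfid to r4, which switches
	 * translation off and branches in a single operation.
	 */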
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */

/*
 * Here is our main kernel entry point. We currently support 2 kinds
 * of entry, depending on the value of r5.
 *
 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *               in r3...r7
 *
 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *               DT block, r4 is a physical pointer to the kernel itself
 */
_GLOBAL(__start_initialization_multiplatform)
#ifdef CONFIG_PPC_MULTIPLATFORM
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom	/* yes -> prom */
#endif

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39		/* 970 */
	beq	1f
	cmpwi	r0,0x3c		/* 970FX */
	beq	1f
	cmpwi	r0,0x44		/* 970MP */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already */
	LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start
#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer will be unaligned on PowerMac
	 */
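	/* (rldicr with mask 0:59 clears the low 4 bits, rounding r1
	 * down to a 16-byte boundary) */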
	rldicr	r1,r1,0,59

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOAD_REG_IMMEDIATE(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	add	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap
#endif
/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	LOAD_REG_IMMEDIATE(r27, KERNELBASE)

	LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)	/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	add	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/* the source addr		 */
	cmpdi	r4,0			/* In some cases the loader may  */
	beq	.start_here_multiplatform /* have already put us at zero */
					/* so we can skip the copy.	 */
	LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOAD_REG_IMMEDIATE(r0, 4f)	/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOAD_REG_IMMEDIATE(r5,klimit)
	add	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform
#endif /* CONFIG_PPC_MULTIPLATFORM */
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
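	/*
	 * r5 and r6 are biased down by 8 because the loop below
	 * pre-increments r6 before each ldx/stdx pair; both are
	 * un-biased again before returning.
	 */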
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

	.align 8
copy_to_here:
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries does that early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_IMMEDIATE(r4, paca)	/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start
#endif /* CONFIG_PPC_PMAC */
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Load TOC */
	ld	r2,PACATOC(r13)

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries.	*/
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	*/
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ori	r4,r4,MSR_EE
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r7,PACASOFTIRQEN(r13)
	stb	r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)	/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif
/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
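	/*
	 * MSR_SF (64-bit mode) and MSR_ISF (64-bit interrupt mode) live
	 * in the upper word of the MSR, out of reach of ori's 16-bit
	 * immediate, so each bit is built with li/rldicr and OR'd in.
	 */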
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init already,
	 * but that's irrelevant since prom_init will soon be detached
	 * from the kernel completely. Besides, we need to clear it now
	 * for kexec-style entry.
	 */
	LOAD_REG_IMMEDIATE(r11,__bss_stop)
	LOAD_REG_IMMEDIATE(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b

4:
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_IMMEDIATE(r3,init_thread_union)
	add	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOAD_REG_IMMEDIATE(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	add	r2,r2,r26

	LOAD_REG_IMMEDIATE(r3, cpu_specs)
	add	r3,r3,r26
	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
	add	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	LOAD_REG_IMMEDIATE(r3, .start_here_common)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
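	/*
	 * rfid with SRR0 = the virtual entry point and SRR1 = MSR_KERNEL
	 * (which has IR/DR set) turns relocation on and branches to
	 * start_here_common in one operation.
	 */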
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOAD_REG_IMMEDIATE(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups
	bl	.do_fw_ftr_fixups

	/* ptr to current */
	LOAD_REG_IMMEDIATE(r4, init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
	li	r5,0
	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r5,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	bl	.start_kernel

	/* Not reached */
	BUG_OPCODE
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE