exceptions-64s.S 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968
  1. /*
  2. * This file contains the 64-bit "server" PowerPC variant
  3. * of the low level exception handling including exception
  4. * vectors, exception return, part of the slb and stab
  5. * handling and other fixed offset specific things.
  6. *
  7. * This file is meant to be #included from head_64.S due to
  8. * position dependent assembly.
  9. *
  10. * Most of this originates from head_64.S and thus has the same
  11. * copyright history.
  12. *
  13. */
  14. #include <asm/exception-64s.h>
  15. /*
  16. * We layout physical memory as follows:
  17. * 0x0000 - 0x00ff : Secondary processor spin code
  18. * 0x0100 - 0x2fff : pSeries Interrupt prologs
  19. * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
  20. * 0x6000 - 0x6fff : Initial (CPU0) segment table
  21. * 0x7000 - 0x7fff : FWNMI data area
  22. * 0x8000 - : Early init and support code
  23. */
  24. /*
  25. * This is the start of the interrupt handlers for pSeries
  26. * This code runs with relocation off.
  27. * Code from here to __end_interrupts gets copied down to real
  28. * address 0x100 when we are running a relocatable kernel.
  29. * Therefore any relative branches in this section must only
  30. * branch to labels in this section.
  31. */
/*
 * Fixed-offset interrupt vectors begin here.  Each ". = 0xNNN" pins the
 * following code to its architected vector address; everything up to
 * __end_interrupts may be copied down to real address 0x100, so branches
 * here must stay within this section (see the comment above).
 */
  32. . = 0x100
  33. .globl __start_interrupts
  34. __start_interrupts:
  35. STD_EXCEPTION_PSERIES(0x100, system_reset)
  36. . = 0x200
  37. _machine_check_pSeries:
  38. HMT_MEDIUM
  39. mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
  40. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  41. . = 0x300
  42. .globl data_access_pSeries
  43. data_access_pSeries:
  44. HMT_MEDIUM
  45. mtspr SPRN_SPRG_SCRATCH0,r13
/*
 * On CPUs without an SLB (CPU_FTR_SLB clear, i.e. segment-table machines)
 * peek at DAR/DSISR first: r13 = DAR>>60 with one DSISR bit (mask 0x20)
 * inserted.  The value 0x2c selects a kernel-region (DAR>>60 == 0xc)
 * access with that DSISR bit set, which is handled as a bolted segment
 * table miss.  CR is preserved in r12 around the test.
 */
  46. BEGIN_FTR_SECTION
  47. mtspr SPRN_SPRG_SCRATCH1,r12
  48. mfspr r13,SPRN_DAR
  49. mfspr r12,SPRN_DSISR
  50. srdi r13,r13,60
  51. rlwimi r13,r12,16,0x20
  52. mfcr r12
  53. cmpwi r13,0x2c
  54. beq do_stab_bolted_pSeries
  55. mtcrf 0x80,r12
  56. mfspr r12,SPRN_SPRG_SCRATCH1
  57. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  58. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
/*
 * 0x380: data SLB miss.  Saves r3, r9-r13 and CR into the PACA_EXSLB
 * save area, puts the faulting address (DAR) in r3 and SRR1 in r12,
 * then enters .slb_miss_realmode (directly, or via CTR when the kernel
 * is relocatable and the vector may run at a copied-down address).
 */
  59. . = 0x380
  60. .globl data_access_slb_pSeries
  61. data_access_slb_pSeries:
  62. HMT_MEDIUM
  63. mtspr SPRN_SPRG_SCRATCH0,r13
  64. mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
  65. std r3,PACA_EXSLB+EX_R3(r13)
  66. mfspr r3,SPRN_DAR
  67. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  68. mfcr r9
  69. #ifdef __DISABLED__
  70. /* Keep that around for when we re-implement dynamic VSIDs */
  71. cmpdi r3,0
  72. bge slb_miss_user_pseries
  73. #endif /* __DISABLED__ */
  74. std r10,PACA_EXSLB+EX_R10(r13)
  75. std r11,PACA_EXSLB+EX_R11(r13)
  76. std r12,PACA_EXSLB+EX_R12(r13)
  77. mfspr r10,SPRN_SPRG_SCRATCH0
  78. std r10,PACA_EXSLB+EX_R13(r13)
  79. mfspr r12,SPRN_SRR1 /* and SRR1 */
  80. #ifndef CONFIG_RELOCATABLE
  81. b .slb_miss_realmode
  82. #else
  83. /*
  84. * We can't just use a direct branch to .slb_miss_realmode
  85. * because the distance from here to there depends on where
  86. * the kernel ends up being put.
  87. */
  88. mfctr r11
  89. ld r10,PACAKBASE(r13)
  90. LOAD_HANDLER(r10, .slb_miss_realmode)
  91. mtctr r10
  92. bctr
  93. #endif
  94. STD_EXCEPTION_PSERIES(0x400, instruction_access)
/*
 * 0x480: instruction SLB miss.  Mirrors data_access_slb_pSeries above,
 * except the faulting address comes from SRR0 instead of DAR.
 * r11 is saved via CTR in the relocatable case (mfctr before bctr).
 */
  95. . = 0x480
  96. .globl instruction_access_slb_pSeries
  97. instruction_access_slb_pSeries:
  98. HMT_MEDIUM
  99. mtspr SPRN_SPRG_SCRATCH0,r13
  100. mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
  101. std r3,PACA_EXSLB+EX_R3(r13)
  102. mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
  103. std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
  104. mfcr r9
  105. #ifdef __DISABLED__
  106. /* Keep that around for when we re-implement dynamic VSIDs */
  107. cmpdi r3,0
  108. bge slb_miss_user_pseries
  109. #endif /* __DISABLED__ */
  110. std r10,PACA_EXSLB+EX_R10(r13)
  111. std r11,PACA_EXSLB+EX_R11(r13)
  112. std r12,PACA_EXSLB+EX_R12(r13)
  113. mfspr r10,SPRN_SPRG_SCRATCH0
  114. std r10,PACA_EXSLB+EX_R13(r13)
  115. mfspr r12,SPRN_SRR1 /* and SRR1 */
  116. #ifndef CONFIG_RELOCATABLE
  117. b .slb_miss_realmode
  118. #else
  119. mfctr r11
  120. ld r10,PACAKBASE(r13)
  121. LOAD_HANDLER(r10, .slb_miss_realmode)
  122. mtctr r10
  123. bctr
  124. #endif
  125. MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
  126. STD_EXCEPTION_PSERIES(0x600, alignment)
  127. STD_EXCEPTION_PSERIES(0x700, program_check)
  128. STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
  129. MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
  130. STD_EXCEPTION_PSERIES(0xa00, trap_0a)
  131. STD_EXCEPTION_PSERIES(0xb00, trap_0b)
/*
 * 0xc00: system call.  On CPUs with CPU_FTR_REAL_LE, the magic syscall
 * number 0x1ebe in r0 is the fast endian-switch call handled at 1: below
 * (just flips MSR_LE in SRR1 and returns).  Otherwise, build the virtual
 * address of system_call_entry from PACAKBASE via LOAD_HANDLER and rfid
 * to it with the kernel MSR from PACAKMSR.  On entry to system_call_common:
 * r9 = saved r13, r11 = SRR0 (return address), r12 = SRR1 (user MSR).
 */
  132. . = 0xc00
  133. .globl system_call_pSeries
  134. system_call_pSeries:
  135. HMT_MEDIUM
  136. BEGIN_FTR_SECTION
  137. cmpdi r0,0x1ebe
  138. beq- 1f
  139. END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
  140. mr r9,r13
  141. mfspr r13,SPRN_SPRG_PACA
  142. mfspr r11,SPRN_SRR0
  143. ld r12,PACAKBASE(r13)
  144. ld r10,PACAKMSR(r13)
  145. LOAD_HANDLER(r12, system_call_entry)
  146. mtspr SPRN_SRR0,r12
  147. mfspr r12,SPRN_SRR1
  148. mtspr SPRN_SRR1,r10
  149. rfid
  150. b . /* prevent speculative execution */
  151. /* Fast LE/BE switch system call */
  152. 1: mfspr r12,SPRN_SRR1
  153. xori r12,r12,MSR_LE
  154. mtspr SPRN_SRR1,r12
  155. rfid /* return to userspace */
  156. b .
  157. STD_EXCEPTION_PSERIES(0xd00, single_step)
  158. STD_EXCEPTION_PSERIES(0xe00, trap_0e)
  159. /* We need to deal with the Altivec unavailable exception
  160. * here which is at 0xf20, thus in the middle of the
  161. * prolog code of the PerformanceMonitor one. A little
  162. * trickery is thus necessary
  163. */
/*
 * 0xf00/0xf20/0xf40 are only 0x20 bytes apart, too small for a full
 * prolog, so each vector is a single branch to an out-of-line prolog
 * placed after 0x3000 (see "moved from 0xf00" below).
 */
  164. . = 0xf00
  165. b performance_monitor_pSeries
  166. . = 0xf20
  167. b altivec_unavailable_pSeries
  168. . = 0xf40
  169. b vsx_unavailable_pSeries
  170. #ifdef CONFIG_CBE_RAS
  171. HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
  172. #endif /* CONFIG_CBE_RAS */
  173. STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
  174. #ifdef CONFIG_CBE_RAS
  175. HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
  176. #endif /* CONFIG_CBE_RAS */
  177. STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
  178. #ifdef CONFIG_CBE_RAS
  179. HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
  180. #endif /* CONFIG_CBE_RAS */
  181. . = 0x3000
  182. /*** pSeries interrupt support ***/
  183. /* moved from 0xf00 */
  184. STD_EXCEPTION_PSERIES(., performance_monitor)
  185. STD_EXCEPTION_PSERIES(., altivec_unavailable)
  186. STD_EXCEPTION_PSERIES(., vsx_unavailable)
  187. /*
  188. * An interrupt came in while soft-disabled; clear EE in SRR1,
  189. * clear paca->hard_enabled and return.
  190. */
/*
 * Entered from the maskable-exception prolog with r9/r10 live in the
 * PACA_EXGEN save area; r10 is presumably the new hard_enabled value
 * (set up by the prolog that branched here — confirm against
 * exception-64s.h).  The rldicl/rotldi pair rotates MSR_EE out, clears
 * it, and rotates back, then we rfid straight back to the interrupted
 * context without taking the interrupt.
 */
  191. masked_interrupt:
  192. stb r10,PACAHARDIRQEN(r13)
  193. mtcrf 0x80,r9
  194. ld r9,PACA_EXGEN+EX_R9(r13)
  195. mfspr r10,SPRN_SRR1
  196. rldicl r10,r10,48,1 /* clear MSR_EE */
  197. rotldi r10,r10,16
  198. mtspr SPRN_SRR1,r10
  199. ld r10,PACA_EXGEN+EX_R10(r13)
  200. mfspr r13,SPRN_SPRG_SCRATCH0
  201. rfid
  202. b .
/*
 * Target of the early-divert test in data_access_pSeries: restore CR and
 * r12 (stashed in SCRATCH1 there), then run the normal prolog into the
 * real-mode bolted-STAB handler.
 */
  203. .align 7
  204. do_stab_bolted_pSeries:
  205. mtcrf 0x80,r12
  206. mfspr r12,SPRN_SPRG_SCRATCH1
  207. EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
  208. #ifdef CONFIG_PPC_PSERIES
  209. /*
  210. * Vectors for the FWNMI option. Share common code.
  211. */
/*
 * Firmware-assisted NMI entry points: identical prologs to the 0x100 and
 * 0x200 vectors, but registered with firmware at these addresses.
 */
  212. .globl system_reset_fwnmi
  213. .align 7
  214. system_reset_fwnmi:
  215. HMT_MEDIUM
  216. mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
  217. EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
  218. .globl machine_check_fwnmi
  219. .align 7
  220. machine_check_fwnmi:
  221. HMT_MEDIUM
  222. mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
  223. EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
  224. #endif /* CONFIG_PPC_PSERIES */
  225. #ifdef __DISABLED__
  226. /*
  227. * This is used for when the SLB miss handler has to go virtual,
  228. * which doesn't happen for now anymore but will once we re-implement
  229. * dynamic VSIDs for shared page tables
  230. */
/*
 * NOTE: compiled out (__DISABLED__).  Copies the EXSLB-saved state into
 * EXGEN, then turns on IR/DR/RI and rfids to slb_miss_user_common in
 * virtual mode.  Uses unprefixed SRR0/SRR1/SPRG_SCRATCH0 names, which
 * would need the SPRN_ prefixes if ever re-enabled — left as-is since
 * the block is dead code.
 */
  231. slb_miss_user_pseries:
  232. std r10,PACA_EXGEN+EX_R10(r13)
  233. std r11,PACA_EXGEN+EX_R11(r13)
  234. std r12,PACA_EXGEN+EX_R12(r13)
  235. mfspr r10,SPRG_SCRATCH0
  236. ld r11,PACA_EXSLB+EX_R9(r13)
  237. ld r12,PACA_EXSLB+EX_R3(r13)
  238. std r10,PACA_EXGEN+EX_R13(r13)
  239. std r11,PACA_EXGEN+EX_R9(r13)
  240. std r12,PACA_EXGEN+EX_R3(r13)
  241. clrrdi r12,r13,32
  242. mfmsr r10
  243. mfspr r11,SRR0 /* save SRR0 */
  244. ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
  245. ori r10,r10,MSR_IR|MSR_DR|MSR_RI
  246. mtspr SRR0,r12
  247. mfspr r12,SRR1 /* and SRR1 */
  248. mtspr SRR1,r10
  249. rfid
  250. b . /* prevent spec. execution */
  251. #endif /* __DISABLED__ */
  252. .align 7
  253. .globl __end_interrupts
  254. __end_interrupts:
  255. /*
  256. * Code from here down to __end_handlers is invoked from the
  257. * exception prologs above. Because the prologs assemble the
  258. * addresses of these handlers using the LOAD_HANDLER macro,
  259. * which uses an addi instruction, these handlers must be in
  260. * the first 32k of the kernel image.
  261. */
  262. /*** Common interrupt handlers ***/
  263. STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
  264. /*
  265. * Machine check is different because we use a different
  266. * save area: PACA_EXMC instead of PACA_EXGEN.
  267. */
/*
 * Open-coded common handler for machine check: build the exception
 * frame from PACA_EXMC, clear any nap state, run hard-disabled, and
 * call into C with r3 = pt_regs.
 */
  268. .align 7
  269. .globl machine_check_common
  270. machine_check_common:
  271. EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
  272. FINISH_NAP
  273. DISABLE_INTS
  274. bl .save_nvgprs
  275. addi r3,r1,STACK_FRAME_OVERHEAD
  276. bl .machine_check_exception
  277. b .ret_from_except
  278. STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
  279. STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
  280. STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
  281. STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
  282. STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
  283. STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
  284. STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
  285. #ifdef CONFIG_ALTIVEC
  286. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
  287. #else
  288. STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
  289. #endif
  290. #ifdef CONFIG_CBE_RAS
  291. STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
  292. STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
  293. STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
  294. #endif /* CONFIG_CBE_RAS */
/* Stub within the first 32k so LOAD_HANDLER can reach it; tail-branches
 * to the real system call path. */
  295. .align 7
  296. system_call_entry:
  297. b system_call_common
  298. /*
  299. * Here we have detected that the kernel stack pointer is bad.
  300. * R9 contains the saved CR, r13 points to the paca,
  301. * r10 contains the (bad) kernel stack pointer,
  302. * r11 and r12 contain the saved SRR0 and SRR1.
  303. * We switch to using an emergency stack, save the registers there,
  304. * and call kernel_bad_stack(), which panics.
  305. */
  306. bad_stack:
  307. ld r1,PACAEMERGSP(r13)
  308. subi r1,r1,64+INT_FRAME_SIZE
  309. std r9,_CCR(r1)
  310. std r10,GPR1(r1)
  311. std r11,_NIP(r1)
  312. std r12,_MSR(r1)
  313. mfspr r11,SPRN_DAR
  314. mfspr r12,SPRN_DSISR
  315. std r11,_DAR(r1)
  316. std r12,_DSISR(r1)
  317. mflr r10
  318. mfctr r11
  319. mfxer r12
  320. std r10,_LINK(r1)
  321. std r11,_CTR(r1)
  322. std r12,_XER(r1)
  323. SAVE_GPR(0,r1)
  324. SAVE_GPR(2,r1)
  325. SAVE_4GPRS(3,r1)
  326. SAVE_2GPRS(7,r1)
  327. SAVE_10GPRS(12,r1)
  328. SAVE_10GPRS(22,r1)
/* Trap number was stashed in the paca by the prolog; record it, then
 * terminate the back-chain with a zeroed frame link so unwinders stop. */
  329. lhz r12,PACA_TRAP_SAVE(r13)
  330. std r12,_TRAP(r1)
  331. addi r11,r1,INT_FRAME_SIZE
  332. std r11,0(r1)
  333. li r12,0
  334. std r12,0(r11)
  335. ld r2,PACATOC(r13)
/* kernel_bad_stack() should never return; loop on it if it does. */
  336. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  337. bl .kernel_bad_stack
  338. b 1b
  339. /*
  340. * Here r13 points to the paca, r9 contains the saved CR,
  341. * SRR0 and SRR1 are saved in r11 and r12,
  342. * r9 - r13 are saved in paca->exgen.
  343. */
/*
 * Common DSI handler: capture DAR/DSISR before the common prolog (which
 * may clobber SPRs), then hand off to .do_hash_page with
 * r3 = faulting address, r4 = DSISR, r5 = trap number 0x300.
 */
  344. .align 7
  345. .globl data_access_common
  346. data_access_common:
  347. mfspr r10,SPRN_DAR
  348. std r10,PACA_EXGEN+EX_DAR(r13)
  349. mfspr r10,SPRN_DSISR
  350. stw r10,PACA_EXGEN+EX_DSISR(r13)
  351. EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
  352. ld r3,PACA_EXGEN+EX_DAR(r13)
  353. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  354. li r5,0x300
  355. b .do_hash_page /* Try to handle as hpte fault */
/*
 * Common ISI handler: faulting address is the saved NIP; the fault bits
 * live in SRR1 (r12), masked here into r4.  Trap number 0x400.
 */
  356. .align 7
  357. .globl instruction_access_common
  358. instruction_access_common:
  359. EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
  360. ld r3,_NIP(r1)
  361. andis. r4,r12,0x5820
  362. li r5,0x400
  363. b .do_hash_page /* Try to handle as hpte fault */
  364. /*
  365. * Here is the common SLB miss user that is used when going to virtual
  366. * mode for SLB misses, that is currently not used
  367. */
  368. #ifdef __DISABLED__
/*
 * NOTE: dead code (__DISABLED__), kept for a future dynamic-VSID
 * implementation.  Calls .slb_allocate_user, then either returns via
 * rfid with all saved registers restored, or falls into the page-fault /
 * unrecoverable paths below.  Like slb_miss_user_pseries, it uses
 * unprefixed SRR0/SRR1 names that would need fixing if revived.
 */
  369. .align 7
  370. .globl slb_miss_user_common
  371. slb_miss_user_common:
  372. mflr r10
  373. std r3,PACA_EXGEN+EX_DAR(r13)
  374. stw r9,PACA_EXGEN+EX_CCR(r13)
  375. std r10,PACA_EXGEN+EX_LR(r13)
  376. std r11,PACA_EXGEN+EX_SRR0(r13)
  377. bl .slb_allocate_user
  378. ld r10,PACA_EXGEN+EX_LR(r13)
  379. ld r3,PACA_EXGEN+EX_R3(r13)
  380. lwz r9,PACA_EXGEN+EX_CCR(r13)
  381. ld r11,PACA_EXGEN+EX_SRR0(r13)
  382. mtlr r10
  383. beq- slb_miss_fault
  384. andi. r10,r12,MSR_RI /* check for unrecoverable exception */
  385. beq- unrecov_user_slb
  386. mfmsr r10
  387. .machine push
  388. .machine "power4"
  389. mtcrf 0x80,r9
  390. .machine pop
  391. clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
  392. mtmsrd r10,1
  393. mtspr SRR0,r11
  394. mtspr SRR1,r12
  395. ld r9,PACA_EXGEN+EX_R9(r13)
  396. ld r10,PACA_EXGEN+EX_R10(r13)
  397. ld r11,PACA_EXGEN+EX_R11(r13)
  398. ld r12,PACA_EXGEN+EX_R12(r13)
  399. ld r13,PACA_EXGEN+EX_R13(r13)
  400. rfid
  401. b .
/* SLB allocation failed: fake a no-DSISR page fault at the saved DAR. */
  402. slb_miss_fault:
  403. EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
  404. ld r4,PACA_EXGEN+EX_DAR(r13)
  405. li r5,0
  406. std r4,_DAR(r1)
  407. std r5,_DSISR(r1)
  408. b handle_page_fault
/* MSR_RI was clear: state is unrecoverable, panic via C and spin. */
  409. unrecov_user_slb:
  410. EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
  411. DISABLE_INTS
  412. bl .save_nvgprs
  413. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  414. bl .unrecoverable_exception
  415. b 1b
  416. #endif /* __DISABLED__ */
  417. /*
  418. * r13 points to the PACA, r9 contains the saved CR,
  419. * r12 contain the saved SRR1, SRR0 is still ready for return
  420. * r3 has the faulting address
  421. * r9 - r13 are saved in paca->exslb.
  422. * r3 is saved in paca->slb_r3
  423. * We assume we aren't going to take any exceptions during this procedure.
  424. */
  425. _GLOBAL(slb_miss_realmode)
  426. mflr r10
/* Relocatable kernels arrive here via bctr; r11 holds the interrupted
 * CTR value saved by the vector, so put it back before calling out. */
  427. #ifdef CONFIG_RELOCATABLE
  428. mtctr r11
  429. #endif
  430. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  431. std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
  432. bl .slb_allocate_realmode
  433. /* All done -- return from exception. */
  434. ld r10,PACA_EXSLB+EX_LR(r13)
  435. ld r3,PACA_EXSLB+EX_R3(r13)
  436. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
/* On iSeries the hypervisor delivers SRR0/SRR1 through the lppaca
 * rather than the SPRs, so reload them from there. */
  437. #ifdef CONFIG_PPC_ISERIES
  438. BEGIN_FW_FTR_SECTION
  439. ld r11,PACALPPACAPTR(r13)
  440. ld r11,LPPACASRR0(r11) /* get SRR0 value */
  441. END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
  442. #endif /* CONFIG_PPC_ISERIES */
  443. mtlr r10
  444. andi. r10,r12,MSR_RI /* check for unrecoverable exception */
  445. beq- 2f
  446. .machine push
  447. .machine "power4"
  448. mtcrf 0x80,r9
  449. mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
  450. .machine pop
  451. #ifdef CONFIG_PPC_ISERIES
  452. BEGIN_FW_FTR_SECTION
  453. mtspr SPRN_SRR0,r11
  454. mtspr SPRN_SRR1,r12
  455. END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
  456. #endif /* CONFIG_PPC_ISERIES */
  457. ld r9,PACA_EXSLB+EX_R9(r13)
  458. ld r10,PACA_EXSLB+EX_R10(r13)
  459. ld r11,PACA_EXSLB+EX_R11(r13)
  460. ld r12,PACA_EXSLB+EX_R12(r13)
  461. ld r13,PACA_EXSLB+EX_R13(r13)
  462. rfid
  463. b . /* prevent speculative execution */
/* MSR_RI clear: can't rfid safely.  Redirect SRR0/SRR1 to unrecov_slb
 * (via LOAD_HANDLER, since we may be running at the copied-down real
 * address) and take the panic path in virtual mode. */
  464. 2:
  465. #ifdef CONFIG_PPC_ISERIES
  466. BEGIN_FW_FTR_SECTION
  467. b unrecov_slb
  468. END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
  469. #endif /* CONFIG_PPC_ISERIES */
  470. mfspr r11,SPRN_SRR0
  471. ld r10,PACAKBASE(r13)
  472. LOAD_HANDLER(r10,unrecov_slb)
  473. mtspr SPRN_SRR0,r10
  474. ld r10,PACAKMSR(r13)
  475. mtspr SPRN_SRR1,r10
  476. rfid
  477. b .
  478. unrecov_slb:
  479. EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
  480. DISABLE_INTS
  481. bl .save_nvgprs
  482. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  483. bl .unrecoverable_exception
  484. b 1b
/*
 * External interrupt (0x500) common handler.  The _entry label skips the
 * prolog/FINISH_NAP and is a secondary entry point used elsewhere
 * (outside this view).  Runs soft-disabled, turns the runlatch on where
 * the CPU supports it, and dispatches to do_IRQ().
 */
  485. .align 7
  486. .globl hardware_interrupt_common
  487. .globl hardware_interrupt_entry
  488. hardware_interrupt_common:
  489. EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
  490. FINISH_NAP
  491. hardware_interrupt_entry:
  492. DISABLE_INTS
  493. BEGIN_FTR_SECTION
  494. bl .ppc64_runlatch_on
  495. END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
  496. addi r3,r1,STACK_FRAME_OVERHEAD
  497. bl .do_IRQ
  498. b .ret_from_except_lite
/*
 * PPC970 nap fixup: clear the nap flag bits (r10) in the thread-info
 * local flags (r11), then make the interrupt return to the idle task's
 * saved LR instead of re-entering the nap instruction.
 */
  499. #ifdef CONFIG_PPC_970_NAP
  500. power4_fixup_nap:
  501. andc r9,r9,r10
  502. std r9,TI_LOCAL_FLAGS(r11)
  503. ld r10,_LINK(r1) /* make idle task do the */
  504. std r10,_NIP(r1) /* equivalent of a blr */
  505. blr
  506. #endif
/*
 * Alignment interrupt (0x600): like data_access_common, DAR/DSISR are
 * captured before the common prolog and stored into the pt_regs frame
 * for the C handler.
 */
  507. .align 7
  508. .globl alignment_common
  509. alignment_common:
  510. mfspr r10,SPRN_DAR
  511. std r10,PACA_EXGEN+EX_DAR(r13)
  512. mfspr r10,SPRN_DSISR
  513. stw r10,PACA_EXGEN+EX_DSISR(r13)
  514. EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
  515. ld r3,PACA_EXGEN+EX_DAR(r13)
  516. lwz r4,PACA_EXGEN+EX_DSISR(r13)
  517. std r3,_DAR(r1)
  518. std r4,_DSISR(r1)
  519. bl .save_nvgprs
  520. addi r3,r1,STACK_FRAME_OVERHEAD
  521. ENABLE_INTS
  522. bl .alignment_exception
  523. b .ret_from_except
/* Program check (0x700): straight to the C handler with full regs. */
  524. .align 7
  525. .globl program_check_common
  526. program_check_common:
  527. EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
  528. bl .save_nvgprs
  529. addi r3,r1,STACK_FRAME_OVERHEAD
  530. ENABLE_INTS
  531. bl .program_check_exception
  532. b .ret_from_except
/*
 * FP unavailable (0x800): from user (cr0.eq clear after the prolog's
 * MSR_PR test) just load the FP state and return fast; from kernel it
 * is a bug — report it and trap.
 */
  533. .align 7
  534. .globl fp_unavailable_common
  535. fp_unavailable_common:
  536. EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
  537. bne 1f /* if from user, just load it up */
  538. bl .save_nvgprs
  539. addi r3,r1,STACK_FRAME_OVERHEAD
  540. ENABLE_INTS
  541. bl .kernel_fp_unavailable_exception
  542. BUG_OPCODE
  543. 1: bl .load_up_fpu
  544. b fast_exception_return
/*
 * AltiVec unavailable (0xf20): on CPUs with AltiVec, a user-mode fault
 * (condition set by the prolog — note the beq/bne asymmetry vs. the
 * fp/vsx handlers) loads the vector state and returns fast; otherwise
 * raise the exception in C.
 */
  545. .align 7
  546. .globl altivec_unavailable_common
  547. altivec_unavailable_common:
  548. EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
  549. #ifdef CONFIG_ALTIVEC
  550. BEGIN_FTR_SECTION
  551. beq 1f
  552. bl .load_up_altivec
  553. b fast_exception_return
  554. 1:
  555. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  556. #endif
  557. bl .save_nvgprs
  558. addi r3,r1,STACK_FRAME_OVERHEAD
  559. ENABLE_INTS
  560. bl .altivec_unavailable_exception
  561. b .ret_from_except
/*
 * VSX unavailable (0xf40): user-mode faults tail-branch into
 * .load_up_vsx (which returns to userspace itself); kernel-mode faults
 * fall through to the C exception.
 */
  562. .align 7
  563. .globl vsx_unavailable_common
  564. vsx_unavailable_common:
  565. EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
  566. #ifdef CONFIG_VSX
  567. BEGIN_FTR_SECTION
  568. bne .load_up_vsx
  569. 1:
  570. END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  571. #endif
  572. bl .save_nvgprs
  573. addi r3,r1,STACK_FRAME_OVERHEAD
  574. ENABLE_INTS
  575. bl .vsx_unavailable_exception
  576. b .ret_from_except
/* End of the region that must stay within LOAD_HANDLER's 32k reach. */
  577. .align 7
  578. .globl __end_handlers
  579. __end_handlers:
  580. /*
  581. * Return from an exception with minimal checks.
  582. * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
  583. * If interrupts have been enabled, or anything has been
  584. * done that might have changed the scheduling status of
  585. * any task or sent any task a signal, you should use
  586. * ret_from_except or ret_from_except_lite instead of this.
  587. */
/* Variant that first restores the soft-irq state from the frame and
 * mirrors MSR_EE into paca->hard_enabled before the common return. */
  588. fast_exc_return_irq: /* restores irq state too */
  589. ld r3,SOFTE(r1)
  590. TRACE_AND_RESTORE_IRQ(r3);
  591. ld r12,_MSR(r1)
  592. rldicl r4,r12,49,63 /* get MSR_EE to LSB */
  593. stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
  594. b 1f
  595. .globl fast_exception_return
  596. fast_exception_return:
  597. ld r12,_MSR(r1)
  598. 1: ld r11,_NIP(r1)
  599. andi. r3,r12,MSR_RI /* check if RI is set */
  600. beq- unrecov_fer
  601. #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  602. andi. r3,r12,MSR_PR
  603. beq 2f
  604. ACCOUNT_CPU_USER_EXIT(r3, r4)
  605. 2:
  606. #endif
  607. ld r3,_CCR(r1)
  608. ld r4,_LINK(r1)
  609. ld r5,_CTR(r1)
  610. ld r6,_XER(r1)
  611. mtcr r3
  612. mtlr r4
  613. mtctr r5
  614. mtxer r6
  615. REST_GPR(0, r1)
  616. REST_8GPRS(2, r1)
/* Hard-disable and clear RI so the final SRR0/SRR1 window up to the
 * rfid cannot be corrupted by another interrupt. */
  617. mfmsr r10
  618. rldicl r10,r10,48,1 /* clear EE */
  619. rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
  620. mtmsrd r10,1
  621. mtspr SPRN_SRR1,r12
  622. mtspr SPRN_SRR0,r11
  623. REST_4GPRS(10, r1)
  624. ld r1,GPR1(r1)
  625. rfid
  626. b . /* prevent speculative execution */
/* RI was clear on entry: the interrupted state is unrecoverable. */
  627. unrecov_fer:
  628. bl .save_nvgprs
  629. 1: addi r3,r1,STACK_FRAME_OVERHEAD
  630. bl .unrecoverable_exception
  631. b 1b
  632. /*
  633. * Hash table stuff
  634. */
/*
 * do_hash_page: entered from data/instruction access handlers with
 * r3 = faulting address, r4 = DSISR-style fault bits, r5 = trap number.
 * Tries to resolve the fault by inserting a HPTE via .hash_page;
 * anything it can't handle goes to handle_page_fault / do_ste_alloc /
 * low_hash_fault below.
 */
  635. .align 7
  636. _STATIC(do_hash_page)
  637. std r3,_DAR(r1)
  638. std r4,_DSISR(r1)
/* Fault bits 0xa450 (mask applied to the DSISR high half) indicate
 * errors hash_page cannot fix — go straight to the page-fault path. */
  639. andis. r0,r4,0xa450 /* weird error? */
  640. bne- handle_page_fault /* if not, try to insert a HPTE */
  641. BEGIN_FTR_SECTION
  642. andis. r0,r4,0x0020 /* Is it a segment table fault? */
  643. bne- do_ste_alloc /* If so handle it */
  644. END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
  645. /*
  646. * On iSeries, we soft-disable interrupts here, then
  647. * hard-enable interrupts so that the hash_page code can spin on
  648. * the hash_table_lock without problems on a shared processor.
  649. */
  650. DISABLE_INTS
  651. /*
  652. * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
  653. * and will clobber volatile registers when irq tracing is enabled
  654. * so we need to reload them. It may be possible to be smarter here
  655. * and move the irq tracing elsewhere but let's keep it simple for
  656. * now
  657. */
  658. #ifdef CONFIG_TRACE_IRQFLAGS
  659. ld r3,_DAR(r1)
  660. ld r4,_DSISR(r1)
  661. ld r5,_TRAP(r1)
  662. ld r12,_MSR(r1)
  663. clrrdi r5,r5,4
  664. #endif /* CONFIG_TRACE_IRQFLAGS */
  665. /*
  666. * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
  667. * accessing a userspace segment (even from the kernel). We assume
  668. * kernel addresses always have the high bit set.
  669. */
  670. rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
  671. rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
  672. orc r0,r12,r0 /* MSR_PR | ~high_bit */
  673. rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
  674. ori r4,r4,1 /* add _PAGE_PRESENT */
  675. rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
  676. /*
  677. * r3 contains the faulting address
  678. * r4 contains the required access permissions
  679. * r5 contains the trap number
  680. *
  681. * at return r3 = 0 for success
  682. */
  683. bl .hash_page /* build HPTE if possible */
  684. cmpdi r3,0 /* see if hash_page succeeded */
/* The two firmware-feature sections below are alternatives: iSeries
 * takes the full ret_from_except_lite path, everything else can use
 * the fast irq-restoring return. */
  685. BEGIN_FW_FTR_SECTION
  686. /*
  687. * If we had interrupts soft-enabled at the point where the
  688. * DSI/ISI occurred, and an interrupt came in during hash_page,
  689. * handle it now.
  690. * We jump to ret_from_except_lite rather than fast_exception_return
  691. * because ret_from_except_lite will check for and handle pending
  692. * interrupts if necessary.
  693. */
  694. beq 13f
  695. END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
  696. BEGIN_FW_FTR_SECTION
  697. /*
  698. * Here we have interrupts hard-disabled, so it is sufficient
  699. * to restore paca->{soft,hard}_enable and get out.
  700. */
  701. beq fast_exc_return_irq /* Return from exception on success */
  702. END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
  703. /* For a hash failure, we don't bother re-enabling interrupts */
  704. ble- 12f
  705. /*
  706. * hash_page couldn't handle it, set soft interrupt enable back
  707. * to what it was before the trap. Note that .raw_local_irq_restore
  708. * handles any interrupts pending at this point.
  709. */
  710. ld r3,SOFTE(r1)
  711. TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
  712. bl .raw_local_irq_restore
  713. b 11f
  714. /* Here we have a page fault that hash_page can't handle. */
/*
 * handle_page_fault: call the generic C fault handler; on failure
 * (non-zero return) report a bad page fault with the fault code in r5
 * and the faulting address in r4.
 */
  715. handle_page_fault:
  716. ENABLE_INTS
  717. 11: ld r4,_DAR(r1)
  718. ld r5,_DSISR(r1)
  719. addi r3,r1,STACK_FRAME_OVERHEAD
  720. bl .do_page_fault
  721. cmpdi r3,0
  722. beq+ 13f
  723. bl .save_nvgprs
  724. mr r5,r3
  725. addi r3,r1,STACK_FRAME_OVERHEAD
  726. lwz r4,_DAR(r1)
  727. bl .bad_page_fault
  728. b .ret_from_except
  729. 13: b .ret_from_except_lite
  730. /* We have a page fault that hash_page could handle but HV refused
  731. * the PTE insertion
  732. */
  733. 12: bl .save_nvgprs
  734. mr r5,r3
  735. addi r3,r1,STACK_FRAME_OVERHEAD
  736. ld r4,_DAR(r1)
  737. bl .low_hash_fault
  738. b .ret_from_except
/* Segment-table (STAB) miss: try to insert an entry, fall back to the
 * page-fault path if .ste_allocate fails. */
  739. /* here we have a segment miss */
  740. do_ste_alloc:
  741. bl .ste_allocate /* try to insert stab entry */
  742. cmpdi r3,0
  743. bne- handle_page_fault
  744. b fast_exception_return
  745. /*
  746. * r13 points to the PACA, r9 contains the saved CR,
  747. * r11 and r12 contain the saved SRR0 and SRR1.
  748. * r9 - r13 are saved in paca->exslb.
  749. * We assume we aren't going to take any exceptions during this procedure.
  750. * We assume (DAR >> 60) == 0xc.
  751. */
/*
 * Real-mode bolted segment-table miss handler for pre-SLB CPUs.
 * Hashes the ESID into the primary STE group, searches the 8 entries
 * for a free slot, randomly casts one out (TB-based) if the group is
 * full, then installs a new kernel STE (valid + Kp) and returns with
 * rfid after the usual MSR_RI sanity check.
 */
  752. .align 7
  753. _GLOBAL(do_stab_bolted)
  754. stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
  755. std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
  756. /* Hash to the primary group */
  757. ld r10,PACASTABVIRT(r13)
  758. mfspr r11,SPRN_DAR
  759. srdi r11,r11,28
  760. rldimi r10,r11,7,52 /* r10 = first ste of the group */
  761. /* Calculate VSID */
  762. /* This is a kernel address, so protovsid = ESID */
  763. ASM_VSID_SCRAMBLE(r11, r9, 256M)
  764. rldic r9,r11,12,16 /* r9 = vsid << 12 */
  765. /* Search the primary group for a free entry */
  766. 1: ld r11,0(r10) /* Test valid bit of the current ste */
  767. andi. r11,r11,0x80
  768. beq 2f
  769. addi r10,r10,16
/* 8 entries of 16 bytes each: when (addr & 0x70) wraps to 0 we have
 * walked the whole group without finding a free STE. */
  770. andi. r11,r10,0x70
  771. bne 1b
  772. /* Stick for only searching the primary group for now. */
  773. /* At least for now, we use a very simple random castout scheme */
  774. /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
  775. mftb r11
  776. rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
  777. ori r11,r11,0x10
  778. /* r10 currently points to an ste one past the group of interest */
  779. /* make it point to the randomly selected entry */
  780. subi r10,r10,128
  781. or r10,r10,r11 /* r10 is the entry to invalidate */
  782. isync /* mark the entry invalid */
  783. ld r11,0(r10)
  784. rldicl r11,r11,56,1 /* clear the valid bit */
  785. rotldi r11,r11,8
  786. std r11,0(r10)
  787. sync
  788. clrrdi r11,r11,28 /* Get the esid part of the ste */
  789. slbie r11
  790. 2: std r9,8(r10) /* Store the vsid part of the ste */
  791. eieio
  792. mfspr r11,SPRN_DAR /* Get the new esid */
  793. clrrdi r11,r11,28 /* Permits a full 32b of ESID */
  794. ori r11,r11,0x90 /* Turn on valid and kp */
  795. std r11,0(r10) /* Put new entry back into the stab */
  796. sync
  797. /* All done -- return from exception. */
  798. lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
  799. ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
  800. andi. r10,r12,MSR_RI
  801. beq- unrecov_slb
  802. mtcrf 0x80,r9 /* restore CR */
/* Clear RI for the SRR0/SRR1 restore window, as in the other handlers. */
  803. mfmsr r10
  804. clrrdi r10,r10,2
  805. mtmsrd r10,1
  806. mtspr SPRN_SRR0,r11
  807. mtspr SPRN_SRR1,r12
  808. ld r9,PACA_EXSLB+EX_R9(r13)
  809. ld r10,PACA_EXSLB+EX_R10(r13)
  810. ld r11,PACA_EXSLB+EX_R11(r13)
  811. ld r12,PACA_EXSLB+EX_R12(r13)
  812. ld r13,PACA_EXSLB+EX_R13(r13)
  813. rfid
  814. b . /* prevent speculative execution */
  815. /*
  816. * Space for CPU0's segment table.
  817. *
  818. * On iSeries, the hypervisor must fill in at least one entry before
  819. * we get control (with relocate on). The address is given to the hv
  820. * as a page number (see xLparMap below), so this must be at a
  821. * fixed address (the linker can't compute (u64)&initial_stab >>
  822. * PAGE_SHIFT).
  823. */
  824. . = STAB0_OFFSET /* 0x6000 */
  825. .globl initial_stab
  826. initial_stab:
  827. .space 4096
  828. #ifdef CONFIG_PPC_PSERIES
  829. /*
  830. * Data area reserved for FWNMI option.
  831. * This address (0x7000) is fixed by the RPA.
  832. */
  833. .= 0x7000
  834. .globl fwnmi_data_area
  835. fwnmi_data_area:
  836. #endif /* CONFIG_PPC_PSERIES */
  837. /* iSeries does not use the FWNMI stuff, so it is safe to put
  838. * this here, even if we later allow kernels that will boot on
  839. * both pSeries and iSeries */
/*
 * xLparMap: static descriptor handed to the iSeries hypervisor telling
 * it which kernel ESID/VSID pairs and page ranges to pre-map before it
 * transfers control.  Must sit at the fixed LPARMAP_PHYS address.
 */
  840. #ifdef CONFIG_PPC_ISERIES
  841. . = LPARMAP_PHYS
  842. .globl xLparMap
  843. xLparMap:
  844. .quad HvEsidsToMap /* xNumberEsids */
  845. .quad HvRangesToMap /* xNumberRanges */
  846. .quad STAB0_PAGE /* xSegmentTableOffs */
  847. .zero 40 /* xRsvd */
  848. /* xEsids (HvEsidsToMap entries of 2 quads) */
  849. .quad PAGE_OFFSET_ESID /* xKernelEsid */
  850. .quad PAGE_OFFSET_VSID /* xKernelVsid */
  851. .quad VMALLOC_START_ESID /* xKernelEsid */
  852. .quad VMALLOC_START_VSID /* xKernelVsid */
  853. /* xRanges (HvRangesToMap entries of 3 quads) */
  854. .quad HvPagesToMap /* xPages */
  855. .quad 0 /* xOffset */
  856. .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
  857. #endif /* CONFIG_PPC_ISERIES */
/* pSeries: reserve up to 0x8000 so early init code starts past the
 * fixed-offset areas described at the top of the file. */
  858. #ifdef CONFIG_PPC_PSERIES
  859. . = 0x8000
  860. #endif /* CONFIG_PPC_PSERIES */