// arch/ia64/kernel/mca_asm.S
  1. //
  2. // assembly portion of the IA64 MCA handling
  3. //
  4. // Mods by cfleck to integrate into kernel build
  5. // 00/03/15 davidm Added various stop bits to get a clean compile
  6. //
  7. // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
  8. // kstack, switch modes, jump to C INIT handler
  9. //
  10. // 02/01/04 J.Hall <jenna.s.hall@intel.com>
  11. // Before entering virtual mode code:
  12. // 1. Check for TLB CPU error
  13. // 2. Restore current thread pointer to kr6
  14. // 3. Move stack ptr 16 bytes to conform to C calling convention
  15. //
  16. // 04/11/12 Russ Anderson <rja@sgi.com>
  17. // Added per cpu MCA/INIT stack save areas.
  18. //
  19. #include <linux/config.h>
  20. #include <linux/threads.h>
  21. #include <asm/asmmacro.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/processor.h>
  24. #include <asm/mca_asm.h>
  25. #include <asm/mca.h>
  26. /*
  27. * When we get a machine check, the kernel stack pointer is no longer
  28. * valid, so we need to set a new stack pointer.
  29. */
  30. #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
  31. /*
  32. * Needed for return context to SAL
  33. */
  34. #define IA64_MCA_SAME_CONTEXT 0
  35. #define IA64_MCA_COLD_BOOT -2
  36. #include "minstate.h"
  37. /*
  38. * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
  39. * 1. GR1 = OS GP
  40. * 2. GR8 = PAL_PROC physical address
  41. * 3. GR9 = SAL_PROC physical address
  42. * 4. GR10 = SAL GP (physical)
  43. * 5. GR11 = Rendez state
  44. * 6. GR12 = Return address to location within SAL_CHECK
  45. */
  46. #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
  47. LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
  48. st8 [_tmp]=r1,0x08;; \
  49. st8 [_tmp]=r8,0x08;; \
  50. st8 [_tmp]=r9,0x08;; \
  51. st8 [_tmp]=r10,0x08;; \
  52. st8 [_tmp]=r11,0x08;; \
  53. st8 [_tmp]=r12,0x08;; \
  54. st8 [_tmp]=r17,0x08;; \
  55. st8 [_tmp]=r18,0x08
  56. /*
  57. * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
  58. * (p6) is executed if we never entered virtual mode (TLB error)
  59. * (p7) is executed if we entered virtual mode as expected (normal case)
  60. * 1. GR8 = OS_MCA return status
  61. * 2. GR9 = SAL GP (physical)
  62. * 3. GR10 = 0/1 returning same/new context
  63. * 4. GR22 = New min state save area pointer
  64. * returns ptr to SAL rtn save loc in _tmp
  65. */
  66. #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
  67. movl _tmp=ia64_os_to_sal_handoff_state;; \
  68. DATA_VA_TO_PA(_tmp);; \
  69. ld8 r8=[_tmp],0x08;; \
  70. ld8 r9=[_tmp],0x08;; \
  71. ld8 r10=[_tmp],0x08;; \
  72. ld8 r22=[_tmp],0x08;;
  73. // now _tmp is pointing to SAL rtn save location
  74. /*
  75. * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
  76. * imots_os_status=IA64_MCA_COLD_BOOT
  77. * imots_sal_gp=SAL GP
  78. * imots_context=IA64_MCA_SAME_CONTEXT
  79. * imots_new_min_state=Min state save area pointer
  80. * imots_sal_check_ra=Return address to location within SAL_CHECK
  81. *
  82. */
  83. #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
  84. movl tmp=IA64_MCA_COLD_BOOT; \
  85. movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
  86. movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
  87. st8 [os_to_sal_handoff]=tmp,8;; \
  88. ld8 tmp=[sal_to_os_handoff],48;; \
  89. st8 [os_to_sal_handoff]=tmp,8;; \
  90. movl tmp=IA64_MCA_SAME_CONTEXT;; \
  91. st8 [os_to_sal_handoff]=tmp,8;; \
  92. ld8 tmp=[sal_to_os_handoff],-8;; \
  93. st8 [os_to_sal_handoff]=tmp,8;; \
  94. ld8 tmp=[sal_to_os_handoff];; \
  95. st8 [os_to_sal_handoff]=tmp;;
  96. #define GET_IA64_MCA_DATA(reg) \
  97. GET_THIS_PADDR(reg, ia64_mca_data) \
  98. ;; \
  99. ld8 reg=[reg]
  100. .global ia64_os_mca_dispatch
  101. .global ia64_os_mca_dispatch_end
  102. .global ia64_sal_to_os_handoff_state
  103. .global ia64_os_to_sal_handoff_state
  104. .global ia64_do_tlb_purge
  105. .text
  106. .align 16
  107. /*
  108. * Just the TLB purge part is moved to a separate function
  109. * so we can re-use the code for cpu hotplug code as well
  110. * Caller should now setup b1, so we can branch once the
  111. * tlb flush is complete.
  112. */
  113. ia64_do_tlb_purge:
  114. #define O(member) IA64_CPUINFO_##member##_OFFSET
  115. GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
  116. ;;
  117. addl r17=O(PTCE_STRIDE),r2
  118. addl r2=O(PTCE_BASE),r2
  119. ;;
  120. ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
  121. ld4 r19=[r2],4 // r19=ptce_count[0]
  122. ld4 r21=[r17],4 // r21=ptce_stride[0]
  123. ;;
  124. ld4 r20=[r2] // r20=ptce_count[1]
  125. ld4 r22=[r17] // r22=ptce_stride[1]
  126. mov r24=0
  127. ;;
  128. adds r20=-1,r20
  129. ;;
  130. #undef O
  131. 2:
  132. cmp.ltu p6,p7=r24,r19
  133. (p7) br.cond.dpnt.few 4f
  134. mov ar.lc=r20
  135. 3:
  136. ptc.e r18
  137. ;;
  138. add r18=r22,r18
  139. br.cloop.sptk.few 3b
  140. ;;
  141. add r18=r21,r18
  142. add r24=1,r24
  143. ;;
  144. br.sptk.few 2b
  145. 4:
  146. srlz.i // srlz.i implies srlz.d
  147. ;;
  148. // Now purge addresses formerly mapped by TR registers
  149. // 1. Purge ITR&DTR for kernel.
  150. movl r16=KERNEL_START
  151. mov r18=KERNEL_TR_PAGE_SHIFT<<2
  152. ;;
  153. ptr.i r16, r18
  154. ptr.d r16, r18
  155. ;;
  156. srlz.i
  157. ;;
  158. srlz.d
  159. ;;
  160. // 2. Purge DTR for PERCPU data.
  161. movl r16=PERCPU_ADDR
  162. mov r18=PERCPU_PAGE_SHIFT<<2
  163. ;;
  164. ptr.d r16,r18
  165. ;;
  166. srlz.d
  167. ;;
  168. // 3. Purge ITR for PAL code.
  169. GET_THIS_PADDR(r2, ia64_mca_pal_base)
  170. ;;
  171. ld8 r16=[r2]
  172. mov r18=IA64_GRANULE_SHIFT<<2
  173. ;;
  174. ptr.i r16,r18
  175. ;;
  176. srlz.i
  177. ;;
  178. // 4. Purge DTR for stack.
  179. mov r16=IA64_KR(CURRENT_STACK)
  180. ;;
  181. shl r16=r16,IA64_GRANULE_SHIFT
  182. movl r19=PAGE_OFFSET
  183. ;;
  184. add r16=r19,r16
  185. mov r18=IA64_GRANULE_SHIFT<<2
  186. ;;
  187. ptr.d r16,r18
  188. ;;
  189. srlz.i
  190. ;;
  191. // Now branch away to caller.
  192. br.sptk.many b1
  193. ;;
  194. ia64_os_mca_dispatch:
  195. // Serialize all MCA processing
  196. mov r3=1;;
  197. LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
  198. ia64_os_mca_spin:
  199. xchg8 r4=[r2],r3;;
  200. cmp.ne p6,p0=r4,r0
  201. (p6) br ia64_os_mca_spin
  202. // Save the SAL to OS MCA handoff state as defined
  203. // by SAL SPEC 3.0
  204. // NOTE : The order in which the state gets saved
  205. // is dependent on the way the C-structure
  206. // for ia64_mca_sal_to_os_state_t has been
  207. // defined in include/asm/mca.h
  208. SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
  209. ;;
  210. // LOG PROCESSOR STATE INFO FROM HERE ON..
  211. begin_os_mca_dump:
  212. br ia64_os_mca_proc_state_dump;;
  213. ia64_os_mca_done_dump:
  214. LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
  215. ;;
  216. ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
  217. ;;
  218. tbit.nz p6,p7=r18,60
  219. (p7) br.spnt done_tlb_purge_and_reload
  220. // The following code purges TC and TR entries. Then reload all TC entries.
  221. // Purge percpu data TC entries.
  222. begin_tlb_purge_and_reload:
  223. movl r18=ia64_reload_tr;;
  224. LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
  225. mov b1=r18;;
  226. br.sptk.many ia64_do_tlb_purge;;
  227. ia64_reload_tr:
  228. // Finally reload the TR registers.
  229. // 1. Reload DTR/ITR registers for kernel.
  230. mov r18=KERNEL_TR_PAGE_SHIFT<<2
  231. movl r17=KERNEL_START
  232. ;;
  233. mov cr.itir=r18
  234. mov cr.ifa=r17
  235. mov r16=IA64_TR_KERNEL
  236. mov r19=ip
  237. movl r18=PAGE_KERNEL
  238. ;;
  239. dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
  240. ;;
  241. or r18=r17,r18
  242. ;;
  243. itr.i itr[r16]=r18
  244. ;;
  245. itr.d dtr[r16]=r18
  246. ;;
  247. srlz.i
  248. srlz.d
  249. ;;
  250. // 2. Reload DTR register for PERCPU data.
  251. GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
  252. ;;
  253. movl r16=PERCPU_ADDR // vaddr
  254. movl r18=PERCPU_PAGE_SHIFT<<2
  255. ;;
  256. mov cr.itir=r18
  257. mov cr.ifa=r16
  258. ;;
  259. ld8 r18=[r2] // load per-CPU PTE
  260. mov r16=IA64_TR_PERCPU_DATA;
  261. ;;
  262. itr.d dtr[r16]=r18
  263. ;;
  264. srlz.d
  265. ;;
  266. // 3. Reload ITR for PAL code.
  267. GET_THIS_PADDR(r2, ia64_mca_pal_pte)
  268. ;;
  269. ld8 r18=[r2] // load PAL PTE
  270. ;;
  271. GET_THIS_PADDR(r2, ia64_mca_pal_base)
  272. ;;
  273. ld8 r16=[r2] // load PAL vaddr
  274. mov r19=IA64_GRANULE_SHIFT<<2
  275. ;;
  276. mov cr.itir=r19
  277. mov cr.ifa=r16
  278. mov r20=IA64_TR_PALCODE
  279. ;;
  280. itr.i itr[r20]=r18
  281. ;;
  282. srlz.i
  283. ;;
  284. // 4. Reload DTR for stack.
  285. mov r16=IA64_KR(CURRENT_STACK)
  286. ;;
  287. shl r16=r16,IA64_GRANULE_SHIFT
  288. movl r19=PAGE_OFFSET
  289. ;;
  290. add r18=r19,r16
  291. movl r20=PAGE_KERNEL
  292. ;;
  293. add r16=r20,r16
  294. mov r19=IA64_GRANULE_SHIFT<<2
  295. ;;
  296. mov cr.itir=r19
  297. mov cr.ifa=r18
  298. mov r20=IA64_TR_CURRENT_STACK
  299. ;;
  300. itr.d dtr[r20]=r16
  301. ;;
  302. srlz.d
  303. ;;
  304. br.sptk.many done_tlb_purge_and_reload
  305. err:
  306. COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
  307. br.sptk.many ia64_os_mca_done_restore
  308. done_tlb_purge_and_reload:
  309. // Setup new stack frame for OS_MCA handling
  310. GET_IA64_MCA_DATA(r2)
  311. ;;
  312. add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
  313. add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
  314. ;;
  315. rse_switch_context(r6,r3,r2);; // RSC management in this new context
  316. GET_IA64_MCA_DATA(r2)
  317. ;;
  318. add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
  319. ;;
  320. mov r12=r2 // establish new stack-pointer
  321. // Enter virtual mode from physical mode
  322. VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
  323. ia64_os_mca_virtual_begin:
  324. // Call virtual mode handler
  325. movl r2=ia64_mca_ucmc_handler;;
  326. mov b6=r2;;
  327. br.call.sptk.many b0=b6;;
  328. .ret0:
  329. // Revert back to physical mode before going back to SAL
  330. PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
  331. ia64_os_mca_virtual_end:
  332. // restore the original stack frame here
  333. GET_IA64_MCA_DATA(r2)
  334. ;;
  335. add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
  336. ;;
  337. movl r4=IA64_PSR_MC
  338. ;;
  339. rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
  340. // let us restore all the registers from our PSI structure
  341. mov r8=gp
  342. ;;
  343. begin_os_mca_restore:
  344. br ia64_os_mca_proc_state_restore;;
  345. ia64_os_mca_done_restore:
  346. OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
  347. // branch back to SALE_CHECK
  348. ld8 r3=[r2];;
  349. mov b0=r3;; // SAL_CHECK return address
  350. // release lock
  351. movl r3=ia64_mca_serialize;;
  352. DATA_VA_TO_PA(r3);;
  353. st8.rel [r3]=r0
  354. br b0
  355. ;;
  356. ia64_os_mca_dispatch_end:
  357. //EndMain//////////////////////////////////////////////////////////////////////
  358. //++
  359. // Name:
  360. // ia64_os_mca_proc_state_dump()
  361. //
  362. // Stub Description:
  363. //
  364. // This stub dumps the processor state during MCHK to a data area
  365. //
  366. //--
  367. ia64_os_mca_proc_state_dump:
  368. // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
  369. // to virtual addressing mode.
  370. GET_IA64_MCA_DATA(r2)
  371. ;;
  372. add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
  373. ;;
  374. // save ar.NaT
  375. mov r5=ar.unat // ar.unat
  376. // save banked GRs 16-31 along with NaT bits
  377. bsw.1;;
  378. st8.spill [r2]=r16,8;;
  379. st8.spill [r2]=r17,8;;
  380. st8.spill [r2]=r18,8;;
  381. st8.spill [r2]=r19,8;;
  382. st8.spill [r2]=r20,8;;
  383. st8.spill [r2]=r21,8;;
  384. st8.spill [r2]=r22,8;;
  385. st8.spill [r2]=r23,8;;
  386. st8.spill [r2]=r24,8;;
  387. st8.spill [r2]=r25,8;;
  388. st8.spill [r2]=r26,8;;
  389. st8.spill [r2]=r27,8;;
  390. st8.spill [r2]=r28,8;;
  391. st8.spill [r2]=r29,8;;
  392. st8.spill [r2]=r30,8;;
  393. st8.spill [r2]=r31,8;;
  394. mov r4=ar.unat;;
  395. st8 [r2]=r4,8 // save User NaT bits for r16-r31
  396. mov ar.unat=r5 // restore original unat
  397. bsw.0;;
  398. //save BRs
  399. add r4=8,r2 // duplicate r2 in r4
  400. add r6=2*8,r2 // duplicate r2 in r4
  401. mov r3=b0
  402. mov r5=b1
  403. mov r7=b2;;
  404. st8 [r2]=r3,3*8
  405. st8 [r4]=r5,3*8
  406. st8 [r6]=r7,3*8;;
  407. mov r3=b3
  408. mov r5=b4
  409. mov r7=b5;;
  410. st8 [r2]=r3,3*8
  411. st8 [r4]=r5,3*8
  412. st8 [r6]=r7,3*8;;
  413. mov r3=b6
  414. mov r5=b7;;
  415. st8 [r2]=r3,2*8
  416. st8 [r4]=r5,2*8;;
  417. cSaveCRs:
  418. // save CRs
  419. add r4=8,r2 // duplicate r2 in r4
  420. add r6=2*8,r2 // duplicate r2 in r4
  421. mov r3=cr.dcr
  422. mov r5=cr.itm
  423. mov r7=cr.iva;;
  424. st8 [r2]=r3,8*8
  425. st8 [r4]=r5,3*8
  426. st8 [r6]=r7,3*8;; // 48 byte rements
  427. mov r3=cr.pta;;
  428. st8 [r2]=r3,8*8;; // 64 byte rements
  429. // if PSR.ic=0, reading interruption registers causes an illegal operation fault
  430. mov r3=psr;;
  431. tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
  432. (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
  433. begin_skip_intr_regs:
  434. (p6) br SkipIntrRegs;;
  435. add r4=8,r2 // duplicate r2 in r4
  436. add r6=2*8,r2 // duplicate r2 in r6
  437. mov r3=cr.ipsr
  438. mov r5=cr.isr
  439. mov r7=r0;;
  440. st8 [r2]=r3,3*8
  441. st8 [r4]=r5,3*8
  442. st8 [r6]=r7,3*8;;
  443. mov r3=cr.iip
  444. mov r5=cr.ifa
  445. mov r7=cr.itir;;
  446. st8 [r2]=r3,3*8
  447. st8 [r4]=r5,3*8
  448. st8 [r6]=r7,3*8;;
  449. mov r3=cr.iipa
  450. mov r5=cr.ifs
  451. mov r7=cr.iim;;
  452. st8 [r2]=r3,3*8
  453. st8 [r4]=r5,3*8
  454. st8 [r6]=r7,3*8;;
  455. mov r3=cr25;; // cr.iha
  456. st8 [r2]=r3,160;; // 160 byte rement
  457. SkipIntrRegs:
  458. st8 [r2]=r0,152;; // another 152 byte .
  459. add r4=8,r2 // duplicate r2 in r4
  460. add r6=2*8,r2 // duplicate r2 in r6
  461. mov r3=cr.lid
  462. // mov r5=cr.ivr // cr.ivr, don't read it
  463. mov r7=cr.tpr;;
  464. st8 [r2]=r3,3*8
  465. st8 [r4]=r5,3*8
  466. st8 [r6]=r7,3*8;;
  467. mov r3=r0 // cr.eoi => cr67
  468. mov r5=r0 // cr.irr0 => cr68
  469. mov r7=r0;; // cr.irr1 => cr69
  470. st8 [r2]=r3,3*8
  471. st8 [r4]=r5,3*8
  472. st8 [r6]=r7,3*8;;
  473. mov r3=r0 // cr.irr2 => cr70
  474. mov r5=r0 // cr.irr3 => cr71
  475. mov r7=cr.itv;;
  476. st8 [r2]=r3,3*8
  477. st8 [r4]=r5,3*8
  478. st8 [r6]=r7,3*8;;
  479. mov r3=cr.pmv
  480. mov r5=cr.cmcv;;
  481. st8 [r2]=r3,7*8
  482. st8 [r4]=r5,7*8;;
  483. mov r3=r0 // cr.lrr0 => cr80
  484. mov r5=r0;; // cr.lrr1 => cr81
  485. st8 [r2]=r3,23*8
  486. st8 [r4]=r5,23*8;;
  487. adds r2=25*8,r2;;
  488. cSaveARs:
  489. // save ARs
  490. add r4=8,r2 // duplicate r2 in r4
  491. add r6=2*8,r2 // duplicate r2 in r6
  492. mov r3=ar.k0
  493. mov r5=ar.k1
  494. mov r7=ar.k2;;
  495. st8 [r2]=r3,3*8
  496. st8 [r4]=r5,3*8
  497. st8 [r6]=r7,3*8;;
  498. mov r3=ar.k3
  499. mov r5=ar.k4
  500. mov r7=ar.k5;;
  501. st8 [r2]=r3,3*8
  502. st8 [r4]=r5,3*8
  503. st8 [r6]=r7,3*8;;
  504. mov r3=ar.k6
  505. mov r5=ar.k7
  506. mov r7=r0;; // ar.kr8
  507. st8 [r2]=r3,10*8
  508. st8 [r4]=r5,10*8
  509. st8 [r6]=r7,10*8;; // rement by 72 bytes
  510. mov r3=ar.rsc
  511. mov ar.rsc=r0 // put RSE in enforced lazy mode
  512. mov r5=ar.bsp
  513. ;;
  514. mov r7=ar.bspstore;;
  515. st8 [r2]=r3,3*8
  516. st8 [r4]=r5,3*8
  517. st8 [r6]=r7,3*8;;
  518. mov r3=ar.rnat;;
  519. st8 [r2]=r3,8*13 // increment by 13x8 bytes
  520. mov r3=ar.ccv;;
  521. st8 [r2]=r3,8*4
  522. mov r3=ar.unat;;
  523. st8 [r2]=r3,8*4
  524. mov r3=ar.fpsr;;
  525. st8 [r2]=r3,8*4
  526. mov r3=ar.itc;;
  527. st8 [r2]=r3,160 // 160
  528. mov r3=ar.pfs;;
  529. st8 [r2]=r3,8
  530. mov r3=ar.lc;;
  531. st8 [r2]=r3,8
  532. mov r3=ar.ec;;
  533. st8 [r2]=r3
  534. add r2=8*62,r2 //padding
  535. // save RRs
  536. mov ar.lc=0x08-1
  537. movl r4=0x00;;
  538. cStRR:
  539. dep.z r5=r4,61,3;;
  540. mov r3=rr[r5];;
  541. st8 [r2]=r3,8
  542. add r4=1,r4
  543. br.cloop.sptk.few cStRR
  544. ;;
  545. end_os_mca_dump:
  546. br ia64_os_mca_done_dump;;
  547. //EndStub//////////////////////////////////////////////////////////////////////
  548. //++
  549. // Name:
  550. // ia64_os_mca_proc_state_restore()
  551. //
  552. // Stub Description:
  553. //
  554. // This is a stub to restore the saved processor state during MCHK
  555. //
  556. //--
  557. ia64_os_mca_proc_state_restore:
  558. // Restore bank1 GR16-31
  559. GET_IA64_MCA_DATA(r2)
  560. ;;
  561. add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
  562. restore_GRs: // restore bank-1 GRs 16-31
  563. bsw.1;;
  564. add r3=16*8,r2;; // to get to NaT of GR 16-31
  565. ld8 r3=[r3];;
  566. mov ar.unat=r3;; // first restore NaT
  567. ld8.fill r16=[r2],8;;
  568. ld8.fill r17=[r2],8;;
  569. ld8.fill r18=[r2],8;;
  570. ld8.fill r19=[r2],8;;
  571. ld8.fill r20=[r2],8;;
  572. ld8.fill r21=[r2],8;;
  573. ld8.fill r22=[r2],8;;
  574. ld8.fill r23=[r2],8;;
  575. ld8.fill r24=[r2],8;;
  576. ld8.fill r25=[r2],8;;
  577. ld8.fill r26=[r2],8;;
  578. ld8.fill r27=[r2],8;;
  579. ld8.fill r28=[r2],8;;
  580. ld8.fill r29=[r2],8;;
  581. ld8.fill r30=[r2],8;;
  582. ld8.fill r31=[r2],8;;
  583. ld8 r3=[r2],8;; // increment to skip NaT
  584. bsw.0;;
  585. restore_BRs:
  586. add r4=8,r2 // duplicate r2 in r4
  587. add r6=2*8,r2;; // duplicate r2 in r4
  588. ld8 r3=[r2],3*8
  589. ld8 r5=[r4],3*8
  590. ld8 r7=[r6],3*8;;
  591. mov b0=r3
  592. mov b1=r5
  593. mov b2=r7;;
  594. ld8 r3=[r2],3*8
  595. ld8 r5=[r4],3*8
  596. ld8 r7=[r6],3*8;;
  597. mov b3=r3
  598. mov b4=r5
  599. mov b5=r7;;
  600. ld8 r3=[r2],2*8
  601. ld8 r5=[r4],2*8;;
  602. mov b6=r3
  603. mov b7=r5;;
  604. restore_CRs:
  605. add r4=8,r2 // duplicate r2 in r4
  606. add r6=2*8,r2;; // duplicate r2 in r4
  607. ld8 r3=[r2],8*8
  608. ld8 r5=[r4],3*8
  609. ld8 r7=[r6],3*8;; // 48 byte increments
  610. mov cr.dcr=r3
  611. mov cr.itm=r5
  612. mov cr.iva=r7;;
  613. ld8 r3=[r2],8*8;; // 64 byte increments
  614. // mov cr.pta=r3
  615. // if PSR.ic=1, reading interruption registers causes an illegal operation fault
  616. mov r3=psr;;
  617. tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
  618. (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
  619. begin_rskip_intr_regs:
  620. (p6) br rSkipIntrRegs;;
  621. add r4=8,r2 // duplicate r2 in r4
  622. add r6=2*8,r2;; // duplicate r2 in r4
  623. ld8 r3=[r2],3*8
  624. ld8 r5=[r4],3*8
  625. ld8 r7=[r6],3*8;;
  626. mov cr.ipsr=r3
  627. // mov cr.isr=r5 // cr.isr is read only
  628. ld8 r3=[r2],3*8
  629. ld8 r5=[r4],3*8
  630. ld8 r7=[r6],3*8;;
  631. mov cr.iip=r3
  632. mov cr.ifa=r5
  633. mov cr.itir=r7;;
  634. ld8 r3=[r2],3*8
  635. ld8 r5=[r4],3*8
  636. ld8 r7=[r6],3*8;;
  637. mov cr.iipa=r3
  638. mov cr.ifs=r5
  639. mov cr.iim=r7
  640. ld8 r3=[r2],160;; // 160 byte increment
  641. mov cr.iha=r3
  642. rSkipIntrRegs:
  643. ld8 r3=[r2],152;; // another 152 byte inc.
  644. add r4=8,r2 // duplicate r2 in r4
  645. add r6=2*8,r2;; // duplicate r2 in r6
  646. ld8 r3=[r2],8*3
  647. ld8 r5=[r4],8*3
  648. ld8 r7=[r6],8*3;;
  649. mov cr.lid=r3
  650. // mov cr.ivr=r5 // cr.ivr is read only
  651. mov cr.tpr=r7;;
  652. ld8 r3=[r2],8*3
  653. ld8 r5=[r4],8*3
  654. ld8 r7=[r6],8*3;;
  655. // mov cr.eoi=r3
  656. // mov cr.irr0=r5 // cr.irr0 is read only
  657. // mov cr.irr1=r7;; // cr.irr1 is read only
  658. ld8 r3=[r2],8*3
  659. ld8 r5=[r4],8*3
  660. ld8 r7=[r6],8*3;;
  661. // mov cr.irr2=r3 // cr.irr2 is read only
  662. // mov cr.irr3=r5 // cr.irr3 is read only
  663. mov cr.itv=r7;;
  664. ld8 r3=[r2],8*7
  665. ld8 r5=[r4],8*7;;
  666. mov cr.pmv=r3
  667. mov cr.cmcv=r5;;
  668. ld8 r3=[r2],8*23
  669. ld8 r5=[r4],8*23;;
  670. adds r2=8*23,r2
  671. adds r4=8*23,r4;;
  672. // mov cr.lrr0=r3
  673. // mov cr.lrr1=r5
  674. adds r2=8*2,r2;;
  675. restore_ARs:
  676. add r4=8,r2 // duplicate r2 in r4
  677. add r6=2*8,r2;; // duplicate r2 in r4
  678. ld8 r3=[r2],3*8
  679. ld8 r5=[r4],3*8
  680. ld8 r7=[r6],3*8;;
  681. mov ar.k0=r3
  682. mov ar.k1=r5
  683. mov ar.k2=r7;;
  684. ld8 r3=[r2],3*8
  685. ld8 r5=[r4],3*8
  686. ld8 r7=[r6],3*8;;
  687. mov ar.k3=r3
  688. mov ar.k4=r5
  689. mov ar.k5=r7;;
  690. ld8 r3=[r2],10*8
  691. ld8 r5=[r4],10*8
  692. ld8 r7=[r6],10*8;;
  693. mov ar.k6=r3
  694. mov ar.k7=r5
  695. ;;
  696. ld8 r3=[r2],3*8
  697. ld8 r5=[r4],3*8
  698. ld8 r7=[r6],3*8;;
  699. // mov ar.rsc=r3
  700. // mov ar.bsp=r5 // ar.bsp is read only
  701. mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
  702. ;;
  703. mov ar.bspstore=r7;;
  704. ld8 r9=[r2],8*13;;
  705. mov ar.rnat=r9
  706. mov ar.rsc=r3
  707. ld8 r3=[r2],8*4;;
  708. mov ar.ccv=r3
  709. ld8 r3=[r2],8*4;;
  710. mov ar.unat=r3
  711. ld8 r3=[r2],8*4;;
  712. mov ar.fpsr=r3
  713. ld8 r3=[r2],160;; // 160
  714. // mov ar.itc=r3
  715. ld8 r3=[r2],8;;
  716. mov ar.pfs=r3
  717. ld8 r3=[r2],8;;
  718. mov ar.lc=r3
  719. ld8 r3=[r2];;
  720. mov ar.ec=r3
  721. add r2=8*62,r2;; // padding
  722. restore_RRs:
  723. mov r5=ar.lc
  724. mov ar.lc=0x08-1
  725. movl r4=0x00;;
  726. cStRRr:
  727. dep.z r7=r4,61,3
  728. ld8 r3=[r2],8;;
  729. mov rr[r7]=r3 // what are its access previledges?
  730. add r4=1,r4
  731. br.cloop.sptk.few cStRRr
  732. ;;
  733. mov ar.lc=r5
  734. ;;
  735. end_os_mca_restore:
  736. br ia64_os_mca_done_restore;;
  737. //EndStub//////////////////////////////////////////////////////////////////////
  738. // ok, the issue here is that we need to save state information so
  739. // it can be useable by the kernel debugger and show regs routines.
  740. // In order to do this, our best bet is save the current state (plus
  741. // the state information obtain from the MIN_STATE_AREA) into a pt_regs
  742. // format. This way we can pass it on in a useable format.
  743. //
  744. //
  745. // SAL to OS entry point for INIT on the monarch processor
  746. // This has been defined for registration purposes with SAL
  747. // as a part of ia64_mca_init.
  748. //
  749. // When we get here, the following registers have been
  750. // set by the SAL for our use
  751. //
  752. // 1. GR1 = OS INIT GP
  753. // 2. GR8 = PAL_PROC physical address
  754. // 3. GR9 = SAL_PROC physical address
  755. // 4. GR10 = SAL GP (physical)
  756. // 5. GR11 = Init Reason
  757. // 0 = Received INIT for event other than crash dump switch
  758. // 1 = Received wakeup at the end of an OS_MCA corrected machine check
  759. // 2 = Received INIT dude to CrashDump switch assertion
  760. //
  761. // 6. GR12 = Return address to location within SAL_INIT procedure
  762. GLOBAL_ENTRY(ia64_monarch_init_handler)
  763. .prologue
  764. // stash the information the SAL passed to os
  765. SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
  766. ;;
  767. SAVE_MIN_WITH_COVER
  768. ;;
  769. mov r8=cr.ifa
  770. mov r9=cr.isr
  771. adds r3=8,r2 // set up second base pointer
  772. ;;
  773. SAVE_REST
  774. // ok, enough should be saved at this point to be dangerous, and supply
  775. // information for a dump
  776. // We need to switch to Virtual mode before hitting the C functions.
  777. movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
  778. mov r3=psr // get the current psr, minimum enabled at this point
  779. ;;
  780. or r2=r2,r3
  781. ;;
  782. movl r3=IVirtual_Switch
  783. ;;
  784. mov cr.iip=r3 // short return to set the appropriate bits
  785. mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
  786. ;;
  787. rfi
  788. ;;
  789. IVirtual_Switch:
  790. //
  791. // We should now be running virtual
  792. //
  793. // Let's call the C handler to get the rest of the state info
  794. //
  795. alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
  796. ;;
  797. adds out0=16,sp // out0 = pointer to pt_regs
  798. ;;
  799. DO_SAVE_SWITCH_STACK
  800. .body
  801. adds out1=16,sp // out0 = pointer to switch_stack
  802. br.call.sptk.many rp=ia64_init_handler
  803. .ret1:
  804. return_from_init:
  805. br.sptk return_from_init
  806. END(ia64_monarch_init_handler)
  807. //
  808. // SAL to OS entry point for INIT on the slave processor
  809. // This has been defined for registration purposes with SAL
  810. // as a part of ia64_mca_init.
  811. //
  812. GLOBAL_ENTRY(ia64_slave_init_handler)
  813. 1: br.sptk 1b
  814. END(ia64_slave_init_handler)