
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *		entry offset ----/     /         /                  /  /
 *		entry number ---------/         /                  /  /
 *		size of the entry -------------/                   /  /
 *		vector name -------------------------------------/    /
 *		interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on a 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif
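/*
 * When enabled, DBG_FAULT keeps ar.k2 as a ring of the last eight vector
 * numbers taken; in C terms (sketch only):
 *
 *	k2 = (k2 << 8) | vector;	// most recent fault in the low byte
 */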
#include "minstate.h"

#define FAULT(n)									\
	mov r31=pr;									\
	mov r19=n;;			/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as the page-table entries above the pte level have
	 * valid mappings for the faulting address.  The TLB entry for the
	 * original miss gets inserted only if the pte entry indicates that the
	 * page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
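	/*
	 * For orientation only, a rough C sketch of the walk below, assuming
	 * the 3-level layout (CONFIG_PGTABLE_4 adds the pud step); region(),
	 * pgd_index() etc. are shorthand here, not the real helpers:
	 *
	 *	pgd_t *pgd = (region(ifa) == 5 ? swapper_pg_dir
	 *				       : kr_pt_base) + pgd_index(ifa);
	 *	if (!*pgd || !*(pmd = pmd_offset(pgd, ifa))
	 *		  || !(*(pte = pte_offset(pmd, ifa)) & _PAGE_P))
	 *		page_fault();
	 *	itc(*pte);		// install the original translation
	 *	itc_d(pte_page(pmd) | __DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW);
	 */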
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bits
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#ifdef CONFIG_PGTABLE_4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 *	r28 = equivalent of pud_offset(pgd, ifa)
	 *	r17 = equivalent of pmd_offset(pud, ifa)
	 *	r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 *	r29 = *pud
	 *	r20 = *pmd
	 *	r18 = *pte
	 */
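	/*
	 * The re-check in C terms (sketch; the .or.andcm compares accumulate
	 * into p6/p7, so a changed pud/pmd ends up purging both entries):
	 *
	 *	if (*pud != r29 || *pmd != r20) {
	 *		ptc_l(iha, PAGE_SHIFT << 2);	// purge PTE-page entry
	 *		ptc_l(ifa, PAGE_SHIFT << 2);	// purge original entry
	 *	} else if (*pte != r18)
	 *		ptc_l(ifa, PAGE_SHIFT << 2);
	 */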
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#ifdef CONFIG_PGTABLE_4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#ifdef CONFIG_PGTABLE_4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
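	/*
	 * Nested-miss protocol in C terms (sketch): r30 holds the retry label,
	 * r29 the saved b0; nested_dtlb_miss recomputes r17 and branches back:
	 *
	 *	pte = *(pte_t *) iha;		// may itself take a DTLB miss
	 *	if (!(pte & _PAGE_P))
	 *		page_fault();
	 *	itc_i(pte);			// install the translation
	 */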
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)
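	/*
	 * The identity mapping built above, in C terms (sketch; the andcm
	 * trick sets the UC memory-attribute bit iff address bit 61 is 0,
	 * i.e. region 6 rather than cacheable region 7):
	 *
	 *	pte = (ifa & ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
	 *	      | PAGE_KERNEL;
	 *	if (!(ifa & (1UL << 61)))
	 *		pte |= _PAGE_MA_UC;	// uncacheable, for I/O space
	 */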
	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	mov r31=pr
	mov r24=PERCPU_ADDR
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
	tbit.z p12,p0=r16,61			// access to region 6?
	mov r25=PERCPU_PAGE_SHIFT << 2
	mov r26=PERCPU_PAGE_SIZE
	nop.m 0
	nop.b 0
	;;
(p10)	mov r19=IA64_KR(PER_CPU_DATA)
(p11)	and r19=r19,r16				// clear non-ppn fields
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
(p10)	sub r19=r19,r26
(p10)	mov cr.itir=r25
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	;;
	or r19=r19,r17				// insert PTE control bits into r19
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	mov r18=cr.itir
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#ifdef CONFIG_PGTABLE_4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr)
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
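	/*
	 * The SMP path below in C terms (sketch; cmpxchg8.acq compares
	 * against ar.ccv, which is loaded with the old PTE value):
	 *
	 *	old = *pte;
	 *	if ((old & _PAGE_P) &&
	 *	    cmpxchg(pte, old, new = old | _PAGE_D | _PAGE_A) == old)
	 *		itc_d(new);		// install the updated PTE
	 *	if (*pte != new)		// lost a race with ptc.g?
	 *		ptc_l(ifa, PAGE_SHIFT << 2);
	 */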
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	tbit.z p7,p6=r18,_PAGE_P_BIT		// check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// only compare if page is present
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6=r18,_PAGE_P_BIT		// check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// only if page present
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6=r18,_PAGE_P_BIT		// check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// only if page is present
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system calls must adhere to the
	 * normal procedure-calling conventions.
	 *
	 * Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 * Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
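	/*
	 * Dispatch below in C terms (sketch; r15 carries the syscall number
	 * biased by 1024, and a NaT r15 also lands in sys_ni_syscall):
	 *
	 *	n = r15 - 1024;
	 *	handler = ((unsigned long) n < NR_syscalls && !isnat(r15))
	 *		  ? sys_call_table[n] : sys_ni_syscall;
	 */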
	DBG_FAULT(11)
	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
	mov r29=cr.ipsr				// M2 (12 cyc)
	mov r31=pr				// I0 (2 cyc)

	mov r17=cr.iim				// M2 (2 cyc)
	mov.m r27=ar.rsc			// M2 (12 cyc)
	mov r18=__IA64_BREAK_SYSCALL		// A

	mov.m ar.rsc=0				// M2
	mov.m r21=ar.fpsr			// M2 (12 cyc)
	mov r19=b6				// I0 (2 cyc)
	;;
	mov.m r23=ar.bspstore			// M2 (12 cyc)
	mov.m r24=ar.rnat			// M2 (5 cyc)
	mov.i r26=ar.pfs			// I0 (2 cyc)

	invala					// M0|1
	nop.m 0					// M
	mov r20=r1				// A			save r1

	nop.m 0
	movl r30=sys_call_table			// X

	mov r28=cr.iip				// M2 (2 cyc)
	cmp.eq p0,p7=r18,r17			// I0 is this a system call?
(p7)	br.cond.spnt non_syscall		// B  no ->
	//
	// From this point on, we are definitely on the syscall-path
	// and we can use (non-banked) scratch registers.
	//
///////////////////////////////////////////////////////////////////////
	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
	mov r2=r16				// A    setup r2 for ia64_syscall_setup
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags

	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	adds r15=-1024,r15			// A    subtract 1024 from syscall number
	mov r3=NR_syscalls - 1
	;;
	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr

	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
	;;

	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
	cmp.eq p8,p9=2,r8			// A    ipsr.ei==2?
	;;

(p8)	mov r8=0				// A    clear ei to 0
(p7)	movl r30=sys_ni_syscall			// X

(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
(p9)	adds r8=1,r8				// A    increment ei to next slot
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	;;
	mov b6=r30				// I0   setup syscall handler branch reg early
#else
	nop.i 0
	;;
#endif

	mov.m r25=ar.unat			// M2 (5 cyc)
	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
	adds r15=1024,r15			// A    restore original syscall number
	//
	// If any of the above loads miss in L1D, we'll stall here until
	// the data arrives.
	//
///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mov.m r30=ar.itc			// M    get cycle for accounting
#else
	mov b6=r30				// I0   setup syscall handler branch reg early
#endif
	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?

	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
	mov r18=ar.bsp				// M2 (12 cyc)
(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
	;;
.back_from_break_fixup:
(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1	// A    compute base of memory stack
	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	// mov.m r30=ar.itc is called in advance, and r13 is current
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M  get last stamp
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M  time at leave
	;;
	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M  cumulated stime
	ld8 r21=[r17]				// M  cumulated utime
	sub r22=r19,r18				// A  stime before leave
	;;
	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M  update stamp
	sub r18=r30,r19				// A  elapsed time in user
	;;
	add r20=r20,r22				// A  sum stime
	add r21=r21,r18				// A  sum utime
	;;
	st8 [r16]=r20				// M  update stime
	st8 [r17]=r21				// M  update utime
	;;
.skip_accounting:
#endif
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
	;;

	ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
	movl r3=ia64_ret_from_syscall		// X
	;;

	srlz.i					// M0   ensure interruption collection is on
	mov rp=r3				// I0   set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

(p15)	ssm psr.i				// M2   restore psr.i
(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
	// For the rare cases where a system call is done from within the kernel,
	// we fix things up at this point:
.break_fixup:
	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
	;;
	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
	br.cond.sptk .back_from_break_fixup
END(break_fault)
	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr				// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER			// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	srlz.i					// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	MCA_RECOVER_RANGE(interrupt)
	alloc r14=ar.pfs,0,0,2,0		// must be first in an insn group
	mov out0=cr.ivr				// pass cr.ivr as first arg
	add out1=16,sp				// pass pointer to pt_regs as second arg
	;;
	srlz.d					// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 * allocate stacked registers so it can safely demine any
	 * potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	- r1: stack pointer
	 *	- r2: current task pointer
	 *	- r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r30: ar.itc for accounting (don't touch)
	 *	- r31: saved pr
	 *	- b0: original contents (to be saved)
	 * On exit:
	 *	- p10: TRUE if syscall is invoked with more than 8 out
	 *	       registers or r15's Nat is true
	 *	- r1: kernel's gp
	 *	- r3: preserved (same as on entry)
	 *	- r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- r14: preserved (same as on entry)
	 *	- p13: preserved
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 *	- b6: preserved (same as on entry)
	 */
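	/*
	 * The p10/-EINVAL condition, in C terms (sketch; sol and sof are
	 * decoded from the saved ar.pfs):
	 *
	 *	if (sof - sol > 8 || isnat(r15))	// >8 syscall args, or a
	 *		r8 = -EINVAL;			// NaT syscall number
	 */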
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT	// I0
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4			// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5			// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p13,p0=in6
	cmp.lt p10,p9=r11,r8			// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15

	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)
	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7

	nop.i 0
	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * There is no particular reason for this code to be here, other than
	 * that there happens to be space here that would go unused otherwise.
	 * If this fault ever gets "unreserved", simply move the following
	 * code to a more suitable spot...
	 *
	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
	 * enabled and if the macro is entered from user mode.
	 */
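	/*
	 * In C terms (sketch), with now = ar.itc read into r20 beforehand
	 * and ti the current thread_info:
	 *
	 *	ti->ac_stime += ti->ac_leave - ti->ac_stamp;	// time in kernel
	 *	ti->ac_utime += now - ti->ac_leave;		// time in user
	 *	ti->ac_stamp  = now;
	 */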
ENTRY(account_sys_enter)
	// mov.m r20=ar.itc is called in advance, and r13 is current
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time when we left the kernel
	;;
	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
	ld8 r21=[r17]				// cumulated utime
	sub r22=r19,r18				// stime before leave kernel
	;;
	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp
	sub r18=r20,r19				// elapsed time in user mode
	;;
	add r23=r23,r22				// sum stime
	add r21=r21,r18				// sum utime
	;;
	st8 [r16]=r23				// update stime
	st8 [r17]=r21				// update utime
	;;
	br.ret.sptk.many rp
END(account_sys_enter)
#endif

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	mov ar.rsc=r27				// restore ar.rsc before SAVE_MIN_WITH_COVER
	;;
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2				// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)
	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24				// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh				// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)
	mov r16=cr.ipsr
	mov r17=cr.isr
	mov r31=pr				// save PR
	;;
	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
	;;
	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
	dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)	br.cond.spnt 1f		// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
	;;
	mov cr.ipsr=r16			// set cr.ipsr.ed
	mov pr=r31,-1
	;;
	rfi

1:	mov pr=r31,-1
	;;
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
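	/*
	 * The emulation in C terms (sketch): imm21 counts 16-byte bundles,
	 * so shifting left 43 and arithmetic-right 39 both sign-extends it
	 * and scales it by 16:
	 *
	 *	iip  += ((long) iim << 43) >> 39;	// signed target offset
	 *	ipsr &= ~(3UL << 41);			// restart at slot 0
	 */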
  1274. mov r18=cr.iim
  1275. ;;
  1276. mov r17=cr.iip
  1277. shl r18=r18,43 // put sign bit in position (43=64-21)
  1278. ;;
  1279. mov r16=cr.ipsr
  1280. shr r18=r18,39 // sign extend (39=43-4)
  1281. ;;
  1282. add r17=r17,r18 // now add the offset
  1283. ;;
  1284. mov cr.iip=r17
  1285. dep r16=0,r16,41,2 // clear EI
  1286. ;;
  1287. mov cr.ipsr=r16
  1288. ;;
  1289. rfi // and go back
  1290. END(speculation_vector)
	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)
	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)
	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi
1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)
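	/*
	 * Fast-path summary: the rfi above dismisses a system-flag intercept
	 * (ISR.code == 2) when eflags.ac toggled, since ar.eflag already holds
	 * the new value (cr.iim has the old one); every other intercept falls
	 * through to FAULT(46).
	 */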
	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)
	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8.  Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
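	/*
	 * In C terms the fault handler's return value looks roughly like this
	 * (struct name and layout here are illustrative, not lifted from a
	 * header):
	 *
	 *	struct illegal_op_return {
	 *		unsigned long fkt;		// callback, or 0 for none
	 *		unsigned long arg1, arg2, arg3;
	 *	};
	 *
	 * The ia64 software conventions return such an aggregate in r8-r11,
	 * which is exactly how r8 (callback) and r9-r11 (out0-out2) are
	 * unpacked below.
	 */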
ENTRY(dispatch_illegal_op_fault)
	.prologue
	.body
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i		// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i	// restore psr.i
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)
#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point
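	//
	// Register staging used below (IA-32 int 0x80 convention; the ia32
	// register images live in pt_regs, out0-out5 feed the C handler):
	//
	//	eax (r8)  -> syscall number	ebx (r11) -> out0
	//	ecx (r9)  -> out1		edx (r10) -> out2
	//	esi (r14) -> out3		edi (r15) -> out4
	//	ebp (r13) -> out5
	//
	// Vectors other than 0x80 are routed to non_ia32_syscall instead.
	//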
ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2		// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16		// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14				// interrupt #
	add out1=16,sp				// pointer to pt_regs
	;;					// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */