- /*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- * kernel entry points (interruptions, system call wrappers)
- * Copyright (C) 1999,2000 Philipp Rumpf
- * Copyright (C) 1999 SuSE GmbH Nuernberg
- * Copyright (C) 2000 Hewlett-Packard (John Marvin)
- * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
- #include <asm/asm-offsets.h>
- /* we have the following possibilities to act on an interruption:
- * - handle in assembly and use shadowed registers only
- * - save registers to kernel stack and handle in assembly or C */
- #include <asm/psw.h>
- #include <asm/cache.h> /* for L1_CACHE_SHIFT */
- #include <asm/assembly.h> /* for LDREG/STREG defines */
- #include <asm/pgtable.h>
- #include <asm/signal.h>
- #include <asm/unistd.h>
- #include <asm/thread_info.h>
- #include <linux/linkage.h>
- #ifdef CONFIG_64BIT
- .level 2.0w
- #else
- .level 2.0
- #endif
- .import pa_dbit_lock,data
- /* space_to_prot macro creates a prot id from a space id */
- #if (SPACEID_SHIFT) == 0
- .macro space_to_prot spc prot
- depd,z \spc,62,31,\prot
- .endm
- #else
- .macro space_to_prot spc prot
- extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
- .endm
- #endif
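- /* Illustrative note (not part of the build): for the common
- * SPACEID_SHIFT == 0 case the macro above amounts to
- * prot = (spc & 0x7fffffff) << 1;
- * i.e. the protection id is the space id shifted up by one bit. */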
- /* Switch to virtual mapping, trashing only %r1 */
- .macro virt_map
- /* pcxt_ssm_bug */
- rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
- mtsp %r0, %sr4
- mtsp %r0, %sr5
- mfsp %sr7, %r1
- or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
- mtsp %r1, %sr3
- tovirt_r1 %r29
- load32 KERNEL_PSW, %r1
- rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
- mtsp %r0, %sr6
- mtsp %r0, %sr7
- mtctl %r0, %cr17 /* Clear IIASQ tail */
- mtctl %r0, %cr17 /* Clear IIASQ head */
- mtctl %r1, %ipsw
- load32 4f, %r1
- mtctl %r1, %cr18 /* Set IIAOQ tail */
- ldo 4(%r1), %r1
- mtctl %r1, %cr18 /* Set IIAOQ head */
- rfir
- nop
- 4:
- .endm
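- /* Illustrative summary (not part of the build): virt_map disables
- * interrupts, zeroes %sr4-%sr7 (stashing a non-zero %sr7 in %sr3),
- * converts %r29 to a virtual address, loads KERNEL_PSW into %ipsw,
- * clears IIASQ, queues the local label 4: into IIAOQ and rfir's, so
- * execution resumes at 4: with translation on. Only %r1 is clobbered. */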
- /*
- * The "get_stack" macros are responsible for determining the
- * kernel stack value.
- *
- * If sr7 == 0
- * Already using a kernel stack, so call the
- * get_stack_use_r30 macro to push a pt_regs structure
- * on the stack, and store registers there.
- * else
- * Need to set up a kernel stack, so call the
- * get_stack_use_cr30 macro to set up a pointer
- * to the pt_regs structure contained within the
- * task pointer pointed to by cr30. Set the stack
- * pointer to point to the end of the task structure.
- *
- * Note that we use shadowed registers for temps until
- * we can save %r26 and %r29. %r26 is used to preserve
- * %r8 (a shadowed register) which temporarily contained
- * either the fault type ("code") or the eirr. We need
- * to use a non-shadowed register to carry the value over
- * the rfir in virt_map. We use %r26 since this value winds
- * up being passed as the argument to either do_cpu_irq_mask
- * or handle_interruption. %r29 is used to hold a pointer
- * to the register save area, and once again, it needs to
- * be a non-shadowed register so that it survives the rfir.
- *
- * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
- */
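- /* Rough C-style sketch of that decision (illustrative only, not part of
- * the build; helper names are made up):
- *
- * if (mfsp(sr7) == 0) {
- *     regs = sp;  sp += PT_SZ_ALGN;          // get_stack_use_r30
- * } else {
- *     regs = task_pt_regs_of(cr30);          // get_stack_use_cr30
- *     sp   = cr30 + THREAD_SZ_ALGN;          // fresh kernel stack
- * }
- */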
- .macro get_stack_use_cr30
- /* we save the registers in the task struct */
- mfctl %cr30, %r1
- tophys %r1,%r9
- LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
- tophys %r1,%r9
- ldo TASK_REGS(%r9),%r9
- STREG %r30, PT_GR30(%r9)
- STREG %r29,PT_GR29(%r9)
- STREG %r26,PT_GR26(%r9)
- copy %r9,%r29
- mfctl %cr30, %r1
- ldo THREAD_SZ_ALGN(%r1), %r30
- .endm
- .macro get_stack_use_r30
- /* we put a struct pt_regs on the stack and save the registers there */
- tophys %r30,%r9
- STREG %r30,PT_GR30(%r9)
- ldo PT_SZ_ALGN(%r30),%r30
- STREG %r29,PT_GR29(%r9)
- STREG %r26,PT_GR26(%r9)
- copy %r9,%r29
- .endm
- .macro rest_stack
- LDREG PT_GR1(%r29), %r1
- LDREG PT_GR30(%r29),%r30
- LDREG PT_GR29(%r29),%r29
- .endm
- /* default interruption handler
- * (calls traps.c:handle_interruption) */
- .macro def code
- b intr_save
- ldi \code, %r8
- .align 32
- .endm
- /* Interrupt interruption handler
- * (calls irq.c:do_cpu_irq_mask) */
- .macro extint code
- b intr_extint
- mfsp %sr7,%r16
- .align 32
- .endm
- .import os_hpmc, code
- /* HPMC handler */
- .macro hpmc code
- nop /* must be a NOP, will be patched later */
- load32 PA(os_hpmc), %r3
- bv,n 0(%r3)
- nop
- .word 0 /* checksum (will be patched) */
- .word PA(os_hpmc) /* address of handler */
- .word 0 /* length of handler */
- .endm
- /*
- * Performance Note: Instructions will be moved up into
- * this part of the code later on, once we are sure
- * that the tlb miss handlers are close to final form.
- */
- /* Register definitions for tlb miss handler macros */
- va = r8 /* virtual address for which the trap occurred */
- spc = r24 /* space for which the trap occurred */
- #ifndef CONFIG_64BIT
- /*
- * itlb miss interruption handler (parisc 1.1 - 32 bit)
- */
- .macro itlb_11 code
- mfctl %pcsq, spc
- b itlb_miss_11
- mfctl %pcoq, va
- .align 32
- .endm
- #endif
-
- /*
- * itlb miss interruption handler (parisc 2.0)
- */
- .macro itlb_20 code
- mfctl %pcsq, spc
- #ifdef CONFIG_64BIT
- b itlb_miss_20w
- #else
- b itlb_miss_20
- #endif
- mfctl %pcoq, va
- .align 32
- .endm
-
- #ifndef CONFIG_64BIT
- /*
- * naitlb miss interruption handler (parisc 1.1 - 32 bit)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
- */
- .macro naitlb_11 code
- mfctl %isr,spc
- b itlb_miss_11
- mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
- .align 32
- .endm
- #endif
-
- /*
- * naitlb miss interruption handler (parisc 2.0)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
- */
- .macro naitlb_20 code
- mfctl %isr,spc
- #ifdef CONFIG_64BIT
- b itlb_miss_20w
- #else
- b itlb_miss_20
- #endif
- mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
- .align 32
- .endm
-
- #ifndef CONFIG_64BIT
- /*
- * dtlb miss interruption handler (parisc 1.1 - 32 bit)
- */
- .macro dtlb_11 code
- mfctl %isr, spc
- b dtlb_miss_11
- mfctl %ior, va
- .align 32
- .endm
- #endif
- /*
- * dtlb miss interruption handler (parisc 2.0)
- */
- .macro dtlb_20 code
- mfctl %isr, spc
- #ifdef CONFIG_64BIT
- b dtlb_miss_20w
- #else
- b dtlb_miss_20
- #endif
- mfctl %ior, va
- .align 32
- .endm
-
- #ifndef CONFIG_64BIT
- /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
- .macro nadtlb_11 code
- mfctl %isr,spc
- b nadtlb_miss_11
- mfctl %ior,va
- .align 32
- .endm
- #endif
-
- /* nadtlb miss interruption handler (parisc 2.0) */
- .macro nadtlb_20 code
- mfctl %isr,spc
- #ifdef CONFIG_64BIT
- b nadtlb_miss_20w
- #else
- b nadtlb_miss_20
- #endif
- mfctl %ior,va
- .align 32
- .endm
-
- #ifndef CONFIG_64BIT
- /*
- * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
- */
- .macro dbit_11 code
- mfctl %isr,spc
- b dbit_trap_11
- mfctl %ior,va
- .align 32
- .endm
- #endif
- /*
- * dirty bit trap interruption handler (parisc 2.0)
- */
- .macro dbit_20 code
- mfctl %isr,spc
- #ifdef CONFIG_64BIT
- b dbit_trap_20w
- #else
- b dbit_trap_20
- #endif
- mfctl %ior,va
- .align 32
- .endm
- /* The following are simple 32 vs 64 bit instruction
- * abstractions for the macros */
- .macro EXTR reg1,start,length,reg2
- #ifdef CONFIG_64BIT
- extrd,u \reg1,32+\start,\length,\reg2
- #else
- extrw,u \reg1,\start,\length,\reg2
- #endif
- .endm
- .macro DEP reg1,start,length,reg2
- #ifdef CONFIG_64BIT
- depd \reg1,32+\start,\length,\reg2
- #else
- depw \reg1,\start,\length,\reg2
- #endif
- .endm
- .macro DEPI val,start,length,reg
- #ifdef CONFIG_64BIT
- depdi \val,32+\start,\length,\reg
- #else
- depwi \val,\start,\length,\reg
- #endif
- .endm
- /* In LP64, the space register contains part of the upper 32 bits of the
- * faulting address. We have to extract this and place it in the va,
- * zeroing the corresponding bits in the space register */
- .macro space_adjust spc,va,tmp
- #ifdef CONFIG_64BIT
- extrd,u \spc,63,SPACEID_SHIFT,\tmp
- depd %r0,63,SPACEID_SHIFT,\spc
- depd \tmp,31,SPACEID_SHIFT,\va
- #endif
- .endm
- .import swapper_pg_dir,code
- /* Get the pgd. For faults on space zero (kernel space), this
- * is simply swapper_pg_dir. For user space faults, the
- * pgd is stored in %cr25 */
- .macro get_pgd spc,reg
- ldil L%PA(swapper_pg_dir),\reg
- ldo R%PA(swapper_pg_dir)(\reg),\reg
- or,COND(=) %r0,\spc,%r0
- mfctl %cr25,\reg
- .endm
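- /* Illustrative C-style equivalent (not part of the build):
- *
- * pgd = __pa(swapper_pg_dir);       // kernel-space fault (spc == 0)
- * if (spc != 0)
- *     pgd = mfctl(cr25);            // user-space fault: current pgd
- */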
- /*
- space_check(spc,tmp,fault)
- spc - The space we saw the fault with.
- tmp - The place to store the current space.
- fault - Function to call on failure.
- Only allow faults on different spaces from the
- currently active one if we're the kernel
- */
- .macro space_check spc,tmp,fault
- mfsp %sr7,\tmp
- or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
- * as kernel, so defeat the space
- * check if it is */
- copy \spc,\tmp
- or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
- cmpb,COND(<>),n \tmp,\spc,\fault
- .endm
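- /* Illustrative C-style equivalent (not part of the build):
- *
- * tmp = mfsp(sr7);                  // space we are executing in
- * if (spc == 0)
- *     tmp = 0;                      // kernel-space fault: always allowed
- * if (tmp != 0 && tmp != spc)
- *     goto fault;                   // user fault on a space it isn't using
- */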
- /* Look up a PTE in a 2-Level scheme (faulting at each
- * level if the entry isn't present)
- *
- * NOTE: we use ldw even for LP64, since the short pointers
- * can address up to 1TB
- */
- .macro L2_ptep pmd,pte,index,va,fault
- #if PT_NLEVELS == 3
- EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
- #else
- EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- #endif
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
- copy %r0,\pte
- ldw,s \index(\pmd),\pmd
- bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
- DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
- copy \pmd,%r9
- SHLREG %r9,PxD_VALUE_SHIFT,\pmd
- EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
- shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
- LDREG %r0(\pmd),\pte /* pmd is now pte */
- bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
- .endm
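- /* Illustrative C-style outline of the walk above (not part of the build):
- *
- * pmd = ((u32 *)pmd_base)[bits_of(va, pmd/pgd level)];
- * if (!(pmd & PxD_PRESENT)) goto fault;
- * pte_table = (pmd & ~PxD_FLAGS) << PxD_VALUE_SHIFT;  // phys addr of PTE page
- * pte = pte_table[bits_of(va, pte level)];
- * if (!(pte & _PAGE_PRESENT)) goto fault;
- */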
- /* Look up PTE in a 3-Level scheme.
- *
- * Here we implement a Hybrid L2/L3 scheme: we allocate the
- * first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd
- * sizes are arranged so that a single pmd covers 4GB (giving
- * a full LP64 process access to 8TB) so our lookups are
- * effectively L2 for the first 4GB of the kernel (i.e. for
- * all ILP32 processes and all the kernel for machines with
- * under 4GB of memory) */
- .macro L3_ptep pgd,pte,index,va,fault
- #if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
- extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- copy %r0,\pte
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- ldw,s \index(\pgd),\pgd
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- shld \pgd,PxD_VALUE_SHIFT,\index
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- copy \index,\pgd
- extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
- #endif
- L2_ptep \pgd,\pte,\index,\va,\fault
- .endm
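- /* The conditionally nullified instructions above implement, roughly:
- * if the PGD-level bits of va are all zero, skip the pgd lookup and use
- * the pmd allocated adjacent to the pgd (pgd + ASM_PGD_PMD_OFFSET), so
- * the first 4GB is walked as a 2-level table; otherwise load the pgd
- * entry, check PxD_PRESENT, and shift it into a pmd pointer before
- * falling through to L2_ptep. (Illustrative summary only.) */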
- /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
- * don't needlessly dirty the cache line if it was already set */
- .macro update_ptep ptep,pte,tmp,tmp1
- ldi _PAGE_ACCESSED,\tmp1
- or \tmp1,\pte,\tmp
- and,COND(<>) \tmp1,\pte,%r0
- STREG \tmp,0(\ptep)
- .endm
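- /* Illustrative C-style equivalent (not part of the build):
- *
- * if (!(pte & _PAGE_ACCESSED))
- *     *ptep = pte | _PAGE_ACCESSED;    // write back only when needed
- */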
- /* Set the dirty bit (and accessed bit). No need to be
- * clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
- ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
- or \tmp,\pte,\pte
- STREG \pte,0(\ptep)
- .endm
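- /* I.e., roughly: *ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY;
- * (illustrative only) */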
- /* Convert the pte and prot to tlb insertion values. How
- * this happens is quite subtle, read below */
- .macro make_insert_tlb spc,pte,prot
- space_to_prot \spc \prot /* create prot id from space */
- /* The following is the real subtlety. This is depositing
- * T <-> _PAGE_REFTRAP
- * D <-> _PAGE_DIRTY
- * B <-> _PAGE_DMB (memory break)
- *
- * Then incredible subtlety: The access rights are
- * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
- * See 3-14 of the parisc 2.0 manual
- *
- * Finally, _PAGE_READ goes in the top bit of PL1 (so we
- * trigger an access rights trap in user space if the user
- * tries to read an unreadable page) */
- depd \pte,8,7,\prot
- /* PAGE_USER indicates the page can be read with user privileges,
- * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
- * contains _PAGE_READ) */
- extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
- depdi 7,11,3,\prot
- /* If we're a gateway page, drop PL2 back to zero for promotion
- * to kernel privilege (so we can execute the page as kernel).
- * Any privilege promotion page always denies read and write */
- extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
- depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
- /* Enforce uncacheable pages.
- * This should ONLY be used for MMIO on PA 2.0 machines.
- * Memory/DMA is cache coherent on all PA2.0 machines we support
- * (that means T-class is NOT supported) and the memory controllers
- * on most of those machines only handle cache transactions.
- */
- extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
- depi 1,12,1,\prot
- /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
- depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
- .endm
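- /* Net effect (illustrative summary, not part of the build): prot ends up
- * holding the space-derived protection id, the T/D/B and access-rights
- * bits copied from the pte, PL1/PL2 set for user access on _PAGE_USER
- * pages (with PL2 dropped back to 0 on gateway pages), and the
- * uncacheable bit for _PAGE_NO_CACHE pages; pte is turned into the
- * physical page address plus the default page-size encoding that
- * iitlbt/idtlbt expect. */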
- /* Identical macro to make_insert_tlb above, except it
- * makes the tlb entry for the differently formatted pa11
- * insertion instructions */
- .macro make_insert_tlb_11 spc,pte,prot
- zdep \spc,30,15,\prot
- dep \pte,8,7,\prot
- extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
- depi 1,12,1,\prot
- extru,= \pte,_PAGE_USER_BIT,1,%r0
- depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
- extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
- depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
- /* Get rid of prot bits and convert to page addr for iitlba */
- depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
- extru \pte,24,25,\pte
- .endm
- /* This is for ILP32 PA2.0 only. The TLB insertion needs
- * to extend into I/O space if the address is 0xfXXXXXXX
- * so we extend the f's into the top word of the pte in
- * this case */
- .macro f_extend pte,tmp
- extrd,s \pte,42,4,\tmp
- addi,<> 1,\tmp,%r0
- extrd,s \pte,63,25,\pte
- .endm
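- /* Conceptually (illustrative only): if the top nibble of the 32-bit
- * physical address is 0xf, sign-extend it so the resulting address lands
- * in the I/O space above 4GB that PA 2.0 maps 0xfXXXXXXX into. */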
- /* The alias region is an 8MB-aligned, 16MB region used to clear and
- * copy user pages at addresses congruent with the user
- * virtual address.
- *
- * To use the alias page, you set %r26 up with the "to" TLB
- * entry (identifying the physical page) and %r23 up with
- * the "from" TLB entry (or nothing if only a "to" entry---for
- * clear_user_page_asm) */
- .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
- cmpib,COND(<>),n 0,\spc,\fault
- ldil L%(TMPALIAS_MAP_START),\tmp
- #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
- /* on LP64, ldi will sign extend into the upper 32 bits,
- * which is behaviour we don't want */
- depdi 0,31,32,\tmp
- #endif
- copy \va,\tmp1
- DEPI 0,31,23,\tmp1
- cmpb,COND(<>),n \tmp,\tmp1,\fault
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
- depd,z \prot,8,7,\prot
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
- #ifdef CONFIG_64BIT
- extrd,u,*= \va,41,1,%r0
- #else
- extrw,u,= \va,9,1,%r0
- #endif
- or,COND(tr) %r23,%r0,\pte
- or %r26,%r0,\pte
- .endm
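- /* Illustrative C-style outline (not part of the build; helper name is
- * made up):
- *
- * if (spc != 0) goto fault;                       // kernel-only region
- * if ((va & ~0x7fffffUL) != TMPALIAS_MAP_START)   // outside the alias area
- *     goto fault;
- * prot = TLB form of (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ);
- * pte  = va_selects_from_page(va) ? r23 : r26;    // "from" vs "to" entry
- */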
- /*
- * Align fault_vector_20 on 4K boundary so that both
- * fault_vector_11 and fault_vector_20 are on the
- * same page. This is only necessary as long as we
- * write protect the kernel text, which we may stop
- * doing once we use large page translations to cover
- * the static part of the kernel address space.
- */
- .text
- .align PAGE_SIZE
- ENTRY(fault_vector_20)
- /* First vector is invalid (0) */
- .ascii "cows can fly"
- .byte 0
- .align 32
- hpmc 1
- def 2
- def 3
- extint 4
- def 5
- itlb_20 6
- def 7
- def 8
- def 9
- def 10
- def 11
- def 12
- def 13
- def 14
- dtlb_20 15
- #if 0
- naitlb_20 16
- #else
- def 16
- #endif
- nadtlb_20 17
- def 18
- def 19
- dbit_20 20
- def 21
- def 22
- def 23
- def 24
- def 25
- def 26
- def 27
- def 28
- def 29
- def 30
- def 31
- END(fault_vector_20)
- #ifndef CONFIG_64BIT
- .align 2048
- ENTRY(fault_vector_11)
- /* First vector is invalid (0) */
- .ascii "cows can fly"
- .byte 0
- .align 32
- hpmc 1
- def 2
- def 3
- extint 4
- def 5
- itlb_11 6
- def 7
- def 8
- def 9
- def 10
- def 11
- def 12
- def 13
- def 14
- dtlb_11 15
- #if 0
- naitlb_11 16
- #else
- def 16
- #endif
- nadtlb_11 17
- def 18
- def 19
- dbit_11 20
- def 21
- def 22
- def 23
- def 24
- def 25
- def 26
- def 27
- def 28
- def 29
- def 30
- def 31
- END(fault_vector_11)
- #endif
- .import handle_interruption,code
- .import do_cpu_irq_mask,code
- /*
- * r26 = function to be called
- * r25 = argument to pass in
- * r24 = flags for do_fork()
- *
- * Kernel threads don't ever return, so they don't need
- * a true register context. We just save away the arguments
- * for copy_thread/ret_ to properly set up the child.
- */
- #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
- #define CLONE_UNTRACED 0x00800000
- .import do_fork
- ENTRY(__kernel_thread)
- STREG %r2, -RP_OFFSET(%r30)
- copy %r30, %r1
- ldo PT_SZ_ALGN(%r30),%r30
- #ifdef CONFIG_64BIT
- /* Yo, function pointers in wide mode are little structs... -PB */
- ldd 24(%r26), %r2
- STREG %r2, PT_GR27(%r1) /* Store child's %dp */
- ldd 16(%r26), %r26
- STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
- copy %r0, %r22 /* user_tid */
- #endif
- STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
- STREG %r25, PT_GR25(%r1)
- ldil L%CLONE_UNTRACED, %r26
- ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
- or %r26, %r24, %r26 /* will have kernel mappings. */
- ldi 1, %r25 /* stack_start, signals kernel thread */
- stw %r0, -52(%r30) /* user_tid */
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- BL do_fork, %r2
- copy %r1, %r24 /* pt_regs */
- /* Parent Returns here */
- LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
- ldo -PT_SZ_ALGN(%r30), %r30
- bv %r0(%r2)
- nop
- ENDPROC(__kernel_thread)
- /*
- * Child Returns here
- *
- * copy_thread moved args from temp save area set up above
- * into task save area.
- */
- ENTRY(ret_from_kernel_thread)
- /* Call schedule_tail first though */
- BL schedule_tail, %r2
- nop
- LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
- LDREG TASK_PT_GR25(%r1), %r26
- #ifdef CONFIG_64BIT
- LDREG TASK_PT_GR27(%r1), %r27
- LDREG TASK_PT_GR22(%r1), %r22
- #endif
- LDREG TASK_PT_GR26(%r1), %r1
- ble 0(%sr7, %r1)
- copy %r31, %r2
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- loadgp /* Thread could have been in a module */
- #endif
- #ifndef CONFIG_64BIT
- b sys_exit
- #else
- load32 sys_exit, %r1
- bv %r0(%r1)
- #endif
- ldi 0, %r26
- ENDPROC(ret_from_kernel_thread)
- .import sys_execve, code
- ENTRY(__execve)
- copy %r2, %r15
- copy %r30, %r16
- ldo PT_SZ_ALGN(%r30), %r30
- STREG %r26, PT_GR26(%r16)
- STREG %r25, PT_GR25(%r16)
- STREG %r24, PT_GR24(%r16)
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- BL sys_execve, %r2
- copy %r16, %r26
- cmpib,=,n 0,%r28,intr_return /* forward */
- /* yes, this will trap and die. */
- copy %r15, %r2
- copy %r16, %r30
- bv %r0(%r2)
- nop
- ENDPROC(__execve)
- /*
- * struct task_struct *_switch_to(struct task_struct *prev,
- * struct task_struct *next)
- *
- * switch kernel stacks and return prev */
- ENTRY(_switch_to)
- STREG %r2, -RP_OFFSET(%r30)
- callee_save_float
- callee_save
- load32 _switch_to_ret, %r2
- STREG %r2, TASK_PT_KPC(%r26)
- LDREG TASK_PT_KPC(%r25), %r2
- STREG %r30, TASK_PT_KSP(%r26)
- LDREG TASK_PT_KSP(%r25), %r30
- LDREG TASK_THREAD_INFO(%r25), %r25
- bv %r0(%r2)
- mtctl %r25,%cr30
- _switch_to_ret:
- mtctl %r0, %cr0 /* Needed for single stepping */
- callee_rest
- callee_rest_float
- LDREG -RP_OFFSET(%r30), %r2
- bv %r0(%r2)
- copy %r26, %r28
- ENDPROC(_switch_to)
- /*
- * Common rfi return path for interruptions, kernel execve, and
- * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
- * return via this path if the signal was received when the process
- * was running; if the process was blocked on a syscall then the
- * normal syscall_exit path is used. All syscalls for traced
- * processes exit via intr_restore.
- *
- * XXX If any syscalls that change a process's space id ever exit
- * this way, then we will need to copy %sr3 into PT_SR[3..7], and
- * adjust IASQ[0..1].
- *
- */
- .align PAGE_SIZE
- ENTRY(syscall_exit_rfi)
- mfctl %cr30,%r16
- LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
- ldo TASK_REGS(%r16),%r16
- /* Force iaoq to userspace, as the user has had access to our current
- * context via sigcontext. Also filter the PSW for the same reason.
- */
- LDREG PT_IAOQ0(%r16),%r19
- depi 3,31,2,%r19
- STREG %r19,PT_IAOQ0(%r16)
- LDREG PT_IAOQ1(%r16),%r19
- depi 3,31,2,%r19
- STREG %r19,PT_IAOQ1(%r16)
- LDREG PT_PSW(%r16),%r19
- load32 USER_PSW_MASK,%r1
- #ifdef CONFIG_64BIT
- load32 USER_PSW_HI_MASK,%r20
- depd %r20,31,32,%r1
- #endif
- and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
- load32 USER_PSW,%r1
- or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
- STREG %r19,PT_PSW(%r16)
- /*
- * If we aren't being traced, we never saved space registers
- * (we don't store them in the sigcontext), so set them
- * to "proper" values now (otherwise we'll wind up restoring
- * whatever was last stored in the task structure, which might
- * be inconsistent if an interrupt occurred while on the gateway
- * page). Note that we may be "trashing" values the user put in
- * them, but we don't support the user changing them.
- */
- STREG %r0,PT_SR2(%r16)
- mfsp %sr3,%r19
- STREG %r19,PT_SR0(%r16)
- STREG %r19,PT_SR1(%r16)
- STREG %r19,PT_SR3(%r16)
- STREG %r19,PT_SR4(%r16)
- STREG %r19,PT_SR5(%r16)
- STREG %r19,PT_SR6(%r16)
- STREG %r19,PT_SR7(%r16)
- intr_return:
- /* NOTE: Need to enable interrupts in case we schedule. */
- ssm PSW_SM_I, %r0
- intr_check_resched:
- /* check for reschedule */
- mfctl %cr30,%r1
- LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
- bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
- .import do_notify_resume,code
- intr_check_sig:
- /* As above */
- mfctl %cr30,%r1
- LDREG TI_FLAGS(%r1),%r19
- ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
- and,COND(<>) %r19, %r20, %r0
- b,n intr_restore /* skip past if we've nothing to do */
- /* This check is critical to having LWS
- * working. The IASQ is zero on the gateway
- * page and we cannot deliver any signals until
- * we get off the gateway page.
- *
- * Only do signals if we are returning to user space
- */
- LDREG PT_IASQ0(%r16), %r20
- cmpib,COND(=),n 0,%r20,intr_restore /* backward */
- LDREG PT_IASQ1(%r16), %r20
- cmpib,COND(=),n 0,%r20,intr_restore /* backward */
- copy %r0, %r25 /* long in_syscall = 0 */
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- BL do_notify_resume,%r2
- copy %r16, %r26 /* struct pt_regs *regs */
- b,n intr_check_sig
- intr_restore:
- copy %r16,%r29
- ldo PT_FR31(%r29),%r1
- rest_fp %r1
- rest_general %r29
- /* inverse of virt_map */
- pcxt_ssm_bug
- rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
- tophys_r1 %r29
- /* Restore space id's and special cr's from PT_REGS
- * structure pointed to by r29
- */
- rest_specials %r29
- /* IMPORTANT: rest_stack restores r29 last (we are using it)!
- * It also restores r1 and r30.
- */
- rest_stack
- rfi
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- #ifndef CONFIG_PREEMPT
- # define intr_do_preempt intr_restore
- #endif /* !CONFIG_PREEMPT */
- .import schedule,code
- intr_do_resched:
- /* Only call schedule on return to userspace. If we're returning
- * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
- * we jump back to intr_restore.
- */
- LDREG PT_IASQ0(%r16), %r20
- cmpib,COND(=) 0, %r20, intr_do_preempt
- nop
- LDREG PT_IASQ1(%r16), %r20
- cmpib,COND(=) 0, %r20, intr_do_preempt
- nop
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- ldil L%intr_check_sig, %r2
- #ifndef CONFIG_64BIT
- b schedule
- #else
- load32 schedule, %r20
- bv %r0(%r20)
- #endif
- ldo R%intr_check_sig(%r2), %r2
- /* preempt the current task on returning to kernel
- * mode from an interrupt, iff need_resched is set,
- * and preempt_count is 0. Otherwise, we continue on
- * our merry way back to the current running task.
- */
- #ifdef CONFIG_PREEMPT
- .import preempt_schedule_irq,code
- intr_do_preempt:
- rsm PSW_SM_I, %r0 /* disable interrupts */
- /* current_thread_info()->preempt_count */
- mfctl %cr30, %r1
- LDREG TI_PRE_COUNT(%r1), %r19
- cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
- nop /* prev insn branched backwards */
- /* check if we interrupted a critical path */
- LDREG PT_PSW(%r16), %r20
- bb,<,n %r20, 31 - PSW_SM_I, intr_restore
- nop
- BL preempt_schedule_irq, %r2
- nop
- b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
- #endif /* CONFIG_PREEMPT */
- /*
- * External interrupts.
- */
- intr_extint:
- cmpib,COND(=),n 0,%r16,1f
- get_stack_use_cr30
- b,n 2f
- 1:
- get_stack_use_r30
- 2:
- save_specials %r29
- virt_map
- save_general %r29
- ldo PT_FR0(%r29), %r24
- save_fp %r24
-
- loadgp
- copy %r29, %r26 /* arg0 is pt_regs */
- copy %r29, %r16 /* save pt_regs */
- ldil L%intr_return, %r2
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- b do_cpu_irq_mask
- ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
- ENDPROC(syscall_exit_rfi)
- /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
- ENTRY(intr_save) /* for os_hpmc */
- mfsp %sr7,%r16
- cmpib,COND(=),n 0,%r16,1f
- get_stack_use_cr30
- b 2f
- copy %r8,%r26
- 1:
- get_stack_use_r30
- copy %r8,%r26
- 2:
- save_specials %r29
- /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
- /*
- * FIXME: 1) Use a #define for the hardwired "6" below (and in
- * traps.c).
- * 2) Once we start executing code above 4 Gb, we need
- * to adjust iasq/iaoq here in the same way we
- * adjust isr/ior below.
- */
- cmpib,COND(=),n 6,%r26,skip_save_ior
- mfctl %cr20, %r16 /* isr */
- nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
- mfctl %cr21, %r17 /* ior */
- #ifdef CONFIG_64BIT
- /*
- * If the interrupted code was running with W bit off (32 bit),
- * clear the b bits (bits 0 & 1) in the ior.
- * save_specials left ipsw value in r8 for us to test.
- */
- extrd,u,*<> %r8,PSW_W_BIT,1,%r0
- depdi 0,1,2,%r17
- /*
- * FIXME: This code has hardwired assumptions about the split
- * between space bits and offset bits. This will change
- * when we allow alternate page sizes.
- */
- /* adjust isr/ior. */
- extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
- depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
- depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
- #endif
- STREG %r16, PT_ISR(%r29)
- STREG %r17, PT_IOR(%r29)
- skip_save_ior:
- virt_map
- save_general %r29
- ldo PT_FR0(%r29), %r25
- save_fp %r25
-
- loadgp
- copy %r29, %r25 /* arg1 is pt_regs */
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- ldil L%intr_check_sig, %r2
- copy %r25, %r16 /* save pt_regs */
- b handle_interruption
- ldo R%intr_check_sig(%r2), %r2
- ENDPROC(intr_save)
- /*
- * Note for all tlb miss handlers:
- *
- * cr24 contains a pointer to the kernel address space
- * page directory.
- *
- * cr25 contains a pointer to the current user address
- * space page directory.
- *
- * sr3 will contain the space id of the user address space
- * of the current running thread while that thread is
- * running in the kernel.
- */
- /*
- * register number allocations. Note that these are all
- * in the shadowed registers
- */
- t0 = r1 /* temporary register 0 */
- va = r8 /* virtual address for which the trap occurred */
- t1 = r9 /* temporary register 1 */
- pte = r16 /* pte/phys page # */
- prot = r17 /* prot bits */
- spc = r24 /* space for which the trap occurred */
- ptp = r25 /* page directory/page table pointer */
- #ifdef CONFIG_64BIT
- dtlb_miss_20w:
- space_adjust spc,va,t0
- get_pgd spc,ptp
- space_check spc,t0,dtlb_fault
- L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
-
- idtlbt pte,prot
- rfir
- nop
- dtlb_check_alias_20w:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
- idtlbt pte,prot
- rfir
- nop
- nadtlb_miss_20w:
- space_adjust spc,va,t0
- get_pgd spc,ptp
- space_check spc,t0,nadtlb_fault
- L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
- idtlbt pte,prot
- rfir
- nop
- nadtlb_check_flush_20w:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
- /* Insert a "flush only" translation */
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
- /* Get rid of prot bits and convert to page addr for idtlbt */
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
- idtlbt pte,prot
- rfir
- nop
- #else
- dtlb_miss_11:
- get_pgd spc,ptp
- space_check spc,t0,dtlb_fault
- L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
- make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- rfir
- nop
- dtlb_check_alias_11:
- /* Check to see if fault is in the temporary alias region */
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depw,z prot,8,7,prot
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
- idtlba pte,(va)
- idtlbp prot,(va)
- rfir
- nop
- nadtlb_miss_11:
- get_pgd spc,ptp
- space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
- update_ptep ptp,pte,t0,t1
- make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- rfir
- nop
- nadtlb_check_flush_11:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
- /* Insert a "flush only" translation */
- zdepi 7,7,3,prot
- depi 1,10,1,prot
- /* Get rid of prot bits and convert to page addr for idtlba */
- depi 0,31,12,pte
- extru pte,24,25,pte
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- rfir
- nop
- dtlb_miss_20:
- space_adjust spc,va,t0
- get_pgd spc,ptp
- space_check spc,t0,dtlb_fault
- L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
- f_extend pte,t0
- idtlbt pte,prot
- rfir
- nop
- dtlb_check_alias_20:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
-
- idtlbt pte,prot
- rfir
- nop
- nadtlb_miss_20:
- get_pgd spc,ptp
- space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
- f_extend pte,t0
-
- idtlbt pte,prot
- rfir
- nop
- nadtlb_check_flush_20:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
- /* Insert a "flush only" translation */
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
- /* Get rid of prot bits and convert to page addr for idtlbt */
- depdi 0,63,12,pte
- extrd,u pte,56,32,pte
- idtlbt pte,prot
- rfir
- nop
- #endif
- nadtlb_emulate:
- /*
- * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
- * probei instructions. We don't want to fault for these
- * instructions (not only does it not make sense, it can cause
- * deadlocks, since some flushes are done with the mmap
- * semaphore held). If the translation doesn't exist, we can't
- * insert a translation, so have to emulate the side effects
- * of the instruction. Since we don't insert a translation
- * we can get a lot of faults during a flush loop, so it makes
- * sense to try to do it here with minimum overhead. We only
- * emulate fdc,fic,pdc,probew,prober instructions whose base
- * and index registers are not shadowed. We defer everything
- * else to the "slow" path.
- */
- mfctl %cr19,%r9 /* Get iir */
- /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
- Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
- /* Checks for fdc,fdce,pdc,"fic,4f" only */
- ldi 0x280,%r16
- and %r9,%r16,%r17
- cmpb,<>,n %r16,%r17,nadtlb_probe_check
- bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
- BL get_register,%r25
- extrw,u %r9,15,5,%r8 /* Get index register # */
- cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
- copy %r1,%r24
- BL get_register,%r25
- extrw,u %r9,10,5,%r8 /* Get base register # */
- cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
- BL set_register,%r25
- add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
- nadtlb_nullify:
- mfctl %ipsw,%r8
- ldil L%PSW_N,%r9
- or %r8,%r9,%r8 /* Set PSW_N */
- mtctl %r8,%ipsw
- rfir
- nop
- /*
- When there is no translation for the probe address then we
- must nullify the insn and return zero in the target register.
- This will indicate to the calling code that it does not have
- write/read privileges to this address.
- This should technically work for prober and probew in PA 1.1,
- and also probe,r and probe,w in PA 2.0
- WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
- THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
- */
- nadtlb_probe_check:
- ldi 0x80,%r16
- and %r9,%r16,%r17
- cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
- BL get_register,%r25 /* Find the target register */
- extrw,u %r9,31,5,%r8 /* Get target register */
- cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
- BL set_register,%r25
- copy %r0,%r1 /* Write zero to target register */
- b nadtlb_nullify /* Nullify return insn */
- nop
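- /* In short (illustrative summary): for fdc/fic/pdc with the ,m completer
- we emulate the base-register update (base += index) ourselves; for
- probe instructions with no translation we write 0 to the target
- register; in both cases the faulting instruction is then nullified via
- PSW_N so rfir skips it. Anything involving shadowed registers falls
- back to the slow path (nadtlb_fault). */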
- #ifdef CONFIG_64BIT
- itlb_miss_20w:
- /*
- * An instruction TLB miss is a little different, since we allow users to fault
- * on the gateway page which is in the kernel address space.
- */
- space_adjust spc,va,t0
- get_pgd spc,ptp
- space_check spc,t0,itlb_fault
- L3_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
-
- iitlbt pte,prot
- rfir
- nop
- #else
- itlb_miss_11:
- get_pgd spc,ptp
- space_check spc,t0,itlb_fault
- L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
- make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
- iitlba pte,(%sr1,va)
- iitlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- rfir
- nop
- itlb_miss_20:
- get_pgd spc,ptp
- space_check spc,t0,itlb_fault
- L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
- f_extend pte,t0
- iitlbt pte,prot
- rfir
- nop
- #endif
- #ifdef CONFIG_64BIT
- dbit_trap_20w:
- space_adjust spc,va,t0
- get_pgd spc,ptp
- space_check spc,t0,dbit_fault
- L3_ptep ptp,pte,t0,va,dbit_fault
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20w
- load32 PA(pa_dbit_lock),t0
- dbit_spin_20w:
- LDCW 0(t0),t1
- cmpib,COND(=) 0,t1,dbit_spin_20w
- nop
- dbit_nolock_20w:
- #endif
- update_dirty ptp,pte,t1
- make_insert_tlb spc,pte,prot
-
- idtlbt pte,prot
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20w
- ldi 1,t1
- stw t1,0(t0)
- dbit_nounlock_20w:
- #endif
- rfir
- nop
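- /* Note on the locking above (illustrative): pa_dbit_lock is a standard
- * PA-RISC ldcw spinlock. LDCW atomically reads the word and writes 0,
- * so reading 0 means another CPU holds the lock and we spin; the unlock
- * path simply stores 1 back. The lock is only taken for user-space
- * faults (spc != 0). */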
- #else
- dbit_trap_11:
- get_pgd spc,ptp
- space_check spc,t0,dbit_fault
- L2_ptep ptp,pte,t0,va,dbit_fault
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_11
- load32 PA(pa_dbit_lock),t0
- dbit_spin_11:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_11
- nop
- dbit_nolock_11:
- #endif
- update_dirty ptp,pte,t1
- make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
- mtsp t1, %sr1 /* Restore sr1 */
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_11
- ldi 1,t1
- stw t1,0(t0)
- dbit_nounlock_11:
- #endif
- rfir
- nop
- dbit_trap_20:
- get_pgd spc,ptp
- space_check spc,t0,dbit_fault
- L2_ptep ptp,pte,t0,va,dbit_fault
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20
- load32 PA(pa_dbit_lock),t0
- dbit_spin_20:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20
- nop
- dbit_nolock_20:
- #endif
- update_dirty ptp,pte,t1
- make_insert_tlb spc,pte,prot
- f_extend pte,t1
-
- idtlbt pte,prot
- #ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20
- ldi 1,t1
- stw t1,0(t0)
- dbit_nounlock_20:
- #endif
- rfir
- nop
- #endif
- .import handle_interruption,code
- kernel_bad_space:
- b intr_save
- ldi 31,%r8 /* Use an unused code */
- dbit_fault:
- b intr_save
- ldi 20,%r8
- itlb_fault:
- b intr_save
- ldi 6,%r8
- nadtlb_fault:
- b intr_save
- ldi 17,%r8
- dtlb_fault:
- b intr_save
- ldi 15,%r8
- /* Register saving semantics for system calls:
- %r1 clobbered by system call macro in userspace
- %r2 saved in PT_REGS by gateway page
- %r3 - %r18 preserved by C code (saved by signal code)
- %r19 - %r20 saved in PT_REGS by gateway page
- %r21 - %r22 non-standard syscall args
- stored in kernel stack by gateway page
- %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
- %r27 - %r30 saved in PT_REGS by gateway page
- %r31 syscall return pointer
- */
- /* Floating point registers (FIXME: what do we do with these?)
- %fr0 - %fr3 status/exception, not preserved
- %fr4 - %fr7 arguments
- %fr8 - %fr11 not preserved by C code
- %fr12 - %fr21 preserved by C code
- %fr22 - %fr31 not preserved by C code
- */
- .macro reg_save regs
- STREG %r3, PT_GR3(\regs)
- STREG %r4, PT_GR4(\regs)
- STREG %r5, PT_GR5(\regs)
- STREG %r6, PT_GR6(\regs)
- STREG %r7, PT_GR7(\regs)
- STREG %r8, PT_GR8(\regs)
- STREG %r9, PT_GR9(\regs)
- STREG %r10,PT_GR10(\regs)
- STREG %r11,PT_GR11(\regs)
- STREG %r12,PT_GR12(\regs)
- STREG %r13,PT_GR13(\regs)
- STREG %r14,PT_GR14(\regs)
- STREG %r15,PT_GR15(\regs)
- STREG %r16,PT_GR16(\regs)
- STREG %r17,PT_GR17(\regs)
- STREG %r18,PT_GR18(\regs)
- .endm
- .macro reg_restore regs
- LDREG PT_GR3(\regs), %r3
- LDREG PT_GR4(\regs), %r4
- LDREG PT_GR5(\regs), %r5
- LDREG PT_GR6(\regs), %r6
- LDREG PT_GR7(\regs), %r7
- LDREG PT_GR8(\regs), %r8
- LDREG PT_GR9(\regs), %r9
- LDREG PT_GR10(\regs),%r10
- LDREG PT_GR11(\regs),%r11
- LDREG PT_GR12(\regs),%r12
- LDREG PT_GR13(\regs),%r13
- LDREG PT_GR14(\regs),%r14
- LDREG PT_GR15(\regs),%r15
- LDREG PT_GR16(\regs),%r16
- LDREG PT_GR17(\regs),%r17
- LDREG PT_GR18(\regs),%r18
- .endm
- ENTRY(sys_fork_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
- ldo TASK_REGS(%r1),%r1
- reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- /* These are call-clobbered registers and therefore
- also syscall-clobbered (we hope). */
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
- LDREG PT_GR30(%r1),%r25
- copy %r1,%r24
- BL sys_clone,%r2
- ldi SIGCHLD,%r26
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
- wrapper_exit:
- ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- LDREG PT_CR27(%r1), %r3
- mtctl %r3, %cr27
- reg_restore %r1
- /* strace expects syscall # to be preserved in r20 */
- ldi __NR_fork,%r20
- bv %r0(%r2)
- STREG %r20,PT_GR20(%r1)
- ENDPROC(sys_fork_wrapper)
- /* Set the return value for the child */
- ENTRY(child_return)
- BL schedule_tail, %r2
- nop
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
- LDREG TASK_PT_GR19(%r1),%r2
- b wrapper_exit
- copy %r0,%r28
- ENDPROC(child_return)
- ENTRY(sys_clone_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- /* WARNING - Clobbers r19 and r21, userspace must save these! */
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
- BL sys_clone,%r2
- copy %r1,%r24
- b wrapper_exit
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
- ENDPROC(sys_clone_wrapper)
- ENTRY(sys_vfork_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
- BL sys_vfork,%r2
- copy %r1,%r26
- b wrapper_exit
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
- ENDPROC(sys_vfork_wrapper)
-
- .macro execve_wrapper execve
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- /*
- * Do we need to save/restore r3-r18 here?
- * I don't think so; why would the new thread need the old
- * thread's registers?
- */
- /* %arg0 - %arg3 are already saved for us. */
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- BL \execve,%r2
- copy %r1,%arg0
- ldo -FRAME_SIZE(%r30),%r30
- LDREG -RP_OFFSET(%r30),%r2
- /* If exec succeeded we need to load the args */
- ldo -1024(%r0),%r1
- cmpb,>>= %r28,%r1,error_\execve
- copy %r2,%r19
- error_\execve:
- bv %r0(%r19)
- nop
- .endm
- .import sys_execve
- ENTRY(sys_execve_wrapper)
- execve_wrapper sys_execve
- ENDPROC(sys_execve_wrapper)
- #ifdef CONFIG_64BIT
- .import sys32_execve
- ENTRY(sys32_execve_wrapper)
- execve_wrapper sys32_execve
- ENDPROC(sys32_execve_wrapper)
- #endif
- ENTRY(sys_rt_sigreturn_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
- ldo TASK_REGS(%r26),%r26 /* get pt regs */
- /* Don't save regs, we are going to restore them from sigcontext. */
- STREG %r2, -RP_OFFSET(%r30)
- #ifdef CONFIG_64BIT
- ldo FRAME_SIZE(%r30), %r30
- BL sys_rt_sigreturn,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
- #else
- BL sys_rt_sigreturn,%r2
- ldo FRAME_SIZE(%r30), %r30
- #endif
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
- /* FIXME: I think we need to restore a few more things here. */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- reg_restore %r1
- /* If the signal was received while the process was blocked on a
- * syscall, then r2 will take us to syscall_exit; otherwise r2 will
- * take us to syscall_exit_rfi and on to intr_return.
- */
- bv %r0(%r2)
- LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
- ENDPROC(sys_rt_sigreturn_wrapper)
- ENTRY(sys_sigaltstack_wrapper)
- /* Get the user stack pointer */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r24 /* get pt regs */
- LDREG TASK_PT_GR30(%r24),%r24
- STREG %r2, -RP_OFFSET(%r30)
- #ifdef CONFIG_64BIT
- ldo FRAME_SIZE(%r30), %r30
- BL do_sigaltstack,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
- #else
- BL do_sigaltstack,%r2
- ldo FRAME_SIZE(%r30), %r30
- #endif
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
- bv %r0(%r2)
- nop
- ENDPROC(sys_sigaltstack_wrapper)
- #ifdef CONFIG_64BIT
- ENTRY(sys32_sigaltstack_wrapper)
- /* Get the user stack pointer */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
- LDREG TASK_PT_GR30(%r24),%r24
- STREG %r2, -RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30), %r30
- BL do_sigaltstack32,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
- bv %r0(%r2)
- nop
- ENDPROC(sys32_sigaltstack_wrapper)
- #endif
- ENTRY(syscall_exit)
- /* NOTE: HP-UX syscalls also come through here
- * after hpux_syscall_exit fixes up return
- * values. */
- /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
- * via syscall_exit_rfi if the signal was received while the process
- * was running.
- */
- /* save return value now */
- mfctl %cr30, %r1
- LDREG TI_TASK(%r1),%r1
- STREG %r28,TASK_PT_GR28(%r1)
- #ifdef CONFIG_HPUX
- /* <linux/personality.h> cannot be easily included */
- #define PER_HPUX 0x10
- ldw TASK_PERSONALITY(%r1),%r19
- /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
- ldo -PER_HPUX(%r19), %r19
- cmpib,COND(<>),n 0,%r19,1f
- /* Save other hpux returns if personality is PER_HPUX */
- STREG %r22,TASK_PT_GR22(%r1)
- STREG %r29,TASK_PT_GR29(%r1)
- 1:
- #endif /* CONFIG_HPUX */
- /* Seems to me that dp could be wrong here, if the syscall involved
- * calling a module, and nothing got round to restoring dp on return.
- */
- loadgp
- syscall_check_resched:
- /* check for reschedule */
- LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
- bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
- .import do_signal,code
- syscall_check_sig:
- LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
- ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
- and,COND(<>) %r19, %r26, %r0
- b,n syscall_restore /* skip past if we've nothing to do */
- syscall_do_signal:
- /* Save callee-save registers (for sigcontext).
- * FIXME: After this point the process structure should be
- * consistent with all the relevant state of the process
- * before the syscall. We need to verify this.
- */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
- reg_save %r26
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #endif
- BL do_notify_resume,%r2
- ldi 1, %r25 /* long in_syscall = 1 */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
- reg_restore %r20
- b,n syscall_check_sig
- syscall_restore:
- /* Are we being ptraced? */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldw TASK_PTRACE(%r1), %r19
- bb,< %r19,31,syscall_restore_rfi
- nop
- ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
- rest_fp %r19
- LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
- mtsar %r19
- LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
- LDREG TASK_PT_GR19(%r1),%r19
- LDREG TASK_PT_GR20(%r1),%r20
- LDREG TASK_PT_GR21(%r1),%r21
- LDREG TASK_PT_GR22(%r1),%r22
- LDREG TASK_PT_GR23(%r1),%r23
- LDREG TASK_PT_GR24(%r1),%r24
- LDREG TASK_PT_GR25(%r1),%r25
- LDREG TASK_PT_GR26(%r1),%r26
- LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
- LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
- LDREG TASK_PT_GR29(%r1),%r29
- LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
- /* NOTE: We use rsm/ssm pair to make this operation atomic */
- rsm PSW_SM_I, %r0
- LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
- mfsp %sr3,%r1 /* Get users space id */
- mtsp %r1,%sr7 /* Restore sr7 */
- ssm PSW_SM_I, %r0
- /* Set sr2 to zero for userspace syscalls to work. */
- mtsp %r0,%sr2
- mtsp %r1,%sr4 /* Restore sr4 */
- mtsp %r1,%sr5 /* Restore sr5 */
- mtsp %r1,%sr6 /* Restore sr6 */
- depi 3,31,2,%r31 /* ensure return to user mode. */
- #ifdef CONFIG_64BIT
- /* decide whether to reset the wide mode bit
- *
- * For a syscall, the W bit is stored in the lowest bit
- * of sp. Extract it and reset W if it is zero */
- extrd,u,*<> %r30,63,1,%r1
- rsm PSW_SM_W, %r0
- /* now reset the lowest bit of sp if it was set */
- xor %r30,%r1,%r30
- #endif
- be,n 0(%sr3,%r31) /* return to user space */
- /* We have to return via an RFI, so that PSW T and R bits can be set
- * appropriately.
- * This sets up pt_regs so we can return via intr_restore, which is not
- * the most efficient way of doing things, but it works.
- */
- syscall_restore_rfi:
- ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
- mtctl %r2,%cr0 /* for immediate trap */
- LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
- ldi 0x0b,%r20 /* Create new PSW */
- depi -1,13,1,%r20 /* C, Q, D, and I bits */
- /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
- * set in include/linux/ptrace.h and converted to PA bitmap
- * numbers in asm-offsets.c */
- /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
- extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
- depi -1,27,1,%r20 /* R bit */
- /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
- extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
- depi -1,7,1,%r20 /* T bit */
- STREG %r20,TASK_PT_PSW(%r1)
- /* Always store space registers, since sr3 can be changed (e.g. fork) */
- mfsp %sr3,%r25
- STREG %r25,TASK_PT_SR3(%r1)
- STREG %r25,TASK_PT_SR4(%r1)
- STREG %r25,TASK_PT_SR5(%r1)
- STREG %r25,TASK_PT_SR6(%r1)
- STREG %r25,TASK_PT_SR7(%r1)
- STREG %r25,TASK_PT_IASQ0(%r1)
- STREG %r25,TASK_PT_IASQ1(%r1)
- /* XXX W bit??? */
- /* Now if old D bit is clear, it means we didn't save all registers
- * on syscall entry, so do that now. This only happens on TRACEME
- * calls, or if someone attached to us while we were on a syscall.
- * We could make this more efficient by not saving r3-r18, but
- * then we wouldn't be able to use the common intr_restore path.
- * It is only for traced processes anyway, so performance is not
- * an issue.
- */
- bb,< %r2,30,pt_regs_ok /* Branch if D set */
- ldo TASK_REGS(%r1),%r25
- reg_save %r25 /* Save r3 to r18 */
- /* Save the current sr */
- mfsp %sr0,%r2
- STREG %r2,TASK_PT_SR0(%r1)
- /* Save the scratch sr */
- mfsp %sr1,%r2
- STREG %r2,TASK_PT_SR1(%r1)
- /* sr2 should be set to zero for userspace syscalls */
- STREG %r0,TASK_PT_SR2(%r1)
- pt_regs_ok:
- LDREG TASK_PT_GR31(%r1),%r2
- depi 3,31,2,%r2 /* ensure return to user mode. */
- STREG %r2,TASK_PT_IAOQ0(%r1)
- ldo 4(%r2),%r2
- STREG %r2,TASK_PT_IAOQ1(%r1)
- copy %r25,%r16
- b intr_restore
- nop
- .import schedule,code
- syscall_do_resched:
- BL schedule,%r2
- #ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- #else
- nop
- #endif
- b syscall_check_resched /* if resched, we start over again */
- nop
- ENDPROC(syscall_exit)
- get_register:
- /*
- * get_register is used by the non access tlb miss handlers to
- * copy the value of the general register specified in r8 into
- * r1. This routine can't be used for shadowed registers, since
- * the rfir will restore the original value. So, for the shadowed
- * registers we put a -1 into r1 to indicate that the register
- * should not be used (the register being copied could also have
- * a -1 in it, but that is OK, it just means that we will have
- * to use the slow path instead).
- */
- blr %r8,%r0
- nop
- bv %r0(%r25) /* r0 */
- copy %r0,%r1
- bv %r0(%r25) /* r1 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r2 */
- copy %r2,%r1
- bv %r0(%r25) /* r3 */
- copy %r3,%r1
- bv %r0(%r25) /* r4 */
- copy %r4,%r1
- bv %r0(%r25) /* r5 */
- copy %r5,%r1
- bv %r0(%r25) /* r6 */
- copy %r6,%r1
- bv %r0(%r25) /* r7 */
- copy %r7,%r1
- bv %r0(%r25) /* r8 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r9 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r10 */
- copy %r10,%r1
- bv %r0(%r25) /* r11 */
- copy %r11,%r1
- bv %r0(%r25) /* r12 */
- copy %r12,%r1
- bv %r0(%r25) /* r13 */
- copy %r13,%r1
- bv %r0(%r25) /* r14 */
- copy %r14,%r1
- bv %r0(%r25) /* r15 */
- copy %r15,%r1
- bv %r0(%r25) /* r16 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r17 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r18 */
- copy %r18,%r1
- bv %r0(%r25) /* r19 */
- copy %r19,%r1
- bv %r0(%r25) /* r20 */
- copy %r20,%r1
- bv %r0(%r25) /* r21 */
- copy %r21,%r1
- bv %r0(%r25) /* r22 */
- copy %r22,%r1
- bv %r0(%r25) /* r23 */
- copy %r23,%r1
- bv %r0(%r25) /* r24 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r25 - shadowed */
- ldi -1,%r1
- bv %r0(%r25) /* r26 */
- copy %r26,%r1
- bv %r0(%r25) /* r27 */
- copy %r27,%r1
- bv %r0(%r25) /* r28 */
- copy %r28,%r1
- bv %r0(%r25) /* r29 */
- copy %r29,%r1
- bv %r0(%r25) /* r30 */
- copy %r30,%r1
- bv %r0(%r25) /* r31 */
- copy %r31,%r1
- set_register:
- /*
- * set_register is used by the non access tlb miss handlers to
- * copy the value of r1 into the general register specified in
- * r8.
- */
- blr %r8,%r0
- nop
- bv %r0(%r25) /* r0 (silly, but it is a place holder) */
- copy %r1,%r0
- bv %r0(%r25) /* r1 */
- copy %r1,%r1
- bv %r0(%r25) /* r2 */
- copy %r1,%r2
- bv %r0(%r25) /* r3 */
- copy %r1,%r3
- bv %r0(%r25) /* r4 */
- copy %r1,%r4
- bv %r0(%r25) /* r5 */
- copy %r1,%r5
- bv %r0(%r25) /* r6 */
- copy %r1,%r6
- bv %r0(%r25) /* r7 */
- copy %r1,%r7
- bv %r0(%r25) /* r8 */
- copy %r1,%r8
- bv %r0(%r25) /* r9 */
- copy %r1,%r9
- bv %r0(%r25) /* r10 */
- copy %r1,%r10
- bv %r0(%r25) /* r11 */
- copy %r1,%r11
- bv %r0(%r25) /* r12 */
- copy %r1,%r12
- bv %r0(%r25) /* r13 */
- copy %r1,%r13
- bv %r0(%r25) /* r14 */
- copy %r1,%r14
- bv %r0(%r25) /* r15 */
- copy %r1,%r15
- bv %r0(%r25) /* r16 */
- copy %r1,%r16
- bv %r0(%r25) /* r17 */
- copy %r1,%r17
- bv %r0(%r25) /* r18 */
- copy %r1,%r18
- bv %r0(%r25) /* r19 */
- copy %r1,%r19
- bv %r0(%r25) /* r20 */
- copy %r1,%r20
- bv %r0(%r25) /* r21 */
- copy %r1,%r21
- bv %r0(%r25) /* r22 */
- copy %r1,%r22
- bv %r0(%r25) /* r23 */
- copy %r1,%r23
- bv %r0(%r25) /* r24 */
- copy %r1,%r24
- bv %r0(%r25) /* r25 */
- copy %r1,%r25
- bv %r0(%r25) /* r26 */
- copy %r1,%r26
- bv %r0(%r25) /* r27 */
- copy %r1,%r27
- bv %r0(%r25) /* r28 */
- copy %r1,%r28
- bv %r0(%r25) /* r29 */
- copy %r1,%r29
- bv %r0(%r25) /* r30 */
- copy %r1,%r30
- bv %r0(%r25) /* r31 */
- copy %r1,%r31