/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
 * dtlb_base.S: Front end to DTLB miss replacement strategy.
 *              This is included directly into the trap table.
 *
 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>

/* %g1	TLB_SFSR	(%g1 + %g1 == TLB_TAG_ACCESS)
 * %g2	(KERN_HIGHBITS | KERN_LOWBITS)
 * %g3	VPTE base	(0xfffffffe00000000)	Spitfire/Blackbird (44-bit VA space)
 *			(0xffe0000000000000)	Cheetah (64-bit VA space)
 * %g7	__pa(current->mm->pgd)
 *
 * The VPTE base value is completely magic, but note that
 * few places in the kernel other than these TLB miss
 * handlers know anything about the VPTE mechanism or
 * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
 * Consider the 44-bit VADDR Ultra-I/II case as an example:
 *
 * VA[0 :  (1<<43)] produce VPTE index [%g3 : %g3+(1<<(43-PAGE_SHIFT+3))]
 * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
 *
 * For Cheetah's 64-bit VADDR space this is:
 *
 * VA[0 :  (1<<63)] produce VPTE index [%g3 : %g3+(1<<(63-PAGE_SHIFT+3))]
 * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
 *
 * If you're paying attention you'll notice that this means half of
 * the VPTE table is above %g3 and half is below, low VA addresses
 * map progressively upwards from %g3, and high VA addresses map
 * progressively upwards towards %g3.  This trick was needed to make
 * the same 8 instruction handler work both for Spitfire/Blackbird's
 * peculiar VA space hole configuration and the full 64-bit VA space
 * one of Cheetah at the same time.
 */
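
/* Illustration only, not part of the handler: with placeholder names
 * ("vaddr" for the faulting virtual address, "vpte_base" for the value
 * kept in %g3), the fast path below amounts to roughly this C:
 *
 *	long off = ((long) vaddr >> PAGE_SHIFT) << 3;	(CREATE_VPTE_OFFSET1/2)
 *	u64  pte = *(u64 *) (vpte_base + off);		(ldxa [%g3 + %g6])
 *
 * The shift is arithmetic, so negative (high) virtual addresses yield
 * a negative offset, which is what places half of the VPTE table
 * below %g3 as described above.
 */
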
/* Ways we can get here:
 *
 * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
 * 2) Nucleus loads and stores to/from vmalloc() areas.
 * 3) User loads and stores.
 * 4) User space accesses by nucleus at tl0
 */

#if PAGE_SHIFT == 13
/*
 * To compute vpte offset, we need to do ((addr >> 13) << 3),
 * which can be optimized to (addr >> 10) if bits 10/11/12 can
 * be guaranteed to be 0 ... mmu_context.h does guarantee this
 * by only using 10 bits in the hwcontext value.
 */
#define CREATE_VPTE_OFFSET1(r1, r2)
#define CREATE_VPTE_OFFSET2(r1, r2) \
				srax	r1, 10, r2
#define CREATE_VPTE_NOP		nop
#else
#define CREATE_VPTE_OFFSET1(r1, r2) \
				srax	r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
				sllx	r2, 3, r2
#define CREATE_VPTE_NOP
#endif
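
/* Worked example of the PAGE_SHIFT == 13 shortcut above (a sketch, not
 * an authoritative statement): TAG_ACCESS holds VA[63:13] in its upper
 * bits and the hardware context in its low 13 bits.  mmu_context.h keeps
 * the context within 10 bits, so bits 10-12 are always zero and
 *
 *	((tag >> 13) << 3) == (tag >> 10)
 *
 * holds, letting a single srax replace the srax + sllx pair used in the
 * generic #else case.
 */
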
/* DTLB ** ICACHE line 1: Quick user TLB misses		*/
	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
from_tl1_trap:
	rdpr		%tl, %g5			! For TL==3 test
	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
	be,pn		%xcc, 3f			! Yep, special processing
	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
	cmp		%g5, 4				! Last trap level?
	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
	 nop						! delay slot
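
/* Decision flow of cache line 1 above, as an illustrative C-like
 * sketch (3f and longpath are the labels further down):
 *
 *	tag = TAG_ACCESS;			(ldxa [%g1 + %g1] ASI_DMMU)
 *	if ((tag & TAG_CONTEXT_BITS) == 0)	(nucleus context?)
 *		goto 3f;			(kernel address handling)
 *	if (trap_level == 4)			(last trap level,)
 *		goto longpath;			(cannot risk a nested VPTE miss)
 *	... fall through to the user VPTE load in cache line 2 ...
 */
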
/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
1:	brgez,pn	%g5, longpath			! Invalid, branch out
	 nop						! Delay-slot
9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry						! Trap return
3:	brlz,pt		%g4, 9b				! Kernel virtual map?
	 xor		%g2, %g4, %g5			! Finish bit twiddles
	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
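
/* What cache line 2 above does, sketched in C-like pseudocode
 * (illustration only):
 *
 *	pte = *(vpte_base + vpte_offset);	(ldxa [%g3 + %g6] ASI_S)
 *	if ((long) pte >= 0)			(bit 63, the valid bit, clear)
 *		goto longpath;			(let the C fault code decide)
 *	dtlb_data_in(pte);			(stxa ... ASI_DTLB_DATA_IN)
 *	retry;					(re-execute the faulting access)
 *
 * The kernel branch at 3: instead tests for a negative virtual address:
 * if so it combines %g2 (KERN_HIGHBITS | KERN_LOWBITS) with the tag in
 * the delay slot and reloads the TLB at 9:, otherwise it branches to
 * kvmap for OBP/vmalloc addresses.
 */
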
/* DTLB ** ICACHE line 3: winfixups+real_faults */
longpath:
	rdpr	%pstate, %g5			! Move into alternate globals
	wrpr	%g5, PSTATE_AG|PSTATE_MG, %pstate
	rdpr	%tl, %g4			! See where we came from.
	cmp	%g4, 1				! Is etrap/rtrap window fault?
	mov	TLB_TAG_ACCESS, %g4		! Prepare for fault processing
	ldxa	[%g4] ASI_DMMU, %g5		! Load faulting VA page
	be,pt	%xcc, sparc64_realfault_common	! Jump to normal fault handling
	 mov	FAULT_CODE_DTLB, %g4		! It was read from DTLB
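
/* Slow path above, sketched (illustration only):
 *
 *	switch to alternate globals;		(wrpr ... %pstate)
 *	fault_addr = TAG_ACCESS;		(faulting VA page + context)
 *	fault_code = FAULT_CODE_DTLB;
 *	if (trap_level == 1)
 *		goto sparc64_realfault_common;	(normal fault handling)
 *	goto winfix_trampoline;			(etrap/rtrap window fault)
 */
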
/* DTLB ** ICACHE line 4: Unused...	*/
	ba,a,pt	%xcc, winfix_trampoline		! Call window fixup code
	nop
	nop
	nop
	nop
	nop
	nop
	CREATE_VPTE_NOP

#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
#undef CREATE_VPTE_NOP