clear_page.S

/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */
#define	TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
#define	TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
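
	/* The temporary TTE is assembled from these two halves:
	 * TTE_BITS_TOP holds the valid bit and page-size field from the
	 * upper 32 bits of the entry, TTE_BITS_BOTTOM the cacheable
	 * (CP/CV), privileged, locked, and writable attribute bits from
	 * the lower 32.
	 */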
	.text

	.globl		_clear_page
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4
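
	/* %o4 == 0 tells clear_page_common that no temporary TLB entry
	 * was loaded: _clear_page writes through the kernel linear
	 * mapping, so there is nothing to demap and the preempt count
	 * was never touched.
	 */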
	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 */
	.align		32
	.globl		clear_user_page
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	sllx		%g2, 32, %g2
	sethi		%uhi(TTE_BITS_TOP), %g3

	sllx		%g3, 32, %g3
	sub		%o0, %g2, %g1		! paddr

	or		%g3, TTE_BITS_BOTTOM, %g3
	and		%o1, %o4, %o0		! vaddr D-cache alias bit

	or		%g1, %g3, %g1		! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	add		%o2, 1, %o4
	add		%o0, %o3, %o0		! TTE vaddr
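
	/* The page is cleared through a temporary window above
	 * TLBTEMP_BASE rather than through its user address.  Adding
	 * (vaddr & PAGE_SIZE) selects a window with the same D-cache
	 * color as the user mapping, so the stores land in the cache
	 * lines that mapping will see.
	 */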
	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]

	/* Load TLB entry.  */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate
	stxa		%o0, [%g3] ASI_DMMU
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	flush		%g6
	wrpr		%o4, 0x0, %pstate

	mov		1, %o4
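
	/* Interrupts stay disabled only across the tag-access/data-in
	 * store pair, so a trap cannot slip in between them and load a
	 * different entry.  %o4 = 1 records that a temporary TTE is now
	 * live and must be demapped on exit.
	 */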
clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
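
	/* %f0 and %f2 are zeroed directly; adding and multiplying two
	 * zeros fills %f4-%f14 as well.  The eight doubles %f0-%f14 form
	 * the 64-byte block that each ASI_BLK_P store below writes, so
	 * the loop runs PAGE_SIZE/64 times.
	 */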
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0
	membar		#Sync
	VISExitHalf
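
	/* If %o4 is still zero we came in via _clear_page: nothing was
	 * loaded into the TLB and the preempt count is untouched, so
	 * return directly.  Otherwise demap the temporary TTE and
	 * restore the saved preempt count.
	 */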
	brz,pn		%o4, out
	 nop
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop