/* atomic_32.S */
  1. /* atomic.S: Move this stuff here for better ICACHE hit rates.
  2. *
  3. * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
  4. */
  5. #include <asm/ptrace.h>
  6. #include <asm/psr.h>
  7. .text
  8. .align 4
  9. .globl __atomic_begin
  10. __atomic_begin:
  11. #ifndef CONFIG_SMP
  12. .globl ___xchg32_sun4c
  13. ___xchg32_sun4c:
  14. rd %psr, %g3
  15. andcc %g3, PSR_PIL, %g0
  16. bne 1f
  17. nop
  18. wr %g3, PSR_PIL, %psr
  19. nop; nop; nop
  20. 1:
  21. andcc %g3, PSR_PIL, %g0
  22. ld [%g1], %g7
  23. bne 1f
  24. st %g2, [%g1]
  25. wr %g3, 0x0, %psr
  26. nop; nop; nop
  27. 1:
  28. mov %g7, %g2
  29. jmpl %o7 + 8, %g0
  30. mov %g4, %o7
  31. .globl ___xchg32_sun4md
  32. ___xchg32_sun4md:
  33. swap [%g1], %g2
  34. jmpl %o7 + 8, %g0
  35. mov %g4, %o7
  36. #endif
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
 * Really, some things here for SMP are overly clever, go read the header.
 */
  40. .globl ___atomic24_add
  41. ___atomic24_add:
  42. rd %psr, %g3 ! Keep the code small, old way was stupid
  43. nop; nop; nop; ! Let the bits set
  44. or %g3, PSR_PIL, %g7 ! Disable interrupts
  45. wr %g7, 0x0, %psr ! Set %psr
  46. nop; nop; nop; ! Let the bits set
  47. #ifdef CONFIG_SMP
  48. 1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
  49. orcc %g7, 0x0, %g0 ! Did we get it?
  50. bne 1b ! Nope...
  51. ld [%g1], %g7 ! Load locked atomic24_t
  52. sra %g7, 8, %g7 ! Get signed 24-bit integer
  53. add %g7, %g2, %g2 ! Add in argument
  54. sll %g2, 8, %g7 ! Transpose back to atomic24_t
  55. st %g7, [%g1] ! Clever: This releases the lock as well.
  56. #else
  57. ld [%g1], %g7 ! Load locked atomic24_t
  58. add %g7, %g2, %g2 ! Add in argument
  59. st %g2, [%g1] ! Store it back
  60. #endif
  61. wr %g3, 0x0, %psr ! Restore original PSR_PIL
  62. nop; nop; nop; ! Let the bits set
  63. jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
  64. mov %g4, %o7 ! Restore %o7
  65. .globl ___atomic24_sub
  66. ___atomic24_sub:
  67. rd %psr, %g3 ! Keep the code small, old way was stupid
  68. nop; nop; nop; ! Let the bits set
  69. or %g3, PSR_PIL, %g7 ! Disable interrupts
  70. wr %g7, 0x0, %psr ! Set %psr
  71. nop; nop; nop; ! Let the bits set
  72. #ifdef CONFIG_SMP
  73. 1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
  74. orcc %g7, 0x0, %g0 ! Did we get it?
  75. bne 1b ! Nope...
  76. ld [%g1], %g7 ! Load locked atomic24_t
  77. sra %g7, 8, %g7 ! Get signed 24-bit integer
  78. sub %g7, %g2, %g2 ! Subtract argument
  79. sll %g2, 8, %g7 ! Transpose back to atomic24_t
  80. st %g7, [%g1] ! Clever: This releases the lock as well
  81. #else
  82. ld [%g1], %g7 ! Load locked atomic24_t
  83. sub %g7, %g2, %g2 ! Subtract argument
  84. st %g2, [%g1] ! Store it back
  85. #endif
  86. wr %g3, 0x0, %psr ! Restore original PSR_PIL
  87. nop; nop; nop; ! Let the bits set
  88. jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
  89. mov %g4, %o7 ! Restore %o7
  90. .globl __atomic_end
  91. __atomic_end: