memcpy_64.S

/*
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
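
/*
 * Entry conditions (standard PPC64 ELF ABI for memcpy): r3 = dest,
 * r4 = src, r5 = length in bytes.  memcpy must return the original
 * dest, which is why r3 is saved to the stack and reloaded before blr.
 */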
	.align	7
_GLOBAL(memcpy)
BEGIN_FTR_SECTION
	std	r3,48(r1)	/* save destination pointer for return value */
FTR_SECTION_ELSE
	b	memcpy_power7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
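/*
 * On CPUs with CPU_FTR_VMX_COPY set, the feature fixup above patches in
 * the branch to the VMX-assisted memcpy_power7; everything below is the
 * generic integer-register copy.
 *
 * PPC_MTOCRF moves the low 4 bits of the length into cr7 so the
 * sub-doubleword tails can be handled with bf/bt tests; cr1 records
 * whether the copy is shorter than 16 bytes; r6 = bytes to the next
 * 8-byte destination boundary; dcbt touches the first source line.
 */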
	PPC_MTOCRF(0x01,r5)
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4
	blt	cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
   CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
   cleared.
   At the time of writing the only CPU that has this combination of bits
   set is Power6. */
BEGIN_FTR_SECTION
	nop
FTR_SECTION_ELSE
	bne	.Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
		    CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
	addi	r3,r3,-16
BEGIN_FTR_SECTION
	andi.	r0,r4,7
	bne	.Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
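/*
 * Both pointers (treated as) 8-byte aligned: move 16 bytes per
 * iteration, software-pipelined so each ld is issued ahead of the std
 * that consumes it.  ctr = length >> 4; cr7*4+0 (the 8s bit of the
 * length) decides whether an odd doubleword goes through the pipeline
 * before the loop is entered.
 */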
	srdi	r7,r5,4
	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,2f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)
	std	r8,8(r3)
2:	ldu	r8,16(r4)
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)
	beq	3f
	addi	r3,r3,16
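/* Tail: copy the remaining 1-7 bytes as word/halfword/byte pieces,
   selected by the low three bits of the length held in cr7. */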
.Ldo_tail:
	bf	cr7*4+1,1f
	lwz	r9,8(r4)
	addi	r4,r4,4
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	lhz	r9,8(r4)
	addi	r4,r4,2
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	lbz	r9,8(r4)
	stb	r9,0(r3)
3:	ld	r3,48(r1)	/* return dest pointer */
	blr
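
/*
 * Source not 8-byte aligned (destination is): align the source down,
 * load whole doublewords, and build each output doubleword from two
 * neighbours with shift-and-or.  r10 = 8 * (src & 7) is the left-shift
 * count, r11 = 64 - r10 the matching right-shift count.
 */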
.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0
	bt	cr7*4+0,0f

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)
	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f

	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b
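/* Loop exits: flush the merged doublewords still in flight. */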
3:	std	r12,8(r3)
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beq	4f		/* no sub-doubleword tail: 4f is the shared return in .Lshort_copy */
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,6f
	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
6:
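	/* The last 1-7 output bytes sit at the high-order end of r9;
	   each rotldi brings the next piece into position for the
	   stw/sth/stb below. */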
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	ld	r3,48(r1)	/* return dest pointer */
	blr
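
/*
 * Destination not 8-byte aligned: copy r6 (1-7) bytes in byte/halfword/
 * word pieces, driven by the bits of r6 in cr7, then recompute cr7 from
 * the remaining length and rejoin the aligned path.
 */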
.Ldst_unaligned:
	PPC_MTOCRF(0x01,r6)	# put #bytes to 8B bdry into cr7
	subf	r5,r6,r5
	li	r7,0
	cmpldi	cr1,r5,16
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4
	stwx	r0,r7,r3
3:	PPC_MTOCRF(0x01,r5)
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned
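
/*
 * Fewer than 16 bytes in total: copy 8-, 4-, 2- and 1-byte pieces as
 * selected by the low four bits of the length in cr7.
 */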
.Lshort_copy:
	bf	cr7*4+0,1f
	lwz	r0,0(r4)
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	ld	r3,48(r1)	/* return dest pointer */
	blr