/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE    g7
#else
#define GLOBAL_SPARE    g5
#define ASI_BLK_P       0xf0
#define FPRS_FEF        0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
                 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
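
/* The definitions above are the user-space fallbacks: VISEntry saves the
 * current %fprs in %o5 and sets FPRS_FEF so the FP/VIS register file may be
 * used, and VISExit restores that state on the way out.  A kernel build
 * picks up its VISEntry/VISExit from <asm/visasm.h> instead.
 */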
#ifndef EX_LD
#define EX_LD(x)        x
#endif

#ifndef EX_ST
#define EX_ST(x)        x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)    x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)    type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)     ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)    type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)     stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME       memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)   \
        faligndata %f1, %f2, %f48;                      \
        faligndata %f2, %f3, %f50;                      \
        faligndata %f3, %f4, %f52;                      \
        faligndata %f4, %f5, %f54;                      \
        faligndata %f5, %f6, %f56;                      \
        faligndata %f6, %f7, %f58;                      \
        faligndata %f7, %f8, %f60;                      \
        faligndata %f8, %f9, %f62;
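
/* FREG_FROB turns nine consecutive double registers (72 bytes of raw source
 * data) into 64 destination-aligned bytes in %f48-%f62.  Each faligndata
 * extracts one aligned 8-byte double from an adjacent register pair, using
 * the byte offset latched into the GSR by the alignaddr executed at 2: below.
 */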
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)    \
        EX_LD(LOAD_BLK(%src, %fdest));                          \
        EX_ST(STORE_BLK(%fsrc, %dest));                         \
        add %src, 0x40, %src;                                   \
        subcc %len, 0x40, %len;                                 \
        be,pn %xcc, jmptgt;                                     \
        add %dest, 0x40, %dest;                                 \

#define LOOP_CHUNK1(src, dest, len, branch_dest)        \
        MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest)        \
        MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest)        \
        MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
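
/* One MAIN_LOOP_CHUNK block-loads the next 64 source bytes into the fdest
 * register bank while block-storing the 64 bytes already assembled in
 * %f48-%f62, advances both pointers by 0x40, and leaves the main loop via
 * jmptgt once len is exhausted (the dest increment rides in the delay slot).
 */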
#define STORE_SYNC(dest, fsrc)                  \
        EX_ST(STORE_BLK(%fsrc, %dest));         \
        add %dest, 0x40, %dest;

#define STORE_JUMP(dest, fsrc, target)          \
        EX_ST(STORE_BLK(%fsrc, %dest));         \
        add %dest, 0x40, %dest;                 \
        ba,pt %xcc, target;

#define FINISH_VISCHUNK(dest, f0, f1, left)     \
        subcc %left, 8, %left;                  \
        bl,pn %xcc, 95f;                        \
        faligndata %f0, %f1, %f48;              \
        EX_ST(STORE(std, %f48, %dest));         \
        add %dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)        \
        subcc %left, 8, %left;                          \
        bl,pn %xcc, 95f;                                \
        fsrc1 %f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1, left)             \
        UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)        \
        ba,a,pt %xcc, 93f;
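
/* The VISCHUNK tail macros drain data that is already sitting in the FP
 * registers: FINISH_VISCHUNK emits one more aligned 8-byte store as long as
 * at least 8 bytes remain in 'left', otherwise it branches to the final byte
 * loop at 95f, while the UNEVEN variants move the last live register into
 * %f0 and continue in the generic drain loop at 93f.
 */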
        .register       %g2,#scratch
        .register       %g3,#scratch

        .text
        .align          64

        .globl          FUNC_NAME
        .type           FUNC_NAME,#function
FUNC_NAME:      /* %o0=dst, %o1=src, %o2=len */
        srlx %o2, 31, %g2
        cmp %g2, 0
        tne %xcc, 5
        PREAMBLE
        mov %o0, %o4
        cmp %o2, 0
        be,pn %XCC, 85f
        or %o0, %o1, %o3
        cmp %o2, 16
        blu,a,pn %XCC, 80f
        or %o3, %o2, %o3
        cmp %o2, (5 * 64)
        blu,pt %XCC, 70f
        andcc %o3, 0x7, %g0
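
        /* Dispatch on size: lengths of 2GB or more trap (the tne above),
         * zero-length requests return via 85f, anything shorter than 16
         * bytes goes to the small copy at 80f, anything shorter than 5*64
         * bytes to the medium copy at 70f, and only larger copies pay for
         * the VIS block-move setup below.  %o3 accumulates dst|src (and len
         * for the short case) so later alignment tests can check everything
         * at once; %o4 preserves dst for the return value.
         */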
        /* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
        VISEntry

        /* Is 'dst' already aligned on a 64-byte boundary? */
        andcc %o0, 0x3f, %g2
        be,pt %XCC, 2f

        /* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
         * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
         * subtract this from 'len'.
         */
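        /* For example, if dst & 0x3f == 0x2b, %g2 becomes 0x2b - 0x40 =
         * -0x15 and is negated to 0x15: 21 bytes must be copied before dst
         * is 64-byte aligned.  The low three bits of that count (%g1, here
         * 5) are copied bytewise at 1: below; the remaining multiple of 8
         * (%g2, here 16) goes through the faligndata loop after 2:.
         */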
        sub %o0, %o1, %GLOBAL_SPARE
        sub %g2, 0x40, %g2
        sub %g0, %g2, %g2
        sub %o2, %g2, %o2
        andcc %g2, 0x7, %g1
        be,pt %icc, 2f
        and %g2, 0x38, %g2

1:      subcc %g1, 0x1, %g1
        EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
        EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
        bgu,pt %XCC, 1b
        add %o1, 0x1, %o1

        add %o1, %GLOBAL_SPARE, %o0
2:      cmp %g2, 0x0
        and %o1, 0x7, %g1
        be,pt %icc, 3f
        alignaddr %o1, %g0, %o1

        EX_LD(LOAD(ldd, %o1, %f4))
1:      EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
        add %o1, 0x8, %o1
        subcc %g2, 0x8, %g2
        faligndata %f4, %f6, %f0
        EX_ST(STORE(std, %f0, %o0))
        be,pn %icc, 3f
        add %o0, 0x8, %o0

        EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
        add %o1, 0x8, %o1
        subcc %g2, 0x8, %g2
        faligndata %f6, %f4, %f0
        EX_ST(STORE(std, %f0, %o0))
        bne,pt %icc, 1b
        add %o0, 0x8, %o0
        /* Destination is 64-byte aligned.  */
3:
        membar #LoadStore | #StoreStore | #StoreLoad
        subcc %o2, 0x40, %GLOBAL_SPARE
        add %o1, %g1, %g1
        andncc %GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
        srl %g1, 3, %g2
        sub %o2, %GLOBAL_SPARE, %g3
        andn %o1, (0x40 - 1), %o1
        and %g2, 7, %g2
        andncc %g3, 0x7, %g3
        fmovd %f0, %f2
        sub %g3, 0x8, %g3
        sub %o2, %GLOBAL_SPARE, %o2
        add %g1, %GLOBAL_SPARE, %g1
        subcc %o2, %g3, %o2

        EX_LD(LOAD_BLK(%o1, %f0))
        add %o1, 0x40, %o1
        add %g1, %g3, %g1
        EX_LD(LOAD_BLK(%o1, %f16))
        add %o1, 0x40, %o1
        sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
        EX_LD(LOAD_BLK(%o1, %f32))
        add %o1, 0x40, %o1
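
        /* Three 64-byte blocks are pre-loaded into %f0-%f14, %f16-%f30 and
         * %f32-%f46 so block loads, faligndata and block stores can overlap
         * in the main loop.  %GLOBAL_SPARE is the main-loop byte budget
         * (already debited for the data in flight), %g3 and %o2 the counts
         * for the 8-byte drain and final byte tails, %g1 the source address
         * the byte tail resumes from, and %g2 the source's 8-byte phase,
         * which selects the unrolled loop instance below.
         */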
        /* There are 8 instances of the unrolled loop,
         * one for each possible alignment of the
         * source buffer.  Each loop instance is 452
         * bytes.
         */
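        /* The shift/add sequence below scales %g2 by that instance size:
         * %o3 = (%g2 << 3) - %g2 = 7*%g2, then %o3 <<= 4 gives 112*%g2,
         * adding %g2 makes 113*%g2, and the final << 2 yields 452*%g2.
         */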
        sll %g2, 3, %o3
        sub %o3, %g2, %o3
        sllx %o3, 4, %o3
        add %o3, %g2, %o3
        sllx %o3, 2, %g2

1:      rd %pc, %o3
        add %o3, %lo(1f - 1b), %o3
        jmpl %o3 + %g2, %g0
        nop
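
        /* Computed goto: %o3 holds the address of the rd %pc above,
         * %lo(1f - 1b) is the distance from it to the first (64-byte
         * aligned) loop instance, and %g2 is the 452-byte offset of the
         * instance matching the source's 8-byte phase.
         */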
        .align 64
1:      FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f0, %f2, %f48
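        /* The delay slot above already performs the first faligndata of the
         * next iteration's FREG_FROB, so the loop re-enters at 1b+4, one
         * instruction past the label.
         */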
1:      FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        STORE_JUMP(o0, f48, 40f) membar #Sync
2:      FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        STORE_JUMP(o0, f48, 48f) membar #Sync
3:      FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        STORE_JUMP(o0, f48, 56f) membar #Sync
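        /* The 1:/2:/3: exits are reached from LOOP_CHUNK1/2/3 when the main
         * count runs out.  Each exit flushes the data still pending in the
         * FP register banks and then STORE_JUMPs into the matching drain
         * code at 40f-63f.  The membar #Sync written after each macro lands
         * right behind the block-store sequence (for STORE_JUMP, in the
         * branch delay slot) so the stda data has drained before those
         * registers are reused.
         */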
1:      FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f2, %f4, %f48
1:      FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        STORE_JUMP(o0, f48, 41f) membar #Sync
2:      FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        STORE_JUMP(o0, f48, 49f) membar #Sync
3:      FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        STORE_JUMP(o0, f48, 57f) membar #Sync
1:      FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f4, %f6, %f48
1:      FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        STORE_JUMP(o0, f48, 42f) membar #Sync
2:      FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        STORE_JUMP(o0, f48, 50f) membar #Sync
3:      FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        STORE_JUMP(o0, f48, 58f) membar #Sync
1:      FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f6, %f8, %f48
1:      FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        STORE_JUMP(o0, f48, 43f) membar #Sync
2:      FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        STORE_JUMP(o0, f48, 51f) membar #Sync
3:      FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        STORE_JUMP(o0, f48, 59f) membar #Sync
1:      FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f8, %f10, %f48
1:      FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        STORE_JUMP(o0, f48, 44f) membar #Sync
2:      FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        STORE_JUMP(o0, f48, 52f) membar #Sync
3:      FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        STORE_JUMP(o0, f48, 60f) membar #Sync
1:      FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f10, %f12, %f48
1:      FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        STORE_JUMP(o0, f48, 45f) membar #Sync
2:      FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        STORE_JUMP(o0, f48, 53f) membar #Sync
3:      FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        STORE_JUMP(o0, f48, 61f) membar #Sync
1:      FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f12, %f14, %f48
1:      FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        STORE_JUMP(o0, f48, 46f) membar #Sync
2:      FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        STORE_JUMP(o0, f48, 54f) membar #Sync
3:      FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        STORE_JUMP(o0, f48, 62f) membar #Sync
1:      FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
        FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
        FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
        ba,pt %xcc, 1b+4
        faligndata %f14, %f16, %f48
1:      FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        STORE_JUMP(o0, f48, 47f) membar #Sync
2:      FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        STORE_JUMP(o0, f48, 55f) membar #Sync
3:      FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        STORE_SYNC(o0, f48) membar #Sync
        FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        STORE_JUMP(o0, f48, 63f) membar #Sync
40:     FINISH_VISCHUNK(o0, f0,  f2,  g3)
41:     FINISH_VISCHUNK(o0, f2,  f4,  g3)
42:     FINISH_VISCHUNK(o0, f4,  f6,  g3)
43:     FINISH_VISCHUNK(o0, f6,  f8,  g3)
44:     FINISH_VISCHUNK(o0, f8,  f10, g3)
45:     FINISH_VISCHUNK(o0, f10, f12, g3)
46:     FINISH_VISCHUNK(o0, f12, f14, g3)
47:     UNEVEN_VISCHUNK(o0, f14, f0,  g3)
48:     FINISH_VISCHUNK(o0, f16, f18, g3)
49:     FINISH_VISCHUNK(o0, f18, f20, g3)
50:     FINISH_VISCHUNK(o0, f20, f22, g3)
51:     FINISH_VISCHUNK(o0, f22, f24, g3)
52:     FINISH_VISCHUNK(o0, f24, f26, g3)
53:     FINISH_VISCHUNK(o0, f26, f28, g3)
54:     FINISH_VISCHUNK(o0, f28, f30, g3)
55:     UNEVEN_VISCHUNK(o0, f30, f0,  g3)
56:     FINISH_VISCHUNK(o0, f32, f34, g3)
57:     FINISH_VISCHUNK(o0, f34, f36, g3)
58:     FINISH_VISCHUNK(o0, f36, f38, g3)
59:     FINISH_VISCHUNK(o0, f38, f40, g3)
60:     FINISH_VISCHUNK(o0, f40, f42, g3)
61:     FINISH_VISCHUNK(o0, f42, f44, g3)
62:     FINISH_VISCHUNK(o0, f44, f46, g3)
63:     UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)
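
        /* Each of the 40:-63: entry points drains the doubles still live in
         * the FP registers for the bank and position where the main loop
         * stopped, eight bytes per FINISH_VISCHUNK, until fewer than eight
         * bytes of %g3 remain; the UNEVEN entries move their last live
         * register into %f0 and continue in the generic drain loop at 93:.
         */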
93:     EX_LD(LOAD(ldd, %o1, %f2))
        add %o1, 8, %o1
        subcc %g3, 8, %g3
        faligndata %f0, %f2, %f8
        EX_ST(STORE(std, %f8, %o0))
        bl,pn %xcc, 95f
        add %o0, 8, %o0

        EX_LD(LOAD(ldd, %o1, %f0))
        add %o1, 8, %o1
        subcc %g3, 8, %g3
        faligndata %f2, %f0, %f8
        EX_ST(STORE(std, %f8, %o0))
        bge,pt %xcc, 93b
        add %o0, 8, %o0

95:     brz,pt %o2, 2f
        mov %g1, %o1

1:      EX_LD(LOAD(ldub, %o1, %o3))
        add %o1, 1, %o1
        subcc %o2, 1, %o2
        EX_ST(STORE(stb, %o3, %o0))
        bne,pt %xcc, 1b
        add %o0, 1, %o0

2:      membar #StoreLoad | #StoreStore
        VISExit
        retl
        mov EX_RETVAL(%o4), %o0
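
        /* Final cleanup: 93: keeps streaming aligned 8-byte doubles until
         * %g3 is exhausted, 95: copies the last %o2 bytes one at a time
         * starting from the source tail saved in %g1, then the epilogue
         * drops out of VIS mode and returns the original destination
         * preserved in %o4 (EX_RETVAL is the identity for the plain memcpy
         * build).
         */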
        .align 64

70:     /* 16 < len <= (5 * 64) */
        bne,pn %XCC, 75f
        sub %o0, %o1, %o3
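
        /* 72: requires both pointers to be 8-byte aligned (the andcc done in
         * the dispatch delay slot feeds the bne above; the unaligned case is
         * handled at 75:).  %o3 = dst - src is computed once so stores can
         * address the destination as [%o1 + %o3] and only the source pointer
         * needs advancing: 72: moves 16 bytes per iteration, 73: and the
         * following tests mop up an 8-byte and a 4-byte remainder, and
         * whatever is left goes to the byte loop at 90f.
         */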
72:     andn %o2, 0xf, %GLOBAL_SPARE
        and %o2, 0xf, %o2
1:      EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
        EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
        subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
        EX_ST(STORE(stx, %o5, %o1 + %o3))
        add %o1, 0x8, %o1
        EX_ST(STORE(stx, %g1, %o1 + %o3))
        bgu,pt %XCC, 1b
        add %o1, 0x8, %o1

73:     andcc %o2, 0x8, %g0
        be,pt %XCC, 1f
        nop
        EX_LD(LOAD(ldx, %o1, %o5))
        sub %o2, 0x8, %o2
        EX_ST(STORE(stx, %o5, %o1 + %o3))
        add %o1, 0x8, %o1

1:      andcc %o2, 0x4, %g0
        be,pt %XCC, 1f
        nop
        EX_LD(LOAD(lduw, %o1, %o5))
        sub %o2, 0x4, %o2
        EX_ST(STORE(stw, %o5, %o1 + %o3))
        add %o1, 0x4, %o1

1:      cmp %o2, 0
        be,pt %XCC, 85f
        nop
        ba,pt %xcc, 90f
        nop
75:     andcc %o0, 0x7, %g1
        sub %g1, 0x8, %g1
        be,pn %icc, 2f
        sub %g0, %g1, %g1
        sub %o2, %g1, %o2

1:      EX_LD(LOAD(ldub, %o1, %o5))
        subcc %g1, 1, %g1
        EX_ST(STORE(stb, %o5, %o1 + %o3))
        bgu,pt %icc, 1b
        add %o1, 1, %o1

2:      add %o1, %o3, %o0
        andcc %o1, 0x7, %g1
        bne,pt %icc, 8f
        sll %g1, 3, %g1

        cmp %o2, 16
        bgeu,pt %icc, 72b
        nop
        ba,a,pt %xcc, 73b

8:      mov 64, %o3
        andn %o1, 0x7, %o1
        EX_LD(LOAD(ldx, %o1, %g2))
        sub %o3, %g1, %o3
        andn %o2, 0x7, %GLOBAL_SPARE
        sllx %g2, %g1, %g2
1:      EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
        subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
        add %o1, 0x8, %o1
        srlx %g3, %o3, %o5
        or %o5, %g2, %o5
        EX_ST(STORE(stx, %o5, %o0))
        add %o0, 0x8, %o0
        bgu,pt %icc, 1b
        sllx %g3, %g1, %g2

        srl %g1, 3, %g1
        andcc %o2, 0x7, %o2
        be,pn %icc, 85f
        add %o1, %g1, %o1
        ba,pt %xcc, 90f
        sub %o0, %o1, %o3
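
        /* The 8: loop handles the case where src and dst have different
         * 8-byte phases: it rounds the source down to an 8-byte boundary,
         * keeps the previous source word pre-shifted left by (src & 7) * 8
         * bits in %g2, and builds each 8-byte store by OR-ing that with the
         * next source word shifted right by the complementary amount
         * (%o3 = 64 - shift); the branch delay slot pre-shifts the new word
         * for the following iteration.
         */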
        .align 64

80:     /* 0 < len <= 16 */
        andcc %o3, 0x3, %g0
        bne,pn %XCC, 90f
        sub %o0, %o1, %o3

1:      EX_LD(LOAD(lduw, %o1, %g1))
        subcc %o2, 4, %o2
        EX_ST(STORE(stw, %g1, %o1 + %o3))
        bgu,pt %XCC, 1b
        add %o1, 4, %o1

85:     retl
        mov EX_RETVAL(%o4), %o0

        .align 32

90:     EX_LD(LOAD(ldub, %o1, %g1))
        subcc %o2, 1, %o2
        EX_ST(STORE(stb, %g1, %o1 + %o3))
        bgu,pt %XCC, 90b
        add %o1, 1, %o1
        retl
        mov EX_RETVAL(%o4), %o0

        .size FUNC_NAME, .-FUNC_NAME