/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2008 Jim Law - Iris LP All rights reserved.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Jim Law <jlaw@irispower.com>
 *
 * intended to replace:
 *	memcpy in memcpy.c and
 *	memmove in memmove.c
 *	... in arch/microblaze/lib
 *
 *
 * assly_fastcopy.S
 *
 * Attempt at quicker memcpy and memmove for MicroBlaze
 *	Input : Operand1 in Reg r5 - destination address
 *		Operand2 in Reg r6 - source address
 *		Operand3 in Reg r7 - number of bytes to transfer
 *	Output: Result in Reg r3 - starting destination address
 *
 *
 * Explanation:
 *	Perform (possibly unaligned) copy of a block of memory
 *	between mem locations with size of xfer spec'd in bytes
 */
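/*
 * Overview, as a rough C sketch of the ascending path (a sketch for
 * orientation only, not part of the build; the helper names are purely
 * illustrative):
 *
 *	void *memcpy(void *d, const void *s, size_t c)
 *	{
 *		void *r = d;
 *		if (c >= 4) {
 *			align_dest(&d, &s, &c);      copies 0~3 head bytes
 *			copy_blocks_32(&d, &s, &c);  c & ~31 bytes, unrolled
 *			copy_words(&d, &s, &c);      c & ~3 bytes
 *		}
 *		copy_bytes(d, s, c);                 final 0~3 bytes
 *		return r;
 *	}
 */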
#include <linux/linkage.h>

        .globl  memcpy
        .ent    memcpy

memcpy:
fast_memcpy_ascending:
        /* move d to return register as value of function */
        addi    r3, r5, 0

        addi    r4, r0, 4               /* n = 4 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        blti    r4, a_xfer_end          /* if n < 0, less than one word to transfer */

        /* transfer first 0~3 bytes to get aligned dest address */
        andi    r4, r5, 3               /* n = d & 3 */
        /* if zero, destination already aligned */
        beqi    r4, a_dalign_done
        /* n = 4 - n (yields 3, 2, 1 transfers for 1, 2, 3 addr offset) */
        rsubi   r4, r4, 4
        rsub    r7, r4, r7              /* c = c - n, adjust c */

a_xfer_first_loop:
        /* if no bytes left to transfer, transfer the bulk */
        beqi    r4, a_dalign_done
        lbui    r11, r6, 0              /* h = *s */
        sbi     r11, r5, 0              /* *d = h */
        addi    r6, r6, 1               /* s++ */
        addi    r5, r5, 1               /* d++ */
        brid    a_xfer_first_loop       /* loop */
        addi    r4, r4, -1              /* n-- (IN DELAY SLOT) */

a_dalign_done:
        addi    r4, r0, 32              /* n = 32 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        /* if n < 0, less than one block to transfer */
        blti    r4, a_block_done
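/*
 * Block phase: move c & ~31 bytes in unrolled 32-byte chunks.  With the
 * source now word aligned as well, plain lwi/swi pairs suffice; otherwise
 * control branches to the shift-and-merge loops further below.
 */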
a_block_xfer:
        andi    r4, r7, 0xffffffe0      /* n = c & ~31 */
        rsub    r7, r4, r7              /* c = c - n */
        andi    r9, r6, 3               /* t1 = s & 3 */
        /* if temp != 0, unaligned transfers needed */
        bnei    r9, a_block_unaligned

a_block_aligned:
        lwi     r9, r6, 0               /* t1 = *(s + 0) */
        lwi     r10, r6, 4              /* t2 = *(s + 4) */
        lwi     r11, r6, 8              /* t3 = *(s + 8) */
        lwi     r12, r6, 12             /* t4 = *(s + 12) */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        swi     r10, r5, 4              /* *(d + 4) = t2 */
        swi     r11, r5, 8              /* *(d + 8) = t3 */
        swi     r12, r5, 12             /* *(d + 12) = t4 */
        lwi     r9, r6, 16              /* t1 = *(s + 16) */
        lwi     r10, r6, 20             /* t2 = *(s + 20) */
        lwi     r11, r6, 24             /* t3 = *(s + 24) */
        lwi     r12, r6, 28             /* t4 = *(s + 28) */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        swi     r10, r5, 20             /* *(d + 20) = t2 */
        swi     r11, r5, 24             /* *(d + 24) = t3 */
        swi     r12, r5, 28             /* *(d + 28) = t4 */
        addi    r6, r6, 32              /* s = s + 32 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, a_block_aligned     /* while (n) loop */
        addi    r5, r5, 32              /* d = d + 32 (IN DELAY SLOT) */
        bri     a_block_done
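/*
 * Unaligned source: issue only aligned word loads.  Words are fetched from
 * as = s & ~3 and each destination word is assembled from the pair of
 * aligned words straddling it, e.g. for a 1-byte source offset:
 *
 *	*(d + 0) = (h << 8) | (v >> 24)
 *
 * where h is the previous aligned word and v the next; v then becomes the
 * h of the following step.  The three entry points below differ only in
 * the shift amounts: 24/8, 8/24 and 16/16 for offsets 3, 1 and 2.  Note
 * that bslli/bsrli are barrel-shift instructions, so this path assumes the
 * CPU is configured with a barrel shifter.
 */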
a_block_unaligned:
        andi    r8, r6, 0xfffffffc      /* as = s & ~3 */
        add     r6, r6, r4              /* s = s + n */
        lwi     r11, r8, 0              /* h = *(as + 0) */

        addi    r9, r9, -1
        beqi    r9, a_block_u1          /* t1 was 1 => 1 byte offset */
        addi    r9, r9, -1
        beqi    r9, a_block_u2          /* t1 was 2 => 2 byte offset */

a_block_u3:
        bslli   r11, r11, 24            /* h = h << 24 */
a_bu3_loop:
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        lwi     r12, r8, 32             /* v = *(as + 32) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        addi    r8, r8, 32              /* as = as + 32 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, a_bu3_loop          /* while (n) loop */
        addi    r5, r5, 32              /* d = d + 32 (IN DELAY SLOT) */
        bri     a_block_done

a_block_u1:
        bslli   r11, r11, 8             /* h = h << 8 */
a_bu1_loop:
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        lwi     r12, r8, 32             /* v = *(as + 32) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        addi    r8, r8, 32              /* as = as + 32 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, a_bu1_loop          /* while (n) loop */
        addi    r5, r5, 32              /* d = d + 32 (IN DELAY SLOT) */
        bri     a_block_done

a_block_u2:
        bslli   r11, r11, 16            /* h = h << 16 */
a_bu2_loop:
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        lwi     r12, r8, 32             /* v = *(as + 32) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        addi    r8, r8, 32              /* as = as + 32 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, a_bu2_loop          /* while (n) loop */
        addi    r5, r5, 32              /* d = d + 32 (IN DELAY SLOT) */

a_block_done:
        addi    r4, r0, 4               /* n = 4 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        blti    r4, a_xfer_end          /* if n < 0, less than one word to transfer */
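/*
 * Word phase: move the remaining c & ~3 bytes one word per iteration,
 * indexed by offset in r10.  A misaligned source reuses the same
 * shift-and-merge scheme as the block loops above, one word at a time.
 */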
a_word_xfer:
        andi    r4, r7, 0xfffffffc      /* n = c & ~3 */
        addi    r10, r0, 0              /* offset = 0 */
        andi    r9, r6, 3               /* t1 = s & 3 */
        /* if temp != 0, unaligned transfers needed */
        bnei    r9, a_word_unaligned

a_word_aligned:
        lw      r9, r6, r10             /* t1 = *(s + offset) */
        sw      r9, r5, r10             /* *(d + offset) = t1 */
        addi    r4, r4, -4              /* n = n - 4 */
        bneid   r4, a_word_aligned      /* loop */
        addi    r10, r10, 4             /* offset = offset + 4 (IN DELAY SLOT) */
        bri     a_word_done
a_word_unaligned:
        andi    r8, r6, 0xfffffffc      /* as = s & ~3 */
        lwi     r11, r8, 0              /* h = *(as + 0) */
        addi    r8, r8, 4               /* as = as + 4 */

        addi    r9, r9, -1
        beqi    r9, a_word_u1           /* t1 was 1 => 1 byte offset */
        addi    r9, r9, -1
        beqi    r9, a_word_u2           /* t1 was 2 => 2 byte offset */

a_word_u3:
        bslli   r11, r11, 24            /* h = h << 24 */
a_wu3_loop:
        lw      r12, r8, r10            /* v = *(as + offset) */
        bsrli   r9, r12, 8              /* t1 = v >> 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r10             /* *(d + offset) = t1 */
        bslli   r11, r12, 24            /* h = v << 24 */
        addi    r4, r4, -4              /* n = n - 4 */
        bneid   r4, a_wu3_loop          /* while (n) loop */
        addi    r10, r10, 4             /* offset = offset + 4 (IN DELAY SLOT) */
        bri     a_word_done

a_word_u1:
        bslli   r11, r11, 8             /* h = h << 8 */
a_wu1_loop:
        lw      r12, r8, r10            /* v = *(as + offset) */
        bsrli   r9, r12, 24             /* t1 = v >> 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r10             /* *(d + offset) = t1 */
        bslli   r11, r12, 8             /* h = v << 8 */
        addi    r4, r4, -4              /* n = n - 4 */
        bneid   r4, a_wu1_loop          /* while (n) loop */
        addi    r10, r10, 4             /* offset = offset + 4 (IN DELAY SLOT) */
        bri     a_word_done

a_word_u2:
        bslli   r11, r11, 16            /* h = h << 16 */
a_wu2_loop:
        lw      r12, r8, r10            /* v = *(as + offset) */
        bsrli   r9, r12, 16             /* t1 = v >> 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r10             /* *(d + offset) = t1 */
        bslli   r11, r12, 16            /* h = v << 16 */
        addi    r4, r4, -4              /* n = n - 4 */
        bneid   r4, a_wu2_loop          /* while (n) loop */
        addi    r10, r10, 4             /* offset = offset + 4 (IN DELAY SLOT) */

a_word_done:
        add     r5, r5, r10             /* d = d + offset */
        add     r6, r6, r10             /* s = s + offset */
        rsub    r7, r10, r7             /* c = c - offset */
a_xfer_end:
a_xfer_end_loop:
        beqi    r7, a_done              /* while (c) */
        lbui    r9, r6, 0               /* t1 = *s */
        addi    r6, r6, 1               /* s++ */
        sbi     r9, r5, 0               /* *d = t1 */
        addi    r7, r7, -1              /* c-- */
        brid    a_xfer_end_loop         /* loop */
        addi    r5, r5, 1               /* d++ (IN DELAY SLOT) */

a_done:
        rtsd    r15, 8
        nop

        .end memcpy

/*---------------------------------------------------------------------------*/
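/*
 * memmove must tolerate overlapping buffers.  When the destination sits at
 * or below the source, a forward copy is safe and memmove simply branches
 * into the ascending memcpy above; otherwise it copies descending from the
 * ends of both buffers, mirroring each phase of the ascending path.
 */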
        .globl  memmove
        .ent    memmove

memmove:
        cmpu    r4, r5, r6              /* n = s - d */
        bgei    r4, fast_memcpy_ascending

fast_memcpy_descending:
        /* move d to return register as value of function */
        addi    r3, r5, 0

        add     r5, r5, r7              /* d = d + c */
        add     r6, r6, r7              /* s = s + c */

        addi    r4, r0, 4               /* n = 4 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        blti    r4, d_xfer_end          /* if n < 0, less than one word to transfer */

        /* transfer first 0~3 bytes to get aligned dest address */
        andi    r4, r5, 3               /* n = d & 3 */
        /* if zero, destination already aligned */
        beqi    r4, d_dalign_done
        rsub    r7, r4, r7              /* c = c - n, adjust c */

d_xfer_first_loop:
        /* if no bytes left to transfer, transfer the bulk */
        beqi    r4, d_dalign_done
        addi    r6, r6, -1              /* s-- */
        addi    r5, r5, -1              /* d-- */
        lbui    r11, r6, 0              /* h = *s */
        sbi     r11, r5, 0              /* *d = h */
        brid    d_xfer_first_loop       /* loop */
        addi    r4, r4, -1              /* n-- (IN DELAY SLOT) */

d_dalign_done:
        addi    r4, r0, 32              /* n = 32 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        /* if n < 0, less than one block to transfer */
        blti    r4, d_block_done
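/* Descending mirror of a_block_xfer: 32-byte chunks, highest address first */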
d_block_xfer:
        andi    r4, r7, 0xffffffe0      /* n = c & ~31 */
        rsub    r7, r4, r7              /* c = c - n */
        andi    r9, r6, 3               /* t1 = s & 3 */
        /* if temp != 0, unaligned transfers needed */
        bnei    r9, d_block_unaligned

d_block_aligned:
        addi    r6, r6, -32             /* s = s - 32 */
        addi    r5, r5, -32             /* d = d - 32 */
        lwi     r9, r6, 28              /* t1 = *(s + 28) */
        lwi     r10, r6, 24             /* t2 = *(s + 24) */
        lwi     r11, r6, 20             /* t3 = *(s + 20) */
        lwi     r12, r6, 16             /* t4 = *(s + 16) */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        swi     r10, r5, 24             /* *(d + 24) = t2 */
        swi     r11, r5, 20             /* *(d + 20) = t3 */
        swi     r12, r5, 16             /* *(d + 16) = t4 */
        lwi     r9, r6, 12              /* t1 = *(s + 12) */
        lwi     r10, r6, 8              /* t2 = *(s + 8) */
        lwi     r11, r6, 4              /* t3 = *(s + 4) */
        lwi     r12, r6, 0              /* t4 = *(s + 0) */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        swi     r10, r5, 8              /* *(d + 8) = t2 */
        swi     r11, r5, 4              /* *(d + 4) = t3 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, d_block_aligned     /* while (n) loop */
        swi     r12, r5, 0              /* *(d + 0) = t4 (IN DELAY SLOT) */
        bri     d_block_done
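/*
 * Descending shift-and-merge: the counterpart of a_block_unaligned with the
 * shift directions swapped, since the carry word h now holds the high-order
 * bytes of the aligned word just above, shifted down to the low end.  E.g.
 * for a 1-byte source offset each step computes
 *
 *	*(d + k) = h | (v << 8),  then  h = v >> 24
 */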
d_block_unaligned:
        andi    r8, r6, 0xfffffffc      /* as = s & ~3 */
        rsub    r6, r4, r6              /* s = s - n */
        lwi     r11, r8, 0              /* h = *(as + 0) */

        addi    r9, r9, -1
        beqi    r9, d_block_u1          /* t1 was 1 => 1 byte offset */
        addi    r9, r9, -1
        beqi    r9, d_block_u2          /* t1 was 2 => 2 byte offset */

d_block_u3:
        bsrli   r11, r11, 8             /* h = h >> 8 */
d_bu3_loop:
        addi    r8, r8, -32             /* as = as - 32 */
        addi    r5, r5, -32             /* d = d - 32 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bsrli   r11, r12, 8             /* h = v >> 8 */
        lwi     r12, r8, 0              /* v = *(as + 0) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, d_bu3_loop          /* while (n) loop */
        bsrli   r11, r12, 8             /* h = v >> 8 (IN DELAY SLOT) */
        bri     d_block_done
d_block_u1:
        bsrli   r11, r11, 24            /* h = h >> 24 */
d_bu1_loop:
        addi    r8, r8, -32             /* as = as - 32 */
        addi    r5, r5, -32             /* d = d - 32 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bsrli   r11, r12, 24            /* h = v >> 24 */
        lwi     r12, r8, 0              /* v = *(as + 0) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, d_bu1_loop          /* while (n) loop */
        bsrli   r11, r12, 24            /* h = v >> 24 (IN DELAY SLOT) */
        bri     d_block_done
d_block_u2:
        bsrli   r11, r11, 16            /* h = h >> 16 */
d_bu2_loop:
        addi    r8, r8, -32             /* as = as - 32 */
        addi    r5, r5, -32             /* d = d - 32 */
        lwi     r12, r8, 28             /* v = *(as + 28) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 28              /* *(d + 28) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 24             /* v = *(as + 24) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 24              /* *(d + 24) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 20             /* v = *(as + 20) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 20              /* *(d + 20) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 16             /* v = *(as + 16) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 16              /* *(d + 16) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 12             /* v = *(as + 12) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 12              /* *(d + 12) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 8              /* v = *(as + 8) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 8               /* *(d + 8) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 4              /* v = *(as + 4) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 4               /* *(d + 4) = t1 */
        bsrli   r11, r12, 16            /* h = v >> 16 */
        lwi     r12, r8, 0              /* v = *(as + 0) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        swi     r9, r5, 0               /* *(d + 0) = t1 */
        addi    r4, r4, -32             /* n = n - 32 */
        bneid   r4, d_bu2_loop          /* while (n) loop */
        bsrli   r11, r12, 16            /* h = v >> 16 (IN DELAY SLOT) */

d_block_done:
        addi    r4, r0, 4               /* n = 4 */
        cmpu    r4, r4, r7              /* n = c - n (unsigned) */
        blti    r4, d_xfer_end          /* if n < 0, less than one word to transfer */
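/*
 * Descending word phase: d, s and c are dropped by n up front, then the
 * remaining words are copied from offset n downwards until n reaches zero.
 */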
d_word_xfer:
        andi    r4, r7, 0xfffffffc      /* n = c & ~3 */
        rsub    r5, r4, r5              /* d = d - n */
        rsub    r6, r4, r6              /* s = s - n */
        rsub    r7, r4, r7              /* c = c - n */
        andi    r9, r6, 3               /* t1 = s & 3 */
        /* if temp != 0, unaligned transfers needed */
        bnei    r9, d_word_unaligned

d_word_aligned:
        addi    r4, r4, -4              /* n = n - 4 */
        lw      r9, r6, r4              /* t1 = *(s + n) */
        bneid   r4, d_word_aligned      /* loop */
        sw      r9, r5, r4              /* *(d + n) = t1 (IN DELAY SLOT) */
        bri     d_word_done

d_word_unaligned:
        andi    r8, r6, 0xfffffffc      /* as = s & ~3 */
        lw      r11, r8, r4             /* h = *(as + n) */

        addi    r9, r9, -1
        beqi    r9, d_word_u1           /* t1 was 1 => 1 byte offset */
        addi    r9, r9, -1
        beqi    r9, d_word_u2           /* t1 was 2 => 2 byte offset */

d_word_u3:
        bsrli   r11, r11, 8             /* h = h >> 8 */
d_wu3_loop:
        addi    r4, r4, -4              /* n = n - 4 */
        lw      r12, r8, r4             /* v = *(as + n) */
        bslli   r9, r12, 24             /* t1 = v << 24 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r4              /* *(d + n) = t1 */
        bneid   r4, d_wu3_loop          /* while (n) loop */
        bsrli   r11, r12, 8             /* h = v >> 8 (IN DELAY SLOT) */
        bri     d_word_done

d_word_u1:
        bsrli   r11, r11, 24            /* h = h >> 24 */
d_wu1_loop:
        addi    r4, r4, -4              /* n = n - 4 */
        lw      r12, r8, r4             /* v = *(as + n) */
        bslli   r9, r12, 8              /* t1 = v << 8 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r4              /* *(d + n) = t1 */
        bneid   r4, d_wu1_loop          /* while (n) loop */
        bsrli   r11, r12, 24            /* h = v >> 24 (IN DELAY SLOT) */
        bri     d_word_done

d_word_u2:
        bsrli   r11, r11, 16            /* h = h >> 16 */
d_wu2_loop:
        addi    r4, r4, -4              /* n = n - 4 */
        lw      r12, r8, r4             /* v = *(as + n) */
        bslli   r9, r12, 16             /* t1 = v << 16 */
        or      r9, r11, r9             /* t1 = h | t1 */
        sw      r9, r5, r4              /* *(d + n) = t1 */
        bneid   r4, d_wu2_loop          /* while (n) loop */
        bsrli   r11, r12, 16            /* h = v >> 16 (IN DELAY SLOT) */

d_word_done:
d_xfer_end:
d_xfer_end_loop:
        beqi    r7, a_done              /* while (c) */
        addi    r6, r6, -1              /* s-- */
        lbui    r9, r6, 0               /* t1 = *s */
        addi    r5, r5, -1              /* d-- */
        sbi     r9, r5, 0               /* *d = t1 */
        brid    d_xfer_end_loop         /* loop */
        addi    r7, r7, -1              /* c-- (IN DELAY SLOT) */

d_done:
        rtsd    r15, 8
        nop

        .end memmove