memcpy_power7.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Anton Blanchard <anton@au.ibm.com>
 */
#include <asm/ppc_asm.h>
_GLOBAL(memcpy_power7)
#ifdef CONFIG_ALTIVEC
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,48(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,48(r1)

	blt	.Lshort_copy
#endif
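	/*
	 * Dispatch on length, roughly (illustrative C, not part of the
	 * build):
	 *
	 *	if (len < 16)
	 *		goto short_copy;
	 *	else if (IS_ENABLED(CONFIG_ALTIVEC) && len > 4096)
	 *		goto vmx_copy;
	 *	else
	 *		goto nonvmx_copy;
	 *
	 * r3 is saved to the caller's frame at 48(r1) so the original
	 * destination pointer can be returned unchanged.
	 */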
.Lnonvmx_copy:
	/* Get the source 8B aligned */
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)
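	/*
	 * Note on the mtocrf/bf idiom used throughout: mtocrf 0x01 copies
	 * the low four bits of a count into cr7, so each "bf cr7*4+n"
	 * tests one power-of-two bit of that count and skips the matching
	 * sized copy.  Here r6 = (-src) & 7, the bytes needed to reach 8B
	 * source alignment: cr7*4+3 is the 1s bit (byte), cr7*4+2 the 2s
	 * bit (halfword), cr7*4+1 the 4s bit (word).
	 */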
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	sub	r5,r5,r6
	cmpldi	r5,128
	blt	5f
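	/*
	 * The 128B loop below keeps sixteen doublewords in flight, which
	 * needs more GPRs than the volatile set provides, so nonvolatile
	 * r14-r22 are saved to a fresh stack frame first; the frame is
	 * popped again before the sub-128B tail runs.
	 */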
	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	srdi	r6,r5,7
	mtctr	r6
	/* Now do cacheline (128B) sized loads and stores. */
	.align	5
4:
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	ld	r14,64(r4)
	ld	r15,72(r4)
	ld	r16,80(r4)
	ld	r17,88(r4)
	ld	r18,96(r4)
	ld	r19,104(r4)
	ld	r20,112(r4)
	ld	r21,120(r4)
	addi	r4,r4,128
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	std	r14,64(r3)
	std	r15,72(r3)
	std	r16,80(r3)
	std	r17,88(r3)
	std	r18,96(r3)
	std	r19,104(r3)
	std	r20,112(r3)
	std	r21,120(r3)
	addi	r3,r3,128
	bdnz	4b

	clrldi	r5,r5,(64-7)

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE
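	/*
	 * Whether we arrive from the loop above or from the early blt,
	 * r5 now holds the remaining 0-127 bytes; the bits of r5 >> 4
	 * select at most one 64B, one 32B and one 16B block below.
	 */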
	/* Up to 127B to go */
5:	srdi	r6,r5,4
	mtocrf	0x01,r6

6:	bf	cr7*4+1,7f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	addi	r4,r4,64
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	addi	r3,r3,64

	/* Up to 63B to go */
7:	bf	cr7*4+2,8f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	addi	r4,r4,32
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	addi	r3,r3,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
	ld	r0,0(r4)
	ld	r6,8(r4)
	addi	r4,r4,16
	std	r0,0(r3)
	std	r6,8(r3)
	addi	r3,r3,16

9:	clrldi	r5,r5,(64-4)
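	/*
	 * r5 is now the final 0-15 bytes; its low four bits drive the
	 * 8/4/2/1 byte moves below.
	 */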
	/* Up to 15B to go */
.Lshort_copy:
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,48(r1)
	blr
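	/*
	 * Taken from the VMX path when enter_vmx_copy cannot give us the
	 * vector unit: pop the frame .Lvmx_copy pushed and do the copy
	 * with the scalar code instead.
	 */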
.Lunwind_stack_nonvmx_copy:
	addi	r1,r1,STACKFRAMESIZE
	b	.Lnonvmx_copy
#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
	mflr	r0
	std	r4,56(r1)
	std	r5,64(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	.enter_vmx_copy
	cmpwi	r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+48(r1)
	ld	r4,STACKFRAMESIZE+56(r1)
	ld	r5,STACKFRAMESIZE+64(r1)
	mtlr	r0
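	/*
	 * The cmpwi above set cr0 from enter_vmx_copy's return value, and
	 * none of the prefetch setup below touches cr0, so the beq after
	 * the dcbt block still sees it: a zero return (VMX unusable)
	 * falls back to the scalar copy.
	 */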
	/*
	 * We prefetch both the source and destination using enhanced touch
	 * instructions. We use a stream ID of 0 for the load side and
	 * 1 for the store side.
	 */
	clrrdi	r6,r4,7
	clrrdi	r9,r3,7
	ori	r9,r9,1		/* stream=1 */

	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
	cmpldi	cr1,r7,0x3FF
	ble	cr1,1f
	li	r7,0x3FF
1:	lis	r0,0x0E00	/* depth=7 */
	sldi	r7,r7,7
	or	r7,r7,r0
	ori	r10,r7,1	/* stream=1 */

	lis	r8,0x8000	/* GO=1 */
	clrldi	r8,r8,32
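	/*
	 * In the data-stream forms of dcbt/dcbtst used here (per the
	 * Power ISA, as we read it): TH=0b01000 names a stream's start
	 * address, TH=0b01010 supplies its attributes (unit count, depth,
	 * stream ID), and the final dcbt with GO=1 starts all declared
	 * streams; the eieio orders the setup ahead of the GO.
	 */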
	.machine push
	.machine "power4"
	dcbt	r0,r6,0b01000
	dcbt	r0,r7,0b01010
	dcbtst	r0,r9,0b01000
	dcbtst	r0,r10,0b01010
	eieio
	dcbt	r0,r8,0b01010	/* GO */
	.machine pop

	beq	.Lunwind_stack_nonvmx_copy
	/*
	 * If source and destination are not relatively aligned we use a
	 * slower permute loop.  (If the low four address bits of source
	 * and destination differ, the two can never be 16B aligned at the
	 * same time, which is what the xor below tests.)
	 */
	xor	r6,r4,r3
	rldicl.	r6,r6,0,(64-4)
	bne	.Lvmx_unaligned_copy
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6
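	/*
	 * Alignment happens in two stages: the scalar moves above brought
	 * the destination to a 16B boundary for stvx, and the 16B vector
	 * copies below walk it up to a 128B boundary so the main loop's
	 * stores are cacheline aligned.
	 */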
	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)
	li	r9,16
	li	r10,32
	li	r11,48

	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6
	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	lvx	vr6,r4,r9
	lvx	vr5,r4,r10
	lvx	vr4,r4,r11
	lvx	vr3,r4,r12
	lvx	vr2,r4,r14
	lvx	vr1,r4,r15
	lvx	vr0,r4,r16
	addi	r4,r4,128
	stvx	vr7,r0,r3
	stvx	vr6,r3,r9
	stvx	vr5,r3,r10
	stvx	vr4,r3,r11
	stvx	vr3,r3,r12
	stvx	vr2,r3,r14
	stvx	vr1,r3,r15
	stvx	vr0,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */
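	/*
	 * Unaligned variant: lvx silently ignores the low four address
	 * bits, so every load below fetches an aligned 16B chunk.  lvsl
	 * builds a permute control vector from the source misalignment,
	 * and each vperm splices two adjacent chunks into the 16 bytes
	 * the destination actually needs.  vr0 always carries the most
	 * recent chunk into the next step, which is why the source
	 * pointer runs 16 bytes ahead until the tail unwinds it.
	 */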
.Lvmx_unaligned_copy:
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r7,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r7,4(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6
	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)
	li	r9,16
	li	r10,32
	li	r11,48

	lvsl	vr16,0,r4	/* Setup permute control vector */
	lvx	vr0,0,r4
	addi	r4,r4,16

	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16
	vor	vr0,vr1,vr1	/* keep the last chunk for the next vperm */

5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6
	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	vperm	vr8,vr0,vr7,vr16
	lvx	vr6,r4,r9
	vperm	vr9,vr7,vr6,vr16
	lvx	vr5,r4,r10
	vperm	vr10,vr6,vr5,vr16
	lvx	vr4,r4,r11
	vperm	vr11,vr5,vr4,vr16
	lvx	vr3,r4,r12
	vperm	vr12,vr4,vr3,vr16
	lvx	vr2,r4,r14
	vperm	vr13,vr3,vr2,vr16
	lvx	vr1,r4,r15
	vperm	vr14,vr2,vr1,vr16
	lvx	vr0,r4,r16
	vperm	vr15,vr1,vr0,vr16
	addi	r4,r4,128
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	stvx	vr12,r3,r12
	stvx	vr13,r3,r14
	stvx	vr14,r3,r15
	stvx	vr15,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	addi	r4,r4,-16	/* Unwind the +16 load offset */
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */
#endif /* CONFIG_ALTIVEC */