memcpy_power7.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Author: Anton Blanchard <anton@au.ibm.com>
 */
#include <asm/ppc_asm.h>

#define STACKFRAMESIZE	256
#define STK_REG(i)	(112 + ((i)-14)*8)
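/*
 * STK_REG(i) is the frame offset of the save slot for non-volatile
 * register i: R14 is saved at 112(r1), R15 at 120(r1), and so on up
 * to R22 at 176(r1).
 */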
_GLOBAL(memcpy_power7)
#ifdef CONFIG_ALTIVEC
	cmpldi	r5,16
	cmpldi	cr1,r5,4096

	std	r3,48(r1)

	blt	.Lshort_copy
	bgt	cr1,.Lvmx_copy
#else
	cmpldi	r5,16

	std	r3,48(r1)

	blt	.Lshort_copy
#endif
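/*
 * Copies shorter than 16 bytes take the short path. With Altivec
 * built in, copies larger than 4096 bytes take the VMX path;
 * everything in between falls through to the scalar code below.
 */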
.Lnonvmx_copy:
	/* Get the source 8B aligned */
	neg	r6,r4
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-3)
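	/*
	 * r6 = -src, i.e. the byte count up to the next 8B boundary
	 * (r6 = -(unsigned long)src & 7 after the clrldi). mtocrf 0x01
	 * copied the low bits into CR7, so each bf below skips its 1,
	 * 2 or 4 byte copy when the corresponding bit is clear.
	 */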
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	sub	r5,r5,r6
	cmpldi	r5,128
	blt	5f

	mflr	r0
	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)
	std	r17,STK_REG(R17)(r1)
	std	r18,STK_REG(R18)(r1)
	std	r19,STK_REG(R19)(r1)
	std	r20,STK_REG(R20)(r1)
	std	r21,STK_REG(R21)(r1)
	std	r22,STK_REG(R22)(r1)
	std	r0,STACKFRAMESIZE+16(r1)

	srdi	r6,r5,7
	mtctr	r6

	/* Now do cacheline (128B) sized loads and stores. */
	.align	5
4:
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	ld	r14,64(r4)
	ld	r15,72(r4)
	ld	r16,80(r4)
	ld	r17,88(r4)
	ld	r18,96(r4)
	ld	r19,104(r4)
	ld	r20,112(r4)
	ld	r21,120(r4)
	addi	r4,r4,128
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	std	r14,64(r3)
	std	r15,72(r3)
	std	r16,80(r3)
	std	r17,88(r3)
	std	r18,96(r3)
	std	r19,104(r3)
	std	r20,112(r3)
	std	r21,120(r3)
	addi	r3,r3,128
	bdnz	4b

	clrldi	r5,r5,(64-7)

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	ld	r17,STK_REG(R17)(r1)
	ld	r18,STK_REG(R18)(r1)
	ld	r19,STK_REG(R19)(r1)
	ld	r20,STK_REG(R20)(r1)
	ld	r21,STK_REG(R21)(r1)
	ld	r22,STK_REG(R22)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Up to 127B to go */
5:	srdi	r6,r5,4
	mtocrf	0x01,r6
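	/* r6 = len >> 4: CR7 bits now select the 64B, 32B and 16B tail copies below */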
6:	bf	cr7*4+1,7f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	ld	r9,32(r4)
	ld	r10,40(r4)
	ld	r11,48(r4)
	ld	r12,56(r4)
	addi	r4,r4,64
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	std	r9,32(r3)
	std	r10,40(r3)
	std	r11,48(r3)
	std	r12,56(r3)
	addi	r3,r3,64

	/* Up to 63B to go */
7:	bf	cr7*4+2,8f
	ld	r0,0(r4)
	ld	r6,8(r4)
	ld	r7,16(r4)
	ld	r8,24(r4)
	addi	r4,r4,32
	std	r0,0(r3)
	std	r6,8(r3)
	std	r7,16(r3)
	std	r8,24(r3)
	addi	r3,r3,32

	/* Up to 31B to go */
8:	bf	cr7*4+3,9f
	ld	r0,0(r4)
	ld	r6,8(r4)
	addi	r4,r4,16
	std	r0,0(r3)
	std	r6,8(r3)
	addi	r3,r3,16

9:	clrldi	r5,r5,(64-4)

	/* Up to 15B to go */
.Lshort_copy:
	mtocrf	0x01,r5
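	/* CR7 now holds the low four bits of the length: 8, 4, 2 and 1 byte tails */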
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	ld	r3,48(r1)
	blr

.Lunwind_stack_nonvmx_copy:
	addi	r1,r1,STACKFRAMESIZE
	b	.Lnonvmx_copy

#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
	mflr	r0
	std	r4,56(r1)
	std	r5,64(r1)
	std	r0,16(r1)
	stdu	r1,-STACKFRAMESIZE(r1)
	bl	.enter_vmx_copy
	cmpwi	r3,0
	ld	r0,STACKFRAMESIZE+16(r1)
	ld	r3,STACKFRAMESIZE+48(r1)
	ld	r4,STACKFRAMESIZE+56(r1)
	ld	r5,STACKFRAMESIZE+64(r1)
	mtlr	r0
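	/*
	 * enter_vmx_copy returned 0 (tested by the cmpwi above) if VMX
	 * cannot be used; the beq after the prefetch setup below then
	 * falls back to the scalar copy.
	 */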
	/*
	 * We prefetch both the source and destination using enhanced touch
	 * instructions. We use a stream ID of 0 for the load side and
	 * 1 for the store side.
	 */
	clrrdi	r6,r4,7
	clrrdi	r9,r3,7
	ori	r9,r9,1		/* stream=1 */

	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
	cmpldi	cr1,r7,0x3FF
	ble	cr1,1f
	li	r7,0x3FF
1:	lis	r0,0x0E00	/* depth=7 */
	sldi	r7,r7,7
	or	r7,r7,r0
	ori	r10,r7,1	/* stream=1 */

	lis	r8,0x8000	/* GO=1 */
	clrldi	r8,r8,32
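	/*
	 * TH=0b01000 below describes the stream (address and length),
	 * TH=0b01010 sets stream control, including the final GO that
	 * starts all nominated streams. The .machine "power4" override
	 * is what lets these extended dcbt/dcbtst forms assemble.
	 */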
	.machine push
	.machine "power4"
	dcbt	r0,r6,0b01000
	dcbt	r0,r7,0b01010
	dcbtst	r0,r9,0b01000
	dcbtst	r0,r10,0b01010
	eieio
	dcbt	r0,r8,0b01010	/* GO */
	.machine pop

	beq	.Lunwind_stack_nonvmx_copy

	/*
	 * If source and destination are not relatively aligned we use a
	 * slower permute loop.
	 */
	xor	r6,r4,r3
	rldicl.	r6,r6,0,(64-4)
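	/*
	 * Only the low four bits of src XOR dst matter: if they are
	 * nonzero the two buffers can never be 16B aligned at the same
	 * time, so we must take the vperm-based loop.
	 */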
	bne	.Lvmx_unaligned_copy

	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6

	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48
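	/* An r0 base in lvx/stvx reads as literal zero, so (r0,rX) addresses 0(rX) */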
	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	lvx	vr6,r4,r9
	lvx	vr5,r4,r10
	lvx	vr4,r4,r11
	lvx	vr3,r4,r12
	lvx	vr2,r4,r14
	lvx	vr1,r4,r15
	lvx	vr0,r4,r16
	addi	r4,r4,128
	stvx	vr7,r0,r3
	stvx	vr6,r3,r9
	stvx	vr5,r3,r10
	stvx	vr4,r3,r11
	stvx	vr3,r3,r12
	stvx	vr2,r3,r14
	stvx	vr1,r3,r15
	stvx	vr0,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	lvx	vr2,r4,r9
	lvx	vr1,r4,r10
	lvx	vr0,r4,r11
	addi	r4,r4,64
	stvx	vr3,r0,r3
	stvx	vr2,r3,r9
	stvx	vr1,r3,r10
	stvx	vr0,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	lvx	vr0,r4,r9
	addi	r4,r4,32
	stvx	vr1,r0,r3
	stvx	vr0,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	addi	r4,r4,16
	stvx	vr1,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	ld	r0,0(r4)
	addi	r4,r4,8
	std	r0,0(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */
.Lvmx_unaligned_copy:
	/* Get the destination 16B aligned */
	neg	r6,r3
	mtocrf	0x01,r6
	clrldi	r6,r6,(64-4)

	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	addi	r4,r4,1
	stb	r0,0(r3)
	addi	r3,r3,1

1:	bf	cr7*4+2,2f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

2:	bf	cr7*4+1,3f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

3:	bf	cr7*4+0,4f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r7,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r7,4(r3)
	addi	r3,r3,8

4:	sub	r5,r5,r6
	/* Get the destination 128B aligned */
	neg	r6,r3
	srdi	r7,r6,4
	mtocrf	0x01,r7
	clrldi	r6,r6,(64-7)

	li	r9,16
	li	r10,32
	li	r11,48

	lvsl	vr16,0,r4	/* Setup permute control vector */
	lvx	vr0,0,r4
	addi	r4,r4,16
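	/*
	 * lvx ignores the low four address bits, so each 16B of output
	 * is produced by vperm-merging two adjacent aligned quadwords:
	 * vr16 (from lvsl) selects the source's true byte window, and
	 * vr0 carries the previous quadword into the next merge.
	 */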
	bf	cr7*4+3,5f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16
	vor	vr0,vr1,vr1
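	/* vor with identical sources is a vector move: keep the last quadword for the next merge */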
5:	bf	cr7*4+2,6f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

6:	bf	cr7*4+1,7f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

7:	sub	r5,r5,r6
	srdi	r6,r5,7

	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	li	r12,64
	li	r14,80
	li	r15,96
	li	r16,112

	mtctr	r6

	/*
	 * Now do cacheline sized loads and stores. By this stage the
	 * cacheline stores are also cacheline aligned.
	 */
	.align	5
8:
	lvx	vr7,r0,r4
	vperm	vr8,vr0,vr7,vr16
	lvx	vr6,r4,r9
	vperm	vr9,vr7,vr6,vr16
	lvx	vr5,r4,r10
	vperm	vr10,vr6,vr5,vr16
	lvx	vr4,r4,r11
	vperm	vr11,vr5,vr4,vr16
	lvx	vr3,r4,r12
	vperm	vr12,vr4,vr3,vr16
	lvx	vr2,r4,r14
	vperm	vr13,vr3,vr2,vr16
	lvx	vr1,r4,r15
	vperm	vr14,vr2,vr1,vr16
	lvx	vr0,r4,r16
	vperm	vr15,vr1,vr0,vr16
	addi	r4,r4,128
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	stvx	vr12,r3,r12
	stvx	vr13,r3,r14
	stvx	vr14,r3,r15
	stvx	vr15,r3,r16
	addi	r3,r3,128
	bdnz	8b

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)

	/* Up to 127B to go */
	clrldi	r5,r5,(64-7)
	srdi	r6,r5,4
	mtocrf	0x01,r6

	bf	cr7*4+1,9f
	lvx	vr3,r0,r4
	vperm	vr8,vr0,vr3,vr16
	lvx	vr2,r4,r9
	vperm	vr9,vr3,vr2,vr16
	lvx	vr1,r4,r10
	vperm	vr10,vr2,vr1,vr16
	lvx	vr0,r4,r11
	vperm	vr11,vr1,vr0,vr16
	addi	r4,r4,64
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	stvx	vr10,r3,r10
	stvx	vr11,r3,r11
	addi	r3,r3,64

9:	bf	cr7*4+2,10f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	lvx	vr0,r4,r9
	vperm	vr9,vr1,vr0,vr16
	addi	r4,r4,32
	stvx	vr8,r0,r3
	stvx	vr9,r3,r9
	addi	r3,r3,32

10:	bf	cr7*4+3,11f
	lvx	vr1,r0,r4
	vperm	vr8,vr0,vr1,vr16
	addi	r4,r4,16
	stvx	vr8,r0,r3
	addi	r3,r3,16

	/* Up to 15B to go */
11:	clrldi	r5,r5,(64-4)
	addi	r4,r4,-16	/* Unwind the +16 load offset */
	mtocrf	0x01,r5
	bf	cr7*4+0,12f
	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
	lwz	r6,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r6,4(r3)
	addi	r3,r3,8

12:	bf	cr7*4+1,13f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4

13:	bf	cr7*4+2,14f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2

14:	bf	cr7*4+3,15f
	lbz	r0,0(r4)
	stb	r0,0(r3)

15:	addi	r1,r1,STACKFRAMESIZE
	ld	r3,48(r1)
	b	.exit_vmx_copy		/* tail call optimise */
#endif /* CONFIG_ALTIVEC */