/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we are sharing the code base with the mips32 tree (which uses the
 * o32 ABI register definitions), we need to redefine the register
 * definitions from the n64 ABI register naming to the o32 ABI register
 * naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15

#define USE_DOUBLE
#endif
#ifdef USE_DOUBLE
#define LOAD ld
#define ADD  daddu
#define NBYTES 8
#else
#define LOAD lw
#define ADD  addu
#define NBYTES 4
#endif /* USE_DOUBLE */

#define UNIT(unit) ((unit)*NBYTES)
#define ADDC(sum,reg)          \
        .set push;             \
        .set noat;             \
        ADD sum, reg;          \
        sltu v1, sum, reg;     \
        ADD sum, v1;           \
        .set pop
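
/*
 * ADDC is the end-around-carry add at the heart of the ones-complement
 * checksum: when the addition wraps, the carry is fed back into the low
 * end of the sum.  Roughly, in C:
 *
 *	sum += reg;
 *	if (sum < reg)	// unsigned wrap means a carry came out
 *		sum++;	// ... so add it back in
 */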

#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
        LOAD _t0, (offset + UNIT(0))(src);  \
        LOAD _t1, (offset + UNIT(1))(src);  \
        LOAD _t2, (offset + UNIT(2))(src);  \
        LOAD _t3, (offset + UNIT(3))(src);  \
        ADDC(sum, _t0);                     \
        ADDC(sum, _t1);                     \
        ADDC(sum, _t2);                     \
        ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)  \
        CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)   \
        CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
        CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif
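
/*
 * Either way, one CSUM_BIGCHUNK sums 0x20 bytes: four doublewords when
 * USE_DOUBLE is defined, otherwise eight words via two expansions of
 * CSUM_BIGCHUNK1.
 */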

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */

#define src a0
#define sum v0

        .text
        .set noreorder
        .align 5
LEAF(csum_partial)
        move sum, zero
        move t7, zero

        sltiu t8, a1, 0x8
        bnez t8, .Lsmall_csumcpy        /* < 8 bytes to copy */
        move t2, a1

        andi t7, src, 0x1               /* odd buffer? */

.Lhword_align:
        beqz t7, .Lword_align
        andi t8, src, 0x2

        lbu t0, (src)
        LONG_SUBU a1, a1, 0x1
#ifdef __MIPSEL__
        sll t0, t0, 8
#endif
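        /*
         * A byte at an odd address sits in the other byte lane of the
         * 16-bit checksum stream.  On little-endian it is shifted into
         * the high lane here; combined with the final byte swap under
         * the "odd buffer alignment?" test below, the folded result
         * comes out the same as for an aligned buffer.
         */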
        ADDC(sum, t0)
        PTR_ADDU src, src, 0x1
        andi t8, src, 0x2

.Lword_align:
        beqz t8, .Ldword_align
        sltiu t8, a1, 56

        lhu t0, (src)
        LONG_SUBU a1, a1, 0x2
        ADDC(sum, t0)
        sltiu t8, a1, 56
        PTR_ADDU src, src, 0x2

.Ldword_align:
        bnez t8, .Ldo_end_words
        move t8, a1

        andi t8, src, 0x4
        beqz t8, .Lqword_align
        andi t8, src, 0x8

        lw t0, 0x00(src)
        LONG_SUBU a1, a1, 0x4
        ADDC(sum, t0)
        PTR_ADDU src, src, 0x4
        andi t8, src, 0x8

.Lqword_align:
        beqz t8, .Loword_align
        andi t8, src, 0x10

#ifdef USE_DOUBLE
        ld t0, 0x00(src)
        LONG_SUBU a1, a1, 0x8
        ADDC(sum, t0)
#else
        lw t0, 0x00(src)
        lw t1, 0x04(src)
        LONG_SUBU a1, a1, 0x8
        ADDC(sum, t0)
        ADDC(sum, t1)
#endif
        PTR_ADDU src, src, 0x8
        andi t8, src, 0x10

.Loword_align:
        beqz t8, .Lbegin_movement
        LONG_SRL t8, a1, 0x7

#ifdef USE_DOUBLE
        ld t0, 0x00(src)
        ld t1, 0x08(src)
        ADDC(sum, t0)
        ADDC(sum, t1)
#else
        CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
        LONG_SUBU a1, a1, 0x10
        PTR_ADDU src, src, 0x10
        LONG_SRL t8, a1, 0x7

.Lbegin_movement:
        beqz t8, 1f
        andi t2, a1, 0x40

.Lmove_128bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
        LONG_SUBU t8, t8, 0x01
        .set reorder                    /* DADDI_WAR */
        PTR_ADDU src, src, 0x80
        bnez t8, .Lmove_128bytes
        .set noreorder

1:
        beqz t2, 1f
        andi t2, a1, 0x20

.Lmove_64bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
        PTR_ADDU src, src, 0x40

1:
        beqz t2, .Ldo_end_words
        andi t8, a1, 0x1c

.Lmove_32bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        andi t8, a1, 0x1c
        PTR_ADDU src, src, 0x20

.Ldo_end_words:
        beqz t8, .Lsmall_csumcpy
        andi t2, a1, 0x3
        LONG_SRL t8, t8, 0x2

.Lend_words:
        lw t0, (src)
        LONG_SUBU t8, t8, 0x1
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        PTR_ADDU src, src, 0x4
        bnez t8, .Lend_words
        .set noreorder

/* unknown src alignment and < 8 bytes to go */
.Lsmall_csumcpy:
        move a1, t2

        andi t0, a1, 4
        beqz t0, 1f
        andi t0, a1, 2

        /* Still a full word to go */
        ulw t1, (src)
        PTR_ADDIU src, 4
        ADDC(sum, t1)

1:      move t1, zero
        beqz t0, 1f
        andi t0, a1, 1

        /* Still a halfword to go */
        ulhu t1, (src)
        PTR_ADDIU src, 2

1:      beqz t0, 1f
        sll t1, t1, 16

        lbu t2, (src)
        nop
#ifdef __MIPSEB__
        sll t2, t2, 8
#endif
        or t1, t2

1:      ADDC(sum, t1)
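
        /*
         * Folding reduces the wide accumulator to 16 bits by adding the
         * high half into the low half and propagating the carry.  For
         * the 32-bit case this is roughly:
         *
         *	sum = (sum >> 16) + (sum & 0xffff);
         *	sum += sum >> 16;
         */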
        /* fold checksum */
        .set push
        .set noat
#ifdef USE_DOUBLE
        dsll32 v1, sum, 0
        daddu sum, v1
        sltu v1, sum, v1
        dsra32 sum, sum, 0
        addu sum, v1
#endif
        sll v1, sum, 16
        addu sum, v1
        sltu v1, sum, v1
        srl sum, sum, 16
        addu sum, v1

        /* odd buffer alignment? */
        beqz t7, 1f
        nop
        sll v1, sum, 8
        srl sum, sum, 8
        or sum, v1
        andi sum, 0xffff
        .set pop
1:
        .set reorder
        /* Add the passed partial csum. */
        ADDC(sum, a2)
        jr ra
        .set noreorder
        END(csum_partial)
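
/*
 * Net effect of csum_partial() above, as a rough C sketch (the helper
 * names here are illustrative only, not kernel API):
 *
 *	u32 csum_partial(const void *buf, int len, u32 partial)
 *	{
 *		u32 sum = ones_complement_sum(buf, len);	// ADDC loop
 *		sum = fold_to_16bits(sum);			// fold block
 *		if ((unsigned long)buf & 1)			// odd start
 *			sum = ((sum << 8) | (sum >> 8)) & 0xffff;
 *		return add_with_end_around_carry(sum, partial);
 *	}
 */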

/*
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */

#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry).
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *     not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */

#define EXC(inst_reg,addr,handler)      \
9:      inst_reg, addr;                 \
        .section __ex_table,"a";        \
        PTR 9b, handler;                \
        .previous
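
/*
 * For example, EXC( LOAD t0, UNIT(0)(src), .Ll_exc) expands to
 *
 *	9:	LOAD t0, UNIT(0)(src)
 *		.section __ex_table,"a"
 *		PTR 9b, .Ll_exc
 *		.previous
 *
 * i.e. the load gets a local label and an exception-table entry so a
 * fault on it is redirected to the .Ll_exc fixup code.
 */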

#ifdef USE_DOUBLE

#define LOAD   ld
#define LOADL  ldl
#define LOADR  ldr
#define STOREL sdl
#define STORER sdr
#define STORE  sd
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

#else

#define LOAD   lw
#define LOADL  lwl
#define LOADR  lwr
#define STOREL swl
#define STORER swr
#define STORE  sw
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)
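
/*
 * LDFIRST/LDREST pair the left/right partial-word loads (lwl/lwr, or
 * ldl/ldr with USE_DOUBLE) to read one possibly-unaligned unit:
 * LDFIRST at FIRST(n) fetches the low-addressed bytes, LDREST at
 * REST(n) fills in the remainder.  Which instruction is the "left"
 * one depends on endianness, hence the swapped definitions above.
 */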

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        .set noat
#else
        .set at=v1
#endif

LEAF(__csum_partial_copy_user)
        PTR_ADDU AT, src, len           /* See (1) above. */
#ifdef CONFIG_64BIT
        move errptr, a4
#else
        lw errptr, 16(sp)
#endif
FEXPORT(csum_partial_copy_nocheck)
        move sum, zero
        move odd, zero
        /*
         * Note: dst & src may be unaligned, len may be 0
         * Temps
         */
        /*
         * The "issue break"s below are very approximate.
         * Issue delays for dcache fills will perturb the schedule, as will
         * load queue full replay traps, etc.
         *
         * If len < NBYTES use byte operations.
         */
        sltu t2, len, NBYTES
        and t1, dst, ADDRMASK
        bnez t2, .Lcopy_bytes_checklen
        and t0, src, ADDRMASK
        andi odd, dst, 0x1              /* odd buffer? */
        bnez t1, .Ldst_unaligned
        nop
        bnez t0, .Lsrc_unaligned_dst_aligned
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
         */
.Lboth_aligned:
        SRL t0, len, LOG_NBYTES+3       # +3 for 8 units/iter
        beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
        nop
        SUB len, 8*NBYTES               # subtract here for bgez loop
        .align 4
1:
        EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
        EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
        EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
        EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
        EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
        EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy)
        EXC( LOAD t6, UNIT(6)(src), .Ll_exc_copy)
        EXC( LOAD t7, UNIT(7)(src), .Ll_exc_copy)
        SUB len, len, 8*NBYTES
        ADD src, src, 8*NBYTES
        EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
        ADDC(sum, t0)
        EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
        ADDC(sum, t1)
        EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
        ADDC(sum, t2)
        EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
        ADDC(sum, t3)
        EXC( STORE t4, UNIT(4)(dst), .Ls_exc)
        ADDC(sum, t4)
        EXC( STORE t5, UNIT(5)(dst), .Ls_exc)
        ADDC(sum, t5)
        EXC( STORE t6, UNIT(6)(dst), .Ls_exc)
        ADDC(sum, t6)
        EXC( STORE t7, UNIT(7)(dst), .Ls_exc)
        ADDC(sum, t7)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 8*NBYTES
        bgez len, 1b
        .set noreorder
        ADD len, 8*NBYTES               # revert len (see above)
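
        /*
         * The bgez idiom above: len is pre-decremented by 8*NBYTES
         * before the loop, so bgez keeps iterating while at least one
         * full 8-unit chunk remains, and the ADD afterwards restores
         * the true residual count.
         */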

/*
 * len == the number of bytes left to copy < 8*NBYTES
 */
.Lcleanup_both_aligned:
#define rem t7
        beqz len, .Ldone
        sltu t0, len, 4*NBYTES
        bnez t0, .Lless_than_4units
        and rem, len, (NBYTES-1)        # rem = len % NBYTES
        /*
         * len >= 4*NBYTES
         */
        EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
        EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
        EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
        EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
        SUB len, len, 4*NBYTES
        ADD src, src, 4*NBYTES
        EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
        ADDC(sum, t0)
        EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
        ADDC(sum, t1)
        EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
        ADDC(sum, t2)
        EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
        ADDC(sum, t3)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 4*NBYTES
        beqz len, .Ldone
        .set noreorder

.Lless_than_4units:
        /*
         * rem = len % NBYTES
         */
        beq rem, len, .Lcopy_bytes
        nop
1:
        EXC( LOAD t0, 0(src), .Ll_exc)
        ADD src, src, NBYTES
        SUB len, len, NBYTES
        EXC( STORE t0, 0(dst), .Ls_exc)
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, NBYTES
        bne rem, len, 1b
        .set noreorder

        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
         * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst.  Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
         * This code should perform better than a simple loop on modern,
         * wide-issue mips processors because the code has fewer branches and
         * more instruction-level parallelism.
         */
#define bits t2
        beqz len, .Ldone
        ADD t1, dst, len                # t1 is just past last byte of dst
        li bits, 8*NBYTES
        SLL rem, len, 3                 # rem = number of bits to keep
        EXC( LOAD t0, 0(src), .Ll_exc)
        SUB bits, bits, rem             # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
        EXC( STREST t0, -1(t1), .Ls_exc)
        SHIFT_DISCARD_REVERT t0, t0, bits
        .set reorder
        ADDC(sum, t0)
        b .Ldone
        .set noreorder
.Ldst_unaligned:
        /*
         * dst is unaligned
         * t0 = src & ADDRMASK
         * t1 = dst & ADDRMASK; t1 > 0
         * len >= NBYTES
         *
         * Copy enough bytes to align dst
         * Set match = (src and dst have same alignment)
         */
#define match rem
        EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
        ADD t2, zero, NBYTES
        EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
        SUB t2, t2, t1                  # t2 = number of bytes copied
        xor match, t0, t1
        EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
        SLL t4, t1, 3                   # t4 = number of bits to discard
        SHIFT_DISCARD t3, t3, t4
        /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
        ADDC(sum, t3)
        beq len, t2, .Ldone
        SUB len, len, t2
        ADD dst, dst, t2
        beqz match, .Lboth_aligned
        ADD src, src, t2

.Lsrc_unaligned_dst_aligned:
        SRL t0, len, LOG_NBYTES+2       # +2 for 4 units/iter
        beqz t0, .Lcleanup_src_unaligned
        and rem, len, (4*NBYTES-1)      # rem = len % 4*NBYTES
1:
        /*
         * Avoid consecutive LD*'s to the same register since some mips
         * implementations can't issue them in the same cycle.
         * It's OK to load FIRST(N+1) before REST(N) because the two addresses
         * are to the same unit (unless src is aligned, but it's not).
         */
        EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
        EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
        SUB len, len, 4*NBYTES
        EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
        EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
        EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
        EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
        EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
        EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
        ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
        nop                             # improves slotting
#endif
        EXC( STORE t0, UNIT(0)(dst), .Ls_exc)
        ADDC(sum, t0)
        EXC( STORE t1, UNIT(1)(dst), .Ls_exc)
        ADDC(sum, t1)
        EXC( STORE t2, UNIT(2)(dst), .Ls_exc)
        ADDC(sum, t2)
        EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
        ADDC(sum, t3)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 4*NBYTES
        bne len, rem, 1b
        .set noreorder

.Lcleanup_src_unaligned:
        beqz len, .Ldone
        and rem, len, NBYTES-1          # rem = len % NBYTES
        beq rem, len, .Lcopy_bytes
        nop
1:
        EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
        EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
        ADD src, src, NBYTES
        SUB len, len, NBYTES
        EXC( STORE t0, 0(dst), .Ls_exc)
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, NBYTES
        bne len, rem, 1b
        .set noreorder
.Lcopy_bytes_checklen:
        beqz len, .Ldone
        nop

.Lcopy_bytes:
        /* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
        move t2, zero                   # partial word
        li t3, SHIFT_START              # shift
        /* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)                    \
        EXC( lbu t0, N(src), .Ll_exc_copy); \
        SUB len, len, 1;                \
        EXC( sb t0, N(dst), .Ls_exc);   \
        SLLV t0, t0, t3;                \
        addu t3, SHIFT_INC;             \
        beqz len, .Lcopy_bytes_done;    \
        or t2, t0
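
        /*
         * Each COPY_BYTE moves one byte and ORs it into t2 at the lane
         * selected by the running shift in t3, so t2 is rebuilt as the
         * word an aligned LOAD would have produced; the single ADDC at
         * .Lcopy_bytes_done then folds the reassembled word into sum.
         */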
        COPY_BYTE(0)
        COPY_BYTE(1)
#ifdef USE_DOUBLE
        COPY_BYTE(2)
        COPY_BYTE(3)
        COPY_BYTE(4)
        COPY_BYTE(5)
#endif
        EXC( lbu t0, NBYTES-2(src), .Ll_exc_copy)
        SUB len, len, 1
        EXC( sb t0, NBYTES-2(dst), .Ls_exc)
        SLLV t0, t0, t3
        or t2, t0
.Lcopy_bytes_done:
        ADDC(sum, t2)
.Ldone:
        /* fold checksum */
        .set push
        .set noat
#ifdef USE_DOUBLE
        dsll32 v1, sum, 0
        daddu sum, v1
        sltu v1, sum, v1
        dsra32 sum, sum, 0
        addu sum, v1
#endif
        sll v1, sum, 16
        addu sum, v1
        sltu v1, sum, v1
        srl sum, sum, 16
        addu sum, v1

        /* odd buffer alignment? */
        beqz odd, 1f
        nop
        sll v1, sum, 8
        srl sum, sum, 8
        or sum, v1
        andi sum, 0xffff
        .set pop
1:
        .set reorder
        ADDC(sum, psum)
        jr ra
        .set noreorder

.Ll_exc_copy:
        /*
         * Copy bytes from src until faulting load address (or until a
         * lb faults)
         *
         * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
         * may be more than a byte beyond the last address.
         * Hence, the lb below may get an exception.
         *
         * Assumes src < THREAD_BUADDR($28)
         */
        LOAD t0, TI_TASK($28)
        li t2, SHIFT_START
        LOAD t0, THREAD_BUADDR(t0)
1:
        EXC( lbu t1, 0(src), .Ll_exc)
        ADD src, src, 1
        sb t1, 0(dst)                   # can't fault -- we're copy_from_user
        SLLV t1, t1, t2
        addu t2, SHIFT_INC
        ADDC(sum, t1)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 1
        bne src, t0, 1b
        .set noreorder

.Ll_exc:
        LOAD t0, TI_TASK($28)
        nop
        LOAD t0, THREAD_BUADDR(t0)      # t0 is just past last good address
        nop
        SUB len, AT, t0                 # len number of uncopied bytes
        /*
         * Here's where we rely on src and dst being incremented in tandem,
         * see (3) above.
         * dst += (fault addr - src) to put dst at first byte to clear
         */
        ADD dst, t0                     # compute start address in a1
        SUB dst, src
        /*
         * Clear len bytes starting at dst.  Can't call __bzero because it
         * might modify len.  An inefficient loop for these rare times...
         */
        .set reorder                    /* DADDI_WAR */
        SUB src, len, 1
        beqz len, .Ldone
        .set noreorder
1:      sb zero, 0(dst)
        ADD dst, dst, 1
        .set push
        .set noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        bnez src, 1b
        SUB src, src, 1
#else
        li v1, 1
        bnez src, 1b
        SUB src, src, v1
#endif
        li v1, -EFAULT
        b .Ldone
        sw v1, (errptr)

.Ls_exc:
        li v0, -1                       /* invalid checksum */
        li v1, -EFAULT
        jr ra
        sw v1, (errptr)
        .set pop
        END(__csum_partial_copy_user)