checksum_64.S

/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
/*
 * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
 * len is in words and is always >= 5.
 *
 * In practice len == 5, but this is not guaranteed. So this code does not
 * attempt to use doubleword instructions.
 */
_GLOBAL(ip_fast_csum)
        lwz     r0,0(r3)
        lwzu    r5,4(r3)
        addic.  r4,r4,-2
        addc    r0,r0,r5
        mtctr   r4
        blelr-
1:      lwzu    r4,4(r3)
        adde    r0,r0,r4
        bdnz    1b
        addze   r0,r0           /* add in final carry */
        rldicl  r4,r0,32,0      /* fold two 32-bit halves together */
        add     r0,r0,r4
        srdi    r0,r0,32
        rlwinm  r3,r0,16,0,31   /* fold two halves together */
        add     r3,r0,r3
        not     r3,r3
        srwi    r3,r3,16
        blr
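/*
 * For reference, a rough C sketch of what ip_fast_csum computes, assuming
 * a 64-bit unsigned long; the function name and the double 16-bit fold
 * below are illustrative, not part of this file:
 *
 *      unsigned short ip_fast_csum_ref(const unsigned int *buf,
 *                                      unsigned int len)
 *      {
 *              unsigned long sum = 0;
 *              unsigned int i;
 *
 *              for (i = 0; i < len; i++)       // sum the header words
 *                      sum += buf[i];
 *              sum = (sum & 0xffffffffUL) + (sum >> 32);  // fold 64 -> 32
 *              sum = (sum & 0xffff) + (sum >> 16);        // fold 32 -> 16
 *              sum = (sum & 0xffff) + (sum >> 16);        // add carry back in
 *              return ~sum & 0xffff;                      // 1's complement
 *      }
 */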
/*
 * Compute checksum of TCP or UDP pseudo-header:
 * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
 * No real gain trying to do this specially for 64 bit, but
 * the 32 bit addition may spill into the upper bits of
 * the doubleword so we still must fold it down from 64.
 */
_GLOBAL(csum_tcpudp_magic)
        rlwimi  r5,r6,16,0,15   /* put proto in upper half of len */
        addc    r0,r3,r4        /* add 4 32-bit words together */
        adde    r0,r0,r5
        adde    r0,r0,r7
        rldicl  r4,r0,32,0      /* fold 64 bit value */
        add     r0,r4,r0
        srdi    r0,r0,32
        rlwinm  r3,r0,16,0,31   /* fold two halves together */
        add     r3,r0,r3
        not     r3,r3
        srwi    r3,r3,16
        blr
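/*
 * A hedged C sketch of the pseudo-header sum above (the name
 * csum_tcpudp_ref is illustrative; a 64-bit unsigned long is assumed):
 *
 *      unsigned short csum_tcpudp_ref(unsigned int saddr, unsigned int daddr,
 *                                     unsigned int len, unsigned int proto,
 *                                     unsigned int sum)
 *      {
 *              unsigned long s = sum;
 *
 *              s += saddr;
 *              s += daddr;
 *              s += (proto << 16) + len;            // proto in upper half of len
 *              s = (s & 0xffffffffUL) + (s >> 32);  // fold 64 -> 32
 *              s = (s & 0xffff) + (s >> 16);        // fold 32 -> 16
 *              s = (s & 0xffff) + (s >> 16);        // add carry back in
 *              return ~s & 0xffff;
 *      }
 */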
/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * This code assumes at least halfword alignment, though the length
 * can be any number of bytes. The sum is accumulated in r5.
 *
 * csum_partial(r3=buff, r4=len, r5=sum)
 */
_GLOBAL(csum_partial)
        subi    r3,r3,8         /* we'll offset by 8 for the loads */
        srdi.   r6,r4,3         /* divide by 8 for doubleword count */
        addic   r5,r5,0         /* clear carry */
        beq     3f              /* if we're doing < 8 bytes */
        andi.   r0,r3,2         /* aligned on a word boundary already? */
        beq+    1f
        lhz     r6,8(r3)        /* do 2 bytes to get aligned */
        addi    r3,r3,2
        subi    r4,r4,2
        addc    r5,r5,r6
        srdi.   r6,r4,3         /* recompute number of doublewords */
        beq     3f              /* any left? */
1:      mtctr   r6
2:      ldu     r6,8(r3)        /* main sum loop */
        adde    r5,r5,r6
        bdnz    2b
        andi.   r4,r4,7         /* compute bytes left to sum after doublewords */
3:      cmpwi   0,r4,4          /* is at least a full word left? */
        blt     4f
        lwz     r6,8(r3)        /* sum this word */
        addi    r3,r3,4
        subi    r4,r4,4
        adde    r5,r5,r6
4:      cmpwi   0,r4,2          /* is at least a halfword left? */
        blt+    5f
        lhz     r6,8(r3)        /* sum this halfword */
        addi    r3,r3,2
        subi    r4,r4,2
        adde    r5,r5,r6
5:      cmpwi   0,r4,1          /* is at least a byte left? */
        bne+    6f
        lbz     r6,8(r3)        /* sum this byte */
        slwi    r6,r6,8         /* this byte is assumed to be the upper byte of a halfword */
        adde    r5,r5,r6
6:      addze   r5,r5           /* add in final carry */
        rldicl  r4,r5,32,0      /* fold two 32-bit halves together */
        add     r3,r4,r5
        srdi    r3,r3,32
        blr
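/*
 * A hedged C sketch of csum_partial's semantics (the name is illustrative;
 * big-endian byte order and a 64-bit unsigned long are assumed; the result
 * is a 32-bit partial sum, not yet complemented):
 *
 *      unsigned int csum_partial_ref(const unsigned char *buff, int len,
 *                                    unsigned int sum)
 *      {
 *              unsigned long s = sum;
 *
 *              for (; len > 1; len -= 2, buff += 2)
 *                      s += (buff[0] << 8) | buff[1];  // big-endian halfword
 *              if (len == 1)
 *                      s += buff[0] << 8;   // odd byte is the upper byte
 *              s = (s & 0xffffffffUL) + (s >> 32);  // fold 64 -> 32
 *              s = (s & 0xffffffffUL) + (s >> 32);  // pick up any last carry
 *              return s;
 *      }
 */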
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * This code needs to be reworked to take advantage of 64 bit sum+copy.
 * However, due to token-ring halfword alignment problems this will be very
 * tricky. For now we'll leave it until we instrument it somehow.
 *
 * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
 */
_GLOBAL(csum_partial_copy_generic)
        addic   r0,r6,0         /* clear carry; move sum into r0 */
        subi    r3,r3,4
        subi    r4,r4,4
        srwi.   r6,r5,2
        beq     3f              /* if we're doing < 4 bytes */
        andi.   r9,r4,2         /* Align dst to longword boundary */
        beq+    1f
81:     lhz     r6,4(r3)        /* do 2 bytes to get aligned */
        addi    r3,r3,2
        subi    r5,r5,2
91:     sth     r6,4(r4)
        addi    r4,r4,2
        addc    r0,r0,r6
        srwi.   r6,r5,2         /* # words to do */
        beq     3f
1:      mtctr   r6
82:     lwzu    r6,4(r3)        /* the bdnz has zero overhead, so it should */
92:     stwu    r6,4(r4)        /* be unnecessary to unroll this loop */
        adde    r0,r0,r6
        bdnz    82b
        andi.   r5,r5,3
3:      cmpwi   0,r5,2
        blt+    4f
83:     lhz     r6,4(r3)
        addi    r3,r3,2
        subi    r5,r5,2
93:     sth     r6,4(r4)
        addi    r4,r4,2
        adde    r0,r0,r6
4:      cmpwi   0,r5,1
        bne+    5f
84:     lbz     r6,4(r3)
94:     stb     r6,4(r4)
        slwi    r6,r6,8         /* Upper byte of word */
        adde    r0,r0,r6
5:      addze   r3,r0           /* add in final carry (unlikely with 64-bit regs) */
        rldicl  r4,r3,32,0      /* fold 64 bit value */
        add     r3,r4,r3
        srdi    r3,r3,32
        blr
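/*
 * In C terms the success path above behaves roughly like the sketch
 * below (csum_copy_ref is an illustrative name; csum_partial_ref is the
 * sketch given earlier; the fault handling that follows has no portable
 * C equivalent, so only the non-faulting case is shown):
 *
 *      unsigned int csum_copy_ref(const unsigned char *src,
 *                                 unsigned char *dst, int len,
 *                                 unsigned int sum)
 *      {
 *              memcpy(dst, src, len);          // copy the block
 *              return csum_partial_ref(dst, len, sum);  // then sum it
 *      }
 */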
/* These shouldn't go in the fixup section, since that would
   cause the ex_table addresses to get out of order. */
        .globl src_error_1
src_error_1:
        li      r6,0            /* zero the rest of dst */
        subi    r5,r5,2
95:     sth     r6,4(r4)
        addi    r4,r4,2
        srwi.   r6,r5,2         /* # words left to zero */
        beq     3f
        mtctr   r6
        .globl src_error_2
src_error_2:
        li      r6,0
96:     stwu    r6,4(r4)
        bdnz    96b
3:      andi.   r5,r5,3         /* trailing bytes */
        beq     src_error
        .globl src_error_3
src_error_3:
        li      r6,0
        mtctr   r5
        addi    r4,r4,3
97:     stbu    r6,1(r4)
        bdnz    97b
        .globl src_error
src_error:
        cmpdi   0,r7,0          /* was a src_err pointer supplied? */
        beq     1f
        li      r6,-EFAULT
        stw     r6,0(r7)
1:      addze   r3,r0           /* return the sum accumulated so far */
        blr
        .globl dst_error
dst_error:
        cmpdi   0,r8,0
        beq     1f
        li      r6,-EFAULT
        stw     r6,0(r8)
1:      addze   r3,r0
        blr
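/*
 * Exception table: each entry below pairs a load/store that may fault
 * (the numeric labels above) with its fixup handler. For example, if
 * the lhz at label 81 takes an access exception, execution resumes at
 * src_error_1, which zeroes the rest of dst and then reports -EFAULT
 * through *src_err.
 */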
        .section __ex_table,"a"
        .align  3
        .llong  81b,src_error_1
        .llong  91b,dst_error
        .llong  82b,src_error_2
        .llong  92b,dst_error
        .llong  83b,src_error_3
        .llong  93b,dst_error
        .llong  84b,src_error_3
        .llong  94b,dst_error
        .llong  95b,dst_error
        .llong  96b,dst_error
        .llong  97b,dst_error