checksum.S

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *			     handling.
 *		Andi Kleen,  add zeroing on error
 *			     converted to pure assembler
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/errno.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
 * unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
 */
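
/*
 * For reference only: a rough C sketch (not part of this file's build) of
 * what csum_partial computes on little-endian x86, assuming a 2-byte-aligned
 * buffer.  The helper name csum_partial_ref is made up for illustration.
 * Only the folded 16-bit checksum is guaranteed to match the assembly below;
 * the raw 32-bit return value may differ, since the assembly accumulates
 * 32-bit words with end-around carry.
 *
 *	unsigned int csum_partial_ref(const unsigned char *buff, int len,
 *				      unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len >= 2) {			// 16-bit LE words
 *			acc += buff[0] | (buff[1] << 8);
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len > 0)				// trailing byte
 *			acc += buff[0];
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);	// fold carries
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */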
.text
.align 4
.globl csum_partial

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

/*
 * Experiments with Ethernet and SLIP connections show that buff
 * is aligned on either a 2-byte or 4-byte boundary.  We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
csum_partial:
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
        testl $2, %esi          # Check alignment.
        jz 2f                   # Jump if alignment is ok.
        subl $2, %ecx           # Alignment uses up two bytes.
        jae 1f                  # Jump if we had at least two bytes.
        addl $2, %ecx           # ecx was < 2.  Deal with it.
        jmp 4f
1:      movw (%esi), %bx
        addl $2, %esi
        addw %bx, %ax
        adcl $0, %eax
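
# Main loop below: %ecx = len/32 passes of eight adds-with-carry (32 bytes
# per pass), then up to seven remaining dwords at label 3, then the last
# 0-3 bytes at label 4.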
2:
        movl %ecx, %edx
        shrl $5, %ecx           # %ecx = number of 32-byte blocks
        jz 2f
        testl %esi, %esi        # clear CF (shrl may have set it) before the adc chain
1:      movl (%esi), %ebx
        adcl %ebx, %eax
        movl 4(%esi), %ebx
        adcl %ebx, %eax
        movl 8(%esi), %ebx
        adcl %ebx, %eax
        movl 12(%esi), %ebx
        adcl %ebx, %eax
        movl 16(%esi), %ebx
        adcl %ebx, %eax
        movl 20(%esi), %ebx
        adcl %ebx, %eax
        movl 24(%esi), %ebx
        adcl %ebx, %eax
        movl 28(%esi), %ebx
        adcl %ebx, %eax
        lea 32(%esi), %esi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl %edx, %ecx
        andl $0x1c, %edx        # bytes left in whole dwords (0-28)
        je 4f
        shrl $2, %edx           # This clears CF
3:      adcl (%esi), %eax
        lea 4(%esi), %esi
        dec %edx
        jne 3b
        adcl $0, %eax
4:      andl $3, %ecx           # 0-3 trailing bytes
        jz 7f
        cmpl $2, %ecx
        jb 5f
        movw (%esi),%cx
        leal 2(%esi),%esi
        je 6f
        shll $16,%ecx
5:      movb (%esi),%cl
6:      addl %ecx,%eax
        adcl $0, %eax
7:
        popl %ebx
        popl %esi
        ret
#else

/* Version for PentiumII/PPro */

csum_partial:
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf
        testl $2, %esi
        jnz 30f
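
# Dispatch into the unrolled loop at 40/45 below.  %ebx is set to len & 0x7c,
# the bytes (a multiple of 4) left over after whole 128-byte passes; %esi is
# advanced past them and %ebx becomes a negative dword count.  Each
# "adcl off(%esi),%eax" in the unrolled block assembles to 3 bytes, so
# "lea 45f(%ebx,%ebx,2),%ebx" points N instructions (3*N bytes) back from
# label 45 and "jmp *%ebx" checksums exactly those N leftover dwords on the
# first pass; %ecx = len/128 then counts the remaining full passes.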
10:
        movl %ecx, %edx
        movl %ecx, %ebx
        andl $0x7c, %ebx
        shrl $7, %ecx
        addl %ebx,%esi
        shrl $2, %ebx
        negl %ebx
        lea 45f(%ebx,%ebx,2), %ebx
        testl %esi, %esi        # clear CF before the adc chain
        jmp *%ebx

        # Handle 2-byte-aligned regions
20:     addw (%esi), %ax
        lea 2(%esi), %esi
        adcl $0, %eax
        jmp 10b

30:     subl $2, %ecx
        ja 20b
        je 32f
        movzbl (%esi),%ebx      # csumming 1 byte, 2-aligned
        addl %ebx, %eax
        adcl $0, %eax
        jmp 80f
32:
        addw (%esi), %ax        # csumming 2 bytes, 2-aligned
        adcl $0, %eax
        jmp 80f

40:
        addl -128(%esi), %eax
        adcl -124(%esi), %eax
        adcl -120(%esi), %eax
        adcl -116(%esi), %eax
        adcl -112(%esi), %eax
        adcl -108(%esi), %eax
        adcl -104(%esi), %eax
        adcl -100(%esi), %eax
        adcl -96(%esi), %eax
        adcl -92(%esi), %eax
        adcl -88(%esi), %eax
        adcl -84(%esi), %eax
        adcl -80(%esi), %eax
        adcl -76(%esi), %eax
        adcl -72(%esi), %eax
        adcl -68(%esi), %eax
        adcl -64(%esi), %eax
        adcl -60(%esi), %eax
        adcl -56(%esi), %eax
        adcl -52(%esi), %eax
        adcl -48(%esi), %eax
        adcl -44(%esi), %eax
        adcl -40(%esi), %eax
        adcl -36(%esi), %eax
        adcl -32(%esi), %eax
        adcl -28(%esi), %eax
        adcl -24(%esi), %eax
        adcl -20(%esi), %eax
        adcl -16(%esi), %eax
        adcl -12(%esi), %eax
        adcl -8(%esi), %eax
        adcl -4(%esi), %eax
45:
        lea 128(%esi), %esi
        adcl $0, %eax
        dec %ecx
        jge 40b
        movl %edx, %ecx
50:     andl $3, %ecx
        jz 80f

        # Handle the last 1-3 bytes without jumping
        notl %ecx               # 1->2, 2->1, 3->0, higher bits are masked
        movl $0xffffff,%ebx     # by the shll and shrl instructions
        shll $3,%ecx
        shrl %cl,%ebx
        andl -128(%esi),%ebx    # esi is 4-aligned so should be ok
        addl %ebx,%eax
        adcl $0,%eax
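
# Worked example of the masking above: for len & 3 == 2, notl gives
# 0xfffffffd and shll $3 gives 0xffffffe8; shrl uses only the low 5 bits
# of %cl (8), so %ebx = 0xffffff >> 8 = 0xffff, keeping exactly the two
# trailing bytes of the dword at -128(%esi) (the dword just past the data
# already summed, since %esi was advanced 128 bytes beyond it at label 45).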
80:
        popl %ebx
        popl %esi
        ret

#endif

/*
 * unsigned int csum_partial_copy_generic (const char *src, char *dst,
 *				int len, int sum, int *src_err_ptr, int *dst_err_ptr)
 */

/*
 * Copy from ds while checksumming, otherwise like csum_partial.
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * so that a custom exception handler can be invoked for each access type.
 *
 * FIXME: could someone double-check whether I haven't mixed up some SRC and
 *	  DST definitions? It's damn hard to trigger all cases.  I hope I got
 *	  them all but there's no guarantee.
 */

#define SRC(y...)			\
	9999: y;			\
	.section __ex_table, "a";	\
	.long 9999b, 6001f	;	\
	.previous

#define DST(y...)			\
	9999: y;			\
	.section __ex_table, "a";	\
	.long 9999b, 6002f	;	\
	.previous
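
/*
 * For reference, SRC(movl (%esi), %ebx) expands to roughly:
 *
 *	9999:	movl (%esi), %ebx
 *		.section __ex_table, "a"
 *		.long 9999b, 6001f	# (faulting address, fixup address)
 *		.previous
 *
 * so every guarded access gets an exception-table entry that sends a fault
 * on it to the source fixup (6001) or destination fixup (6002) below.
 */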
.align 4
.globl csum_partial_copy_generic_i386

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

#define ARGBASE 16
#define FP	12
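
# Stack layout: "subl $4,%esp" makes a scratch slot and three registers are
# pushed below it, so the arguments start 16 bytes above %esp (ARGBASE) and
# the scratch word used to spill the length is at 12(%esp) (FP).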
csum_partial_copy_generic_i386:
        subl $4,%esp
        pushl %edi
        pushl %esi
        pushl %ebx
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
        movl ARGBASE+8(%esp),%edi       # dst
        testl $2, %edi                  # Check alignment.
        jz 2f                           # Jump if alignment is ok.
        subl $2, %ecx                   # Alignment uses up two bytes.
        jae 1f                          # Jump if we had at least two bytes.
        addl $2, %ecx                   # ecx was < 2.  Deal with it.
        jmp 4f
SRC(1:  movw (%esi), %bx        )
        addl $2, %esi
DST(    movw %bx, (%edi)        )
        addl $2, %edi
        addw %bx, %ax
        adcl $0, %eax
2:
        movl %ecx, FP(%esp)
        shrl $5, %ecx
        jz 2f
        testl %esi, %esi        # clear CF (shrl may have set it) before the adc chain
SRC(1:  movl (%esi), %ebx       )
SRC(    movl 4(%esi), %edx      )
        adcl %ebx, %eax
DST(    movl %ebx, (%edi)       )
        adcl %edx, %eax
DST(    movl %edx, 4(%edi)      )

SRC(    movl 8(%esi), %ebx      )
SRC(    movl 12(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 8(%edi)      )
        adcl %edx, %eax
DST(    movl %edx, 12(%edi)     )

SRC(    movl 16(%esi), %ebx     )
SRC(    movl 20(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 16(%edi)     )
        adcl %edx, %eax
DST(    movl %edx, 20(%edi)     )

SRC(    movl 24(%esi), %ebx     )
SRC(    movl 28(%esi), %edx     )
        adcl %ebx, %eax
DST(    movl %ebx, 24(%edi)     )
        adcl %edx, %eax
DST(    movl %edx, 28(%edi)     )

        lea 32(%esi), %esi
        lea 32(%edi), %edi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl FP(%esp), %edx
        movl %edx, %ecx
        andl $0x1c, %edx
        je 4f
        shrl $2, %edx           # This clears CF
SRC(3:  movl (%esi), %ebx       )
        adcl %ebx, %eax
DST(    movl %ebx, (%edi)       )
        lea 4(%esi), %esi
        lea 4(%edi), %edi
        dec %edx
        jne 3b
        adcl $0, %eax
4:      andl $3, %ecx
        jz 7f
        cmpl $2, %ecx
        jb 5f
SRC(    movw (%esi), %cx        )
        leal 2(%esi), %esi
DST(    movw %cx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%ecx
SRC(5:  movb (%esi), %cl        )
DST(    movb %cl, (%edi)        )
6:      addl %ecx, %eax
        adcl $0, %eax
7:
5000:

# Exception handler:
.section .fixup, "ax"

6001:
        movl ARGBASE+20(%esp), %ebx     # src_err_ptr
        movl $-EFAULT, (%ebx)

        # zero the complete destination - computing the rest
        # is too much work
        movl ARGBASE+8(%esp), %edi      # dst
        movl ARGBASE+12(%esp), %ecx     # len
        xorl %eax,%eax
        rep ; stosb

        jmp 5000b

6002:
        movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
        movl $-EFAULT,(%ebx)
        jmp 5000b

.previous

        popl %ebx
        popl %esi
        popl %edi
        popl %ecx                       # equivalent to addl $4,%esp
        ret
#else

/* Version for PentiumII/PPro */

/*
 * ROUND1 starts a 64-byte block with addl, so it does not consume the stale
 * carry flag left by the loop bookkeeping (the previous block's carry was
 * already folded in at label 3); the remaining ROUNDs chain the carry with adcl.
 */
#define ROUND1(x) \
	SRC(movl x(%esi), %ebx	) ;	\
	addl %ebx, %eax		  ;	\
	DST(movl %ebx, x(%edi)	) ;

#define ROUND(x) \
	SRC(movl x(%esi), %ebx	) ;	\
	adcl %ebx, %eax		  ;	\
	DST(movl %ebx, x(%edi)	) ;

#define ARGBASE 12
csum_partial_copy_generic_i386:
        pushl %ebx
        pushl %edi
        pushl %esi
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst
        movl ARGBASE+12(%esp),%ecx      #len
        movl ARGBASE+16(%esp),%eax      #sum
#       movl %ecx, %edx
        movl %ecx, %ebx
        movl %esi, %edx
        shrl $6, %ecx           # %ecx = number of full 64-byte blocks
        andl $0x3c, %ebx        # %ebx = leftover dword bytes (0-60)
        negl %ebx
        subl %ebx, %esi         # advance src/dst past the leftover (%ebx is negative)
        subl %ebx, %edi
        lea -1(%esi),%edx
        andl $-32,%edx          # 32-byte-aligned pointer for the touch loads
        lea 3f(%ebx,%ebx), %ebx
        testl %esi, %esi        # clear CF before the adc chain
        jmp *%ebx
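
# The loop below copies and checksums one 64-byte block per pass.  %ebx was
# set to -(len & 0x3c) and %esi/%edi were pre-advanced past those leftover
# bytes; each ROUND expands to 8 bytes of code, so "lea 3f(%ebx,%ebx),%ebx"
# plus "jmp *%ebx" enters the ROUND chain at just the right point to handle
# the leftover dwords on the first pass.  The two movb loads at the top of
# each full pass touch both 32-byte cache lines of the source block,
# presumably to prefetch them and take any fault before the copy begins.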
1:      addl $64,%esi
        addl $64,%edi
        SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
        ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
        ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
        ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
        ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
3:      adcl $0,%eax
        addl $64, %edx
        dec %ecx
        jge 1b
4:      movl ARGBASE+12(%esp),%edx      #len
        andl $3, %edx
        jz 7f
        cmpl $2, %edx
        jb 5f
SRC(    movw (%esi), %dx        )
        leal 2(%esi), %esi
DST(    movw %dx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%edx
5:
SRC(    movb (%esi), %dl        )
DST(    movb %dl, (%edi)        )
6:      addl %edx, %eax
        adcl $0, %eax
7:

.section .fixup, "ax"
6001:   movl ARGBASE+20(%esp), %ebx     # src_err_ptr
        movl $-EFAULT, (%ebx)
        # zero the complete destination (computing the rest is too much work)
        movl ARGBASE+8(%esp),%edi       # dst
        movl ARGBASE+12(%esp),%ecx      # len
        xorl %eax,%eax
        rep; stosb
        jmp 7b
6002:   movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
        movl $-EFAULT, (%ebx)
        jmp 7b
.previous

        popl %esi
        popl %edi
        popl %ebx
        ret

#undef ROUND
#undef ROUND1

#endif