checksum_no.h

#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
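/*
 * Usage sketch (illustrative only; the fragment names are made up and not
 * part of this header): a caller can checksum a payload in pieces by feeding
 * each partial result back in, then fold the 32-bit accumulator to 16 bits:
 *
 *      __wsum sum = 0;
 *      sum = csum_partial(frag1, frag1_len, sum);      (even length)
 *      sum = csum_partial(frag2, frag2_len, sum);      (last fragment, may be odd)
 *      __sum16 check = csum_fold(sum);
 */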
/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                 int len, __wsum sum);

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern __wsum csum_partial_copy_from_user(const void __user *src,
                                          void *dst, int len, __wsum sum,
                                          int *csum_err);

__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
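/*
 * Usage sketch (illustrative; iph is assumed to be a struct iphdr pointer as
 * in the generic IPv4 code): ihl is the header length in 32-bit words.  To
 * generate the header checksum, clear the check field first; to verify a
 * received header, sum it as-is and expect zero:
 *
 *      iph->check = 0;
 *      iph->check = ip_fast_csum(iph, iph->ihl);       (generate)
 *
 *      if (ip_fast_csum(iph, iph->ihl) != 0)           (verify)
 *              the header is corrupt
 */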
/*
 * Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp = (__force u32)sum;

#ifdef CONFIG_COLDFIRE
        tmp = (tmp & 0xffff) + (tmp >> 16);
        tmp = (tmp & 0xffff) + (tmp >> 16);
        return (__force __sum16)~tmp;
#else
        __asm__("swap %1\n\t"
                "addw %1, %0\n\t"
                "clrw %1\n\t"
                "addxw %1, %0"
                : "=&d" (sum), "=&d" (tmp)
                : "0" (sum), "1" (sum));
        return (__force __sum16)~sum;
#endif
}
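/*
 * Worked example of the fold (following the C path above): with
 * sum = 0x0001ffff the first reduction gives 0xffff + 0x0001 = 0x10000, so a
 * second pass is needed to add the carry back in: 0x0000 + 0x0001 = 0x0001,
 * and the complemented 16-bit result is 0xfffe.  The asm variant performs
 * the same end-around-carry addition with swap/addw/addxw.
 */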
/*
 * computes the checksum of the TCP/UDP pseudo-header;
 * csum_tcpudp_magic returns a 16-bit checksum, already complemented,
 * while csum_tcpudp_nofold returns the unfolded 32-bit sum
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
                   unsigned short proto, __wsum sum)
{
        __asm__ ("addl %1,%0\n\t"
                 "addxl %4,%0\n\t"
                 "addxl %5,%0\n\t"
                 "clrl %1\n\t"
                 "addxl %1,%0"
                 : "=&d" (sum), "=&d" (saddr)
                 : "0" (daddr), "1" (saddr), "d" (len + proto),
                   "d" (sum));
        return sum;
}

static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
                  unsigned short proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
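/*
 * Usage sketch (illustrative; the buffer and length names are made up): a
 * transport checksum is normally built by summing the segment with
 * csum_partial() and then mixing in the pseudo-header, e.g. for UDP:
 *
 *      __wsum csum = csum_partial(udp_hdr_and_payload, len, 0);
 *      __sum16 check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
 */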
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
extern __sum16 ip_compute_csum(const void *buff, int len);

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
                __u32 len, unsigned short proto, __wsum sum)
{
        register unsigned long tmp;

        __asm__("addl %2@,%0\n\t"
                "movel %2@(4),%1\n\t"
                "addxl %1,%0\n\t"
                "movel %2@(8),%1\n\t"
                "addxl %1,%0\n\t"
                "movel %2@(12),%1\n\t"
                "addxl %1,%0\n\t"
                "movel %3@,%1\n\t"
                "addxl %1,%0\n\t"
                "movel %3@(4),%1\n\t"
                "addxl %1,%0\n\t"
                "movel %3@(8),%1\n\t"
                "addxl %1,%0\n\t"
                "movel %3@(12),%1\n\t"
                "addxl %1,%0\n\t"
                "addxl %4,%0\n\t"
                "clrl %1\n\t"
                "addxl %1,%0"
                : "=&d" (sum), "=&d" (tmp)
                : "a" (saddr), "a" (daddr), "d" (len + proto),
                  "0" (sum));

        return csum_fold(sum);
}
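/*
 * Usage sketch (illustrative; ip6h is assumed to be a struct ipv6hdr pointer):
 * the IPv6 pseudo-header covers both 128-bit addresses plus len and proto, so
 * verifying a received TCP segment can look like:
 *
 *      __wsum csum = csum_partial(tcp_hdr_and_payload, len, 0);
 *      if (csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, len, IPPROTO_TCP, csum))
 *              the checksum is bad
 */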
#endif /* _M68K_CHECKSUM_H */