
/*
 *  linux/include/asm-arm/checksum.h
 *
 *  IP checksum routines
 *
 *  Copyright (C) Original authors of ../asm-i386/checksum.h
 *  Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
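
/*
 * Illustrative sketch of chaining csum_partial() over fragments and folding
 * at the end; "hdr", "payload" and their lengths are hypothetical local
 * variables, not part of this header.
 *
 *      unsigned int sum;
 *      unsigned short csum;
 *
 *      sum  = csum_partial(hdr, hdr_len, 0);
 *      sum  = csum_partial(payload, payload_len, sum);  // feed the partial sum back in
 *      csum = csum_fold(sum);                           // 16-bit result, already complemented
 */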
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or even
 * better, 64-bit) boundary
 */

unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);

unsigned int
csum_partial_copy_from_user(const char __user *src, char *dst, int len, int sum, int *err_ptr);
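
/*
 * Illustrative sketch of copy-and-checksum from user space; "ubuf", "kbuf"
 * and "len" are hypothetical, and the error reporting shown (err_ptr set on
 * a faulting user access) follows the comment above.
 *
 *      int err = 0;
 *      unsigned int sum;
 *
 *      sum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
 *      if (err)
 *              return -EFAULT;         // the user pointer faulted during the copy
 */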
/*
 * This is the old (and unsafe) way of doing checksums; a warning message will
 * be printed if it is used and an exception occurs.
 *
 * this function should go away after some time.
 */
#define csum_partial_copy(src,dst,len,sum)      csum_partial_copy_nocheck(src,dst,len,sum)
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline unsigned short
ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
        unsigned int sum, tmp1;

        __asm__ __volatile__(
        "ldr    %0, [%1], #4            @ ip_fast_csum          \n\
        ldr     %3, [%1], #4            \n\
        sub     %2, %2, #5              \n\
        adds    %0, %0, %3              \n\
        ldr     %3, [%1], #4            \n\
        adcs    %0, %0, %3              \n\
        ldr     %3, [%1], #4            \n\
1:      adcs    %0, %0, %3              \n\
        ldr     %3, [%1], #4            \n\
        tst     %2, #15                 @ do this carefully     \n\
        subne   %2, %2, #1              @ without destroying    \n\
        bne     1b                      @ the carry flag        \n\
        adcs    %0, %0, %3              \n\
        adc     %0, %0, #0              \n\
        adds    %0, %0, %0, lsl #16     \n\
        addcs   %0, %0, #0x10000        \n\
        mvn     %0, %0                  \n\
        mov     %0, %0, lsr #16"
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
        : "1" (iph), "2" (ihl)
        : "cc", "memory");
        return sum;
}
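
/*
 * Illustrative sketch of validating a received IPv4 header with
 * ip_fast_csum(); "iph" is a hypothetical pointer to a 32-bit aligned
 * struct iphdr of at least iph->ihl * 4 bytes.
 *
 *      if (ip_fast_csum((unsigned char *)iph, iph->ihl) != 0)
 *              goto drop;              // the checksum over a valid header is 0
 */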
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline unsigned int
csum_fold(unsigned int sum)
{
        __asm__(
        "adds   %0, %1, %1, lsl #16     @ csum_fold             \n\
        addcs   %0, %0, #0x10000"
        : "=r" (sum)
        : "r" (sum)
        : "cc");
        return (~sum) >> 16;
}
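
/*
 * Worked example of the fold (hypothetical value): for sum = 0x0001ffff the
 * high and low halfwords add to 0x10000, the end-around carry turns that
 * into 0x0001, and the complement gives a final 16-bit checksum of 0xfffe.
 */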
/*
 * Add the IPv4 pseudo-header (source/destination address, protocol and
 * length) into a running 32-bit sum without folding it.
 */
static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
                   unsigned int proto, unsigned int sum)
{
        __asm__(
        "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\
        adcs    %0, %0, %3      \n\
        adcs    %0, %0, %4      \n\
        adcs    %0, %0, %5      \n\
        adc     %0, %0, #0"
        : "=&r"(sum)
        : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
        : "cc");
        return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
                  unsigned int proto, unsigned int sum)
{
        __asm__(
        "adds   %0, %1, %2      @ csum_tcpudp_magic     \n\
        adcs    %0, %0, %3      \n\
        adcs    %0, %0, %4      \n\
        adcs    %0, %0, %5      \n\
        adc     %0, %0, #0      \n\
        adds    %0, %0, %0, lsl #16     \n\
        addcs   %0, %0, #0x10000        \n\
        mvn     %0, %0"
        : "=&r"(sum)
        : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
        : "cc");
        return sum >> 16;
}
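
/*
 * Illustrative sketch of filling in a UDP checksum with the helpers above;
 * "uh", "saddr", "daddr" and "ulen" are hypothetical locals, and uh->check
 * is assumed to be zero while the datagram is summed.
 *
 *      unsigned int sum;
 *
 *      sum = csum_partial((unsigned char *)uh, ulen, 0);
 *      uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, sum);
 *      if (uh->check == 0)
 *              uh->check = 0xffff;     // RFC 768: a transmitted zero means "no checksum"
 */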
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
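
/*
 * Illustrative sketch of the ICMP-style use mentioned above; "icmph" and
 * "msg_len" are hypothetical, and the checksum field must be zero while the
 * message is being summed.
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum((unsigned char *)icmph, msg_len);
 */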
#define _HAVE_ARCH_IPV6_CSUM
extern unsigned long
__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
                  __u32 proto, unsigned int sum);

static inline unsigned short int
csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
                unsigned short proto, unsigned int sum)
{
        return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
                                           htonl(proto), sum));
}
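
/*
 * Illustrative IPv6 counterpart of the UDP example above; "ip6", "uh" and
 * "ulen" are hypothetical, and the addresses come straight from the packet's
 * IPv6 header.
 *
 *      uh->check = csum_ipv6_magic(&ip6->saddr, &ip6->daddr, ulen, IPPROTO_UDP,
 *                                  csum_partial((unsigned char *)uh, ulen, 0));
 */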
#endif