tcp_offload.c

/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
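
/* tcp_gso_segment() splits a TSO "super-packet" into a chain of
 * MSS-sized segments in software, replicating the TCP header and
 * fixing up sequence numbers, flags and checksums for each piece.
 * The GSO engine invokes it when the device cannot (or may not be
 * trusted to) segment the packet itself.
 */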
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;
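
	/* The old total length is folded below into 16-bit ones-complement
	 * form (oldlen = ~skb->len).  An RFC 1624 style incremental update
	 * then lets each segment's checksum be patched by adding
	 * ~old_len + new_len, replacing the pseudo-header length term
	 * without re-summing the payload.
	 */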
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;
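
	/* GSO_ROBUST path: if the device could still handle this packet, we
	 * only got here because its GSO metadata came from an untrusted
	 * source (SKB_GSO_DODGY, e.g. a virtualization guest).  Validate
	 * gso_type, recompute gso_segs, and return NULL so the packet goes
	 * out unsegmented.
	 */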
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}
	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));
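
	/* Every non-final segment covers thlen + mss bytes, so delta is
	 * ~old_len + new_len in ones-complement form; adding it to the
	 * original th->check (and folding) yields the checksum that all
	 * full-sized segments share, computed once as newcheck below.
	 */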
	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			/* {tcp|sock}_wfree() use exact truesize accounting:
			 * sum(skb->truesize) MUST be exactly gso_skb->truesize.
			 * So we account mss bytes of 'true size' for each segment.
			 * The last segment will contain the remaining.
			 */
			skb->truesize = mss;
			gso_skb->truesize -= mss;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		swap(gso_skb->truesize, skb->truesize);
	}
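
	/* The last segment carries the remaining payload, usually less than
	 * mss, so its checksum cannot reuse newcheck: recompute its length
	 * from the actual data and apply the same incremental update.
	 */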
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
EXPORT_SYMBOL(tcp_gso_segment);
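
/* tcp_gro_receive() is the inverse operation: it tries to coalesce the
 * incoming skb with a held packet of the same flow on the GRO list.
 * Packets merge only when ports, relevant flags, TCP options and the
 * expected next sequence number all line up; anything else flushes the
 * flow up the stack instead.
 */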
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);
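
	/* Walk the GRO list looking for a packet of the same flow.  The
	 * source and destination ports are adjacent 16-bit fields, so a
	 * single 32-bit XOR compares both at once.
	 */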
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
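	/* Any difference that merging would hide forces a flush: CWR
	 * (an ECN congestion mark), flag changes other than FIN/PSH, a
	 * different ack sequence, differing TCP options, an empty or
	 * over-MSS payload, or data that does not start at the expected
	 * next sequence number.
	 */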
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}
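
	/* Merged successfully: propagate FIN/PSH from the newly merged
	 * segment into the coalesced packet's header so they are not lost.
	 */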
	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);
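
/* tcp_gro_complete() runs once a coalesced packet is handed up: it marks
 * the checksum as CHECKSUM_PARTIAL over the TCP header and restores GSO
 * metadata so the merged skb looks like a normal TSO packet to the rest
 * of the stack.
 */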
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
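
/* Prepare an outgoing GSO packet for checksum offload: zero the TCP
 * checksum and seed it with the IPv4 pseudo-header sum, so the device
 * (or software fallback) only has to sum the TCP header and payload.
 */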
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
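
/* IPv4-specific GRO entry point: verify the TCP checksum against the
 * pseudo-header before attempting to coalesce, since merged packets
 * can no longer be checksum-verified individually later on.
 */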
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;
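
	/* CHECKSUM_COMPLETE: the device already summed the packet, so only
	 * the pseudo-header needs folding in.  CHECKSUM_NONE: sum the whole
	 * TCP segment in software.  Either way, a bad checksum flushes the
	 * packet instead of merging it.
	 */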
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}
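
/* Finish an IPv4 GRO packet: recompute the pseudo-header checksum for
 * the merged length and tag the skb as TCPv4 GSO so it can be
 * re-segmented on transmit if needed.
 */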
static int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
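
/* Offload callbacks registered for IPPROTO_TCP; the inet layer
 * dispatches GSO/GRO work for IPv4 TCP packets through this table.
 */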
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	= tcp_v4_gso_send_check,
		.gso_segment	= tcp_gso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};
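
/* Registered at boot from the IPv4 offload init path;
 * inet_add_offload() installs the table in the slot for IPPROTO_TCP.
 */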
int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}