/*
 * xfrm4_input.c
 *
 * Changes:
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific portion
 *      Derek Atkins <derek@ihtfp.com>
 *              Add Encapsulation support
 *
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>
#include <net/xfrm.h>

#ifdef CONFIG_NETFILTER
static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
{
        if (skb->dst == NULL) {
                const struct iphdr *iph = ip_hdr(skb);

                if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
                                   skb->dev))
                        goto drop;
        }
        return dst_input(skb);
drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}
#endif
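
/* Main IPv4 IPsec input path.  Walks the chain of transforms on the
 * packet: for each SPI it looks up the matching xfrm_state, runs the
 * replay and expiry checks, lets the transform type (ESP/AH/IPComp)
 * and mode (transport/tunnel) strip their headers, and collects the
 * state for the secpath.  'spi' may be zero, in which case it is
 * parsed from the packet; 'encap_type' is non-zero for UDP-encapsulated
 * ESP delivered via xfrm4_udp_encap_rcv() below.
 */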
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
                    int encap_type)
{
        int err;
        __be32 seq;
        struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
        struct xfrm_state *x;
        int xfrm_nr = 0;
        int decaps = 0;
        unsigned int nhoff = offsetof(struct iphdr, protocol);

        seq = 0;
        if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
                goto drop;
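
        /* Loop over the nested transforms.  Each iteration consumes one
         * SPI/protocol pair: the matching state is looked up by outer
         * destination address, its input handlers strip their headers,
         * and the next header is then parsed from what is now the
         * outermost payload.  The loop ends once a tunnel-mode state has
         * fully decapsulated the packet or once the remaining payload no
         * longer carries an IPsec header; any error drops the packet.
         */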
        do {
                const struct iphdr *iph = ip_hdr(skb);

                if (xfrm_nr == XFRM_MAX_DEPTH)
                        goto drop;

                x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
                                      nexthdr, AF_INET);
                if (x == NULL)
                        goto drop;

                spin_lock(&x->lock);
                if (unlikely(x->km.state != XFRM_STATE_VALID))
                        goto drop_unlock;

                if ((x->encap ? x->encap->encap_type : 0) != encap_type)
                        goto drop_unlock;

                if (x->props.replay_window && xfrm_replay_check(x, seq))
                        goto drop_unlock;

                if (xfrm_state_check_expire(x))
                        goto drop_unlock;

                nexthdr = x->type->input(x, skb);
                if (nexthdr <= 0)
                        goto drop_unlock;

                skb_network_header(skb)[nhoff] = nexthdr;

                /* only the first xfrm gets the encap type */
                encap_type = 0;

                if (x->props.replay_window)
                        xfrm_replay_advance(x, seq);

                x->curlft.bytes += skb->len;
                x->curlft.packets++;

                spin_unlock(&x->lock);

                xfrm_vec[xfrm_nr++] = x;

                if (x->outer_mode->input(x, skb))
                        goto drop;

                if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
                        decaps = 1;
                        break;
                }

                err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
                if (err < 0)
                        goto drop;
        } while (!err);
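
        /* All transforms were applied.  Attach the consumed xfrm_states
         * to the skb's sec_path so that the xfrm policy checks on the
         * final receive path can verify the packet really went through
         * the required transformations.  The sec_path may be shared
         * between skb clones, hence the copy-on-write below.
         */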
        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                struct sec_path *sp;

                sp = secpath_dup(skb->sp);
                if (!sp)
                        goto drop;
                if (skb->sp)
                        secpath_put(skb->sp);
                skb->sp = sp;
        }
        if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
                goto drop;

        memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
               xfrm_nr * sizeof(xfrm_vec[0]));
        skb->sp->len += xfrm_nr;

        nf_reset(skb);
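
        /* A tunnel-mode decapsulation produced a brand new inner packet:
         * drop the stale route attached to the outer packet and feed the
         * skb back through netif_rx() so it gets a fresh input routing
         * decision.  Transport-mode results keep flowing down the normal
         * path: re-run the PRE_ROUTING netfilter hooks on the transformed
         * packet, or, without netfilter, return minus the inner protocol
         * so the caller can resubmit it to the matching protocol handler.
         */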
        if (decaps) {
                dst_release(skb->dst);
                skb->dst = NULL;
                netif_rx(skb);
                return 0;
        } else {
#ifdef CONFIG_NETFILTER
                __skb_push(skb, skb->data - skb_network_header(skb));
                ip_hdr(skb)->tot_len = htons(skb->len);
                ip_send_check(ip_hdr(skb));

                NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
                        xfrm4_rcv_encap_finish);
                return 0;
#else
                return -ip_hdr(skb)->protocol;
#endif
        }

drop_unlock:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
drop:
        while (--xfrm_nr >= 0)
                xfrm_state_put(xfrm_vec[xfrm_nr]);

        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(xfrm4_rcv_encap);

/* If it's a keepalive packet, then just eat it.
 * If it's an encapsulated packet, then pass it to the
 * IPsec xfrm input.
 * Returns 0 if skb passed to xfrm or was dropped.
 * Returns >0 if skb should be passed to UDP.
 * Returns <0 if skb should be resubmitted (-ret is protocol)
 */
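
/* UDP encapsulation wire formats (RFC 3948, plus the older draft-style
 * "non-IKE marker" variant):
 *
 *   NAT-keepalive:              UDP header | single 0xFF octet
 *   UDP_ENCAP_ESPINUDP:         UDP header | ESP header (SPI != 0) | ...
 *   IKE on the same port:       UDP header | 4 zero bytes (non-ESP marker) | IKE
 *   UDP_ENCAP_ESPINUDP_NON_IKE: UDP header | 8 zero bytes (non-IKE marker) | ESP
 *
 * Keepalives are eaten here, IKE is handed back to UDP, and ESP has the
 * UDP header (and any marker) stripped before going to xfrm4_rcv_encap().
 */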
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct udp_sock *up = udp_sk(sk);
        struct udphdr *uh;
        struct iphdr *iph;
        int iphlen, len;
        int ret;
        __u8 *udpdata;
        __be32 *udpdata32;
        __u16 encap_type = up->encap_type;

        /* if this is not encapsulated socket, then just return now */
        if (!encap_type)
                return 1;

        /* If this is a paged skb, make sure we pull up
         * whatever data we need to look at. */
        len = skb->len - sizeof(struct udphdr);
        if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
                return 1;

        /* Now we can get the pointers */
        uh = udp_hdr(skb);
        udpdata = (__u8 *)uh + sizeof(struct udphdr);
        udpdata32 = (__be32 *)udpdata;

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
                /* Check if this is a keepalive packet.  If so, eat it. */
                if (len == 1 && udpdata[0] == 0xff) {
                        goto drop;
                } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
                        /* ESP Packet without Non-ESP header */
                        len = sizeof(struct udphdr);
                } else
                        /* Must be an IKE packet.. pass it through */
                        return 1;
                break;
        case UDP_ENCAP_ESPINUDP_NON_IKE:
                /* Check if this is a keepalive packet.  If so, eat it. */
                if (len == 1 && udpdata[0] == 0xff) {
                        goto drop;
                } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
                           udpdata32[0] == 0 && udpdata32[1] == 0) {
                        /* ESP Packet with Non-IKE marker */
                        len = sizeof(struct udphdr) + 2 * sizeof(u32);
                } else
                        /* Must be an IKE packet.. pass it through */
                        return 1;
                break;
        }

        /* At this point we are sure that this is an ESPinUDP packet,
         * so we need to remove 'len' bytes from the packet (the UDP
         * header and optional ESP marker bytes) and then modify the
         * protocol to ESP, and then call into the transform receiver.
         */
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                goto drop;

        /* Now we can update and verify the packet length... */
        iph = ip_hdr(skb);
        iphlen = iph->ihl << 2;
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        if (skb->len < iphlen + len) {
                /* packet is too small!?! */
                goto drop;
        }

        /* pull the data buffer up to the ESP header and set the
         * transport header to point to ESP.  Keep UDP on the stack
         * for later.
         */
        __skb_pull(skb, len);
        skb_reset_transport_header(skb);

        /* process ESP */
        ret = xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);
        return ret;

drop:
        kfree_skb(skb);
        return 0;
}
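
/* Generic entry point for IPsec packets received without UDP
 * encapsulation (the ESP/AH/IPComp protocol handlers).  The SPI is
 * passed as zero, i.e. "parse it from the packet"; xfrm4_rcv_spi()
 * itself lives outside this file.
 */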
int xfrm4_rcv(struct sk_buff *skb)
{
        return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
}
EXPORT_SYMBOL(xfrm4_rcv);