gre.c

/*
 * GRE over IPv4 demultiplexer driver
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>
/* One handler slot per GRE version; written under gre_proto_lock,
 * read under RCU from the receive and error paths.
 */
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static DEFINE_SPINLOCK(gre_proto_lock);

/* Mandatory part of every GRE header: flags/version word + protocol type. */
struct gre_base_hdr {
	__be16	flags;
	__be16	protocol;
};

/* Each optional GRE field (checksum, key, sequence number) is 4 bytes. */
#define GRE_HEADER_SECTION 4
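
/*
 * Register the handler for one GRE version slot (GREPROTO_CISCO for
 * classic GRE, GREPROTO_PPTP for PPTP's version 1).  Usage sketch,
 * illustrative only and not part of this file; the names below are
 * hypothetical placeholders:
 *
 *	static const struct gre_protocol my_gre_proto = {
 *		.handler     = my_gre_rcv,
 *		.err_handler = my_gre_err,
 *	};
 *
 *	if (gre_add_protocol(&my_gre_proto, GREPROTO_PPTP) < 0)
 *		return -EAGAIN;
 *	...
 *	gre_del_protocol(&my_gre_proto, GREPROTO_PPTP);
 */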
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		goto err_out;

	spin_lock(&gre_proto_lock);
	if (gre_proto[version])
		goto err_out_unlock;

	RCU_INIT_POINTER(gre_proto[version], proto);
	spin_unlock(&gre_proto_lock);
	return 0;

err_out_unlock:
	spin_unlock(&gre_proto_lock);
err_out:
	return -1;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);
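
/*
 * Unregister a previously added handler.  synchronize_rcu() ensures no
 * CPU is still inside the handler via gre_rcv()/gre_err() when this
 * returns, so the caller may safely free the gre_protocol afterwards.
 */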
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		goto err_out;

	spin_lock(&gre_proto_lock);
	if (rcu_dereference_protected(gre_proto[version],
			lockdep_is_held(&gre_proto_lock)) != proto)
		goto err_out_unlock;

	RCU_INIT_POINTER(gre_proto[version], NULL);
	spin_unlock(&gre_proto_lock);
	synchronize_rcu();
	return 0;

err_out_unlock:
	spin_unlock(&gre_proto_lock);
err_out:
	return -1;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
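
/*
 * Demultiplex an incoming GRE packet: read the version bits from the
 * second byte of the GRE header and hand the skb to the handler
 * registered for that version, dropping it if no handler is present.
 */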
static int gre_rcv(struct sk_buff *skb)
{
	const struct gre_protocol *proto;
	u8 ver;
	int ret;

	/* 4-byte base header plus the optional checksum and key words. */
	if (!pskb_may_pull(skb, 12))
		goto drop;

	ver = skb->data[1]&0x7f;
	if (ver >= GREPROTO_MAX)
		goto drop;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (!proto || !proto->handler)
		goto drop_unlock;
	ret = proto->handler(skb);
	rcu_read_unlock();
	return ret;

drop_unlock:
	rcu_read_unlock();
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
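
/*
 * ICMP errors for GRE packets arrive here with skb->data pointing at
 * the embedded IPv4 header of the offending packet; the GRE version is
 * read from just past that header and the error is forwarded to the
 * matching sub-protocol's err_handler, if any.
 */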
static void gre_err(struct sk_buff *skb, u32 info)
{
	const struct gre_protocol *proto;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;

	if (ver >= GREPROTO_MAX)
		return;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (proto && proto->err_handler)
		proto->err_handler(skb, info);
	rcu_read_unlock();
}
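
/*
 * GSO path for GRE-encapsulated packets: strip the GRE header (whose
 * length depends on the CSUM/KEY/SEQ flags), segment the inner packet
 * against the device's hw_enc_features, then push the tunnel headers
 * back onto every resulting segment, recomputing the GRE checksum when
 * the CSUM flag is set.
 */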
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl = GRE_HEADER_SECTION;
	struct gre_base_hdr *greh;
	int mac_len = skb->mac_len;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	/* Account for the optional header fields announced in the flags. */
	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	if (greh->flags & GRE_KEY)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_SEQ)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_CSUM) {
		ghl += GRE_HEADER_SECTION;
		csum = true;
	} else
		csum = false;

	/* setup inner skb. */
	if (greh->protocol == htons(ETH_P_TEB)) {
		struct ethhdr *eth = eth_hdr(skb);
		skb->protocol = eth->h_proto;
	} else {
		skb->protocol = greh->protocol;
	}

	skb->encapsulation = 0;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;
	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs))
		goto out;

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			/* A shared fragment cannot be checksummed in place. */
			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			/* Recompute the GRE checksum over the new segment. */
			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		/* Put the remaining tunnel headers back in front of the GRE
		 * header and restore the outer offsets.
		 */
		__skb_push(skb, tnl_hlen - ghl);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
	} while ((skb = skb->next));
out:
	return segs;
}
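
/* GSO is only attempted for skbs that were marked as encapsulated. */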
static int gre_gso_send_check(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return -EINVAL;
	return 0;
}
static const struct net_protocol net_gre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
	.netns_ok    = 1,
};

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_send_check = gre_gso_send_check,
		.gso_segment    = gre_gso_segment,
	},
};
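
/*
 * Hook the demultiplexer and its GSO callbacks into the IPv4 stack for
 * IP protocol 47 (GRE).  If the offload registration fails, the already
 * installed protocol handler is removed again.
 */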
static int __init gre_init(void)
{
	pr_info("GRE over IPv4 demultiplexer driver\n");

	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
		pr_err("can't add protocol\n");
		return -EAGAIN;
	}

	if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
		pr_err("can't add protocol offload\n");
		inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
		return -EAGAIN;
	}

	return 0;
}
static void __exit gre_exit(void)
{
	inet_del_offload(&gre_offload, IPPROTO_GRE);
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}

module_init(gre_init);
module_exit(gre_exit);

MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");