/*
 *	Linux NET3:	IP/IP protocol decoder modified to support
 *			virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.
   For comments look at net/ipv4/ip_gre.c --ANK
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))

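/* Tunnels live on four hash chains, selected by which of the two outer
 * endpoint addresses are configured (wildcard, local only, remote only,
 * remote+local); HASH() folds an IPv4 address into one of HASH_SIZE
 * buckets by XORing it with itself shifted right four bits.
 */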
static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;
struct vti_net {
	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_wc[1];
	struct ip_tunnel __rcu **tunnels[4];

	struct net_device *fb_tunnel_dev;
};

static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
static void vti_tunnel_setup(struct net_device *dev);
static void vti_dev_free(struct net_device *dev);
static int vti_tunnel_bind_dev(struct net_device *dev);

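/* VTI_XMIT: hand the skb to dst_output() and account the result;
 * stats1 takes the per-cpu success counters (updated under syncp),
 * stats2 the error counters in the plain net_device stats.
 */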
#define VTI_XMIT(stats1, stats2) do {				\
	int err;						\
	int pkt_len = skb->len;					\
	err = dst_output(skb);					\
	if (net_xmit_eval(err) == 0) {				\
		u64_stats_update_begin(&(stats1)->syncp);	\
		(stats1)->tx_bytes += pkt_len;			\
		(stats1)->tx_packets++;				\
		u64_stats_update_end(&(stats1)->syncp);		\
	} else {						\
		(stats2)->tx_errors++;				\
		(stats2)->tx_aborted_errors++;			\
	}							\
} while (0)

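/* Fold the per-cpu counters into @tot. The fetch/retry loop rereads a
 * CPU's counters if a writer updated them concurrently; the error
 * counters are only kept in dev->stats and are copied over directly.
 */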
static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}

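/* Look up the tunnel for an outer remote/local address pair, most
 * specific match first: both addresses, then remote only, then local
 * only, then the wildcard (fallback) device.
 */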
static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
					   __be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
		if (t && (t->dev->flags&IFF_UP))
			return t;
	return NULL;
}

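/* Map tunnel parameters to their hash chain: prio bit 1 is set when a
 * remote address is configured and bit 0 when a local address is,
 * matching the tunnels_wc/tunnels_l/tunnels_r/tunnels_r_l order set up
 * in vti_init_net().
 */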
static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
					     struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &ipn->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
						  struct ip_tunnel *t)
{
	return __vti_bucket(ipn, &t->parms);
}

static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = vti_bucket(ipn, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

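/* Find a tunnel matching @parms. If none exists and @create is set,
 * allocate, bind and register a new device, named after parms->name or
 * "vti%d" (the core substitutes the next free index).
 */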
static struct ip_tunnel *vti_tunnel_locate(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, *nt;
	struct ip_tunnel __rcu **tp;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for (tp = __vti_bucket(ipn, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "vti%d");

	dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &vti_link_ops;

	vti_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void vti_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	vti_tunnel_unlink(ipn, netdev_priv(dev));
	dev_put(dev);
}

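/* ICMP error handler for the outer header. A fragmentation-needed
 * error updates the cached path MTU; other unreachable and
 * TTL-exceeded errors arm the rate-limited err_count that
 * vti_tunnel_xmit() uses to signal link failures back to the sender.
 */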
static int vti_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH. */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
	if (t == NULL)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	return err;
}

/* We don't digest the packet, therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);

	tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		struct pcpu_tstats *tstats;

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		skb->dev = tunnel->dev;
		return 1;
	}

	return -1;
}

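/* Note that vti itself adds no encapsulation on transmit: the packet is
 * routed with the tunnel's flow key (link, i_key) and only goes out if
 * the resulting route carries a tunnel-mode xfrm, which performs the
 * actual IPsec encapsulation.
 */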
/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	struct iphdr *tiph = &tunnel->parms.iph;
	u8 tos;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *old_iph = ip_hdr(skb);
	__be32 dst = tiph->daddr;
	struct flowi4 fl4;

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	tos = old_iph->tos;

	memset(&fl4, 0, sizeof(fl4));
	flowi4_init_output(&fl4, tunnel->parms.link,
			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
			   RT_SCOPE_UNIVERSE,
			   IPPROTO_IPIP, 0,
			   dst, tiph->saddr, 0, 0);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	/* If there is no transform attached to the route, or the
	 * transform is not in tunnel mode, this tunnel is not functional.
	 */
	if (!rt->dst.xfrm ||
	    rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	nf_reset(skb);
	skb->dev = skb_dst(skb)->dev;

	tstats = this_cpu_ptr(dev->tstats);
	VTI_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

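/* Resolve the underlying device for this tunnel, either by routing to
 * the remote endpoint or via parms.link, and derive hard_header_len and
 * MTU from it. Returns the MTU the tunnel device should use.
 */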
static int vti_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct rtable *rt;
		struct flowi4 fl4;

		memset(&fl4, 0, sizeof(fl4));
		flowi4_init_output(&fl4, tunnel->parms.link,
				   be32_to_cpu(tunnel->parms.i_key),
				   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
				   IPPROTO_IPIP, 0,
				   iph->daddr, iph->saddr, 0, 0);
		rt = ip_route_output_key(dev_net(dev), &fl4);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len +
				       sizeof(struct iphdr);
		dev->mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;
	return dev->mtu;
}

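/* Legacy SIOC{GET,ADD,CHG,DEL}TUNNEL configuration interface. As a
 * usage sketch (assuming an iproute2 build with vti support; the
 * addresses and keys are illustrative):
 *
 *   ip tunnel add vti1 mode vti local 10.0.0.1 remote 10.0.0.2 \
 *           ikey 11 okey 12
 *
 * Newer userspace can use the rtnl_link interface (vti_link_ops below)
 * instead.
 */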
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = vti_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		p.i_flags |= GRE_KEY | VTI_ISVTI;
		p.o_flags |= GRE_KEY;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5)
			goto done;

		t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) &&
				     !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) &&
				     p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				vti_tunnel_unlink(ipn, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				t->parms.iph.protocol = IPPROTO_IPIP;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				vti_tunnel_link(ipn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					vti_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			p.i_flags |= GRE_KEY | VTI_ISVTI;
			p.o_flags |= GRE_KEY;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
					 sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p)))
				goto done;
			err = -ENOENT;

			t = vti_tunnel_locate(net, &p, 0);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t->dev == ipn->fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

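/* 68 is the minimum MTU an IPv4 host must accept (RFC 791); 0xFFF8 is
 * the conventional tunnel upper bound, just below the 64 KiB limit of
 * the IPv4 total-length field.
 */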
static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 0xFFF8)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= vti_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= vti_tunnel_ioctl,
	.ndo_change_mtu	= vti_tunnel_change_mtu,
	.ndo_get_stats64 = vti_get_stats64,
};

static void vti_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->destructor		= vti_dev_free;

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->features		|= NETIF_F_LLTX;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}

static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static int __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_IPIP;
	iph->ihl = 5;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev_hold(dev);
	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
	return 0;
}

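/* Receive-side hook: registered on the xfrm4 tunnel-mode input path in
 * vti_init(), so packets decapsulated by IPsec are attributed to the
 * matching vti device.
 */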
static struct xfrm_tunnel vti_handler __read_mostly = {
	.handler	= vti_rcv,
	.err_handler	= vti_err,
	.priority	= 1,
};

static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ipn->tunnels[prio][h]);
			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	ipn->tunnels[0] = ipn->tunnels_wc;
	ipn->tunnels[1] = ipn->tunnels_l;
	ipn->tunnels[2] = ipn->tunnels_r;
	ipn->tunnels[3] = ipn->tunnels_r_l;

	ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
					  "ip_vti0",
					  vti_tunnel_setup);
	if (!ipn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ipn->fb_tunnel_dev, net);

	err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;

	err = register_netdev(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	return 0;

err_reg_dev:
	vti_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
	/* nothing */
	return err;
}

static void __net_exit vti_exit_net(struct net *net)
{
	struct vti_net *ipn = net_generic(net, vti_net_id);
	LIST_HEAD(list);

	rtnl_lock();
	vti_destroy_tunnels(ipn, &list);
	/* The fallback device sits on the wildcard chain, which
	 * vti_destroy_tunnels() skips, so queue it explicitly.
	 */
	unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit = vti_exit_net,
	.id   = &vti_net_id,
	.size = sizeof(struct vti_net),
};

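/* rtnl_link (netlink) configuration. The IFLA_VTI_* attributes mirror
 * the ioctl parameters: link, input/output key and the local/remote
 * outer addresses.
 */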
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &nt->parms);

	if (vti_tunnel_locate(net, &nt->parms, 0))
		return -EEXIST;

	mtu = vti_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);

out:
	return err;
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ipn->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &p);

	t = vti_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		vti_tunnel_unlink(ipn, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		t->parms.o_key = p.o_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}

		vti_tunnel_link(ipn, t);
		netdev_state_change(dev);
	}

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = vti_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t vti_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(4) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(4) +
		0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr))
		return -EMSGSIZE;	/* don't report a silently truncated dump */

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
};

static int __init vti_init(void)
{
	int err;

	pr_info("IPv4 over IPSec tunneling driver\n");

	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_mode_tunnel_input_register(&vti_handler);
	if (err < 0) {
		unregister_pernet_device(&vti_net_ops);
		pr_info("vti init: can't register tunnel\n");
		return err;
	}

	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
	xfrm4_mode_tunnel_input_deregister(&vti_handler);
	unregister_pernet_device(&vti_net_ops);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
	if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
		pr_info("vti close: can't deregister tunnel\n");

	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");