
/*
 * IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 * Roger Venning <r.venning@telstra.com>: 6to4 support
 * Nate Thompson <nate@thebog.net>: 6to4 support
 * Fred Templin <fred.l.templin@boeing.com>: isatap support
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
   This version of net/ipv6/sit.c is a clone of net/ipv4/ip_gre.c
   For comments look at net/ipv4/ip_gre.c --ANK
 */

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

static void ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);

static int sit_net_id __read_mostly;
struct sit_net {
	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel *tunnels_r[HASH_SIZE];
	struct ip_tunnel *tunnels_l[HASH_SIZE];
	struct ip_tunnel *tunnels_wc[1];
	struct ip_tunnel **tunnels[4];

	struct net_device *fb_tunnel_dev;
};

/*
 * Locking : hash tables are protected by RCU and a spinlock
 */
static DEFINE_SPINLOCK(ipip6_lock);

#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/*
 * Must be invoked with rcu_read_lock
 */
static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
		struct net_device *dev, __be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;
	struct sit_net *sitn = net_generic(net, sit_net_id);

	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
		if (remote == t->parms.iph.daddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
		if (local == t->parms.iph.saddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = rcu_dereference(sitn->tunnels_wc[0]);
	if ((t != NULL) && (t->dev->flags & IFF_UP))
		return t;
	return NULL;
}
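
/*
 * Pick the hash bucket for a tunnel from its endpoint addresses:
 * index 3 (remote + local), 2 (remote only), 1 (local only) or
 * 0 (wildcard), matching the lookup order used above.
 */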
static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
		struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &sitn->tunnels[prio][h];
}

static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
		struct ip_tunnel *t)
{
	return __ipip6_bucket(sitn, &t->parms);
}

static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			spin_lock_bh(&ipip6_lock);
			*tp = t->next;
			spin_unlock_bh(&ipip6_lock);
			break;
		}
	}
}

static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipip6_bucket(sitn, t);

	spin_lock_bh(&ipip6_lock);
	t->next = *tp;
	rcu_assign_pointer(*tp, t);
	spin_unlock_bh(&ipip6_lock);
}
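
/*
 * Seed the 6rd parameters of a new tunnel: the fallback device gets
 * the 6to4 defaults (prefix 2002::/16, no relay prefix), while every
 * other tunnel inherits the current settings of the fallback device.
 */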
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
{
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel *t = netdev_priv(dev);

	if (t->dev == sitn->fb_tunnel_dev) {
		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
		t->ip6rd.relay_prefix = 0;
		t->ip6rd.prefixlen = 16;
		t->ip6rd.relay_prefixlen = 0;
	} else {
		struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
	}
#endif
}
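
/*
 * Find a tunnel matching the given endpoints and link.  If none exists
 * and @create is set, allocate and register a new "sit%d" device; if a
 * matching tunnel already exists and @create is set, return NULL.
 */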
static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct sit_net *sitn = net_generic(net, sit_net_id);

	for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    parms->link == t->parms.link) {
			if (create)
				return NULL;
			else
				return t;
		}
	}
	if (!create)
		goto failed;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "sit%%d");

	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	nt = netdev_priv(dev);

	nt->parms = *parms;
	ipip6_tunnel_init(dev);
	ipip6_tunnel_clone_6rd(dev, sitn);

	if (parms->i_flags & SIT_ISATAP)
		dev->priv_flags |= IFF_ISATAP;

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);

	ipip6_tunnel_link(sitn, nt);
	return nt;

failed_free:
	free_netdev(dev);
failed:
	return NULL;
}

static DEFINE_SPINLOCK(ipip6_prl_lock);

#define for_each_prl_rcu(start)			\
	for (prl = rcu_dereference(start);	\
	     prl;				\
	     prl = rcu_dereference(prl->next))

static struct ip_tunnel_prl_entry *
__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
{
	struct ip_tunnel_prl_entry *prl;

	for_each_prl_rcu(t->prl)
		if (prl->addr == addr)
			break;
	return prl;
}
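
/*
 * SIOCGETPRL helper: copy up to a->datalen / sizeof(*a) potential
 * router list entries back to user space, starting at a + 1.  A
 * request for a specific address returns at most one entry, and
 * a->datalen is updated to the number of bytes actually written.
 */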
static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
				struct ip_tunnel_prl __user *a)
{
	struct ip_tunnel_prl kprl, *kp;
	struct ip_tunnel_prl_entry *prl;
	unsigned int cmax, c = 0, ca, len;
	int ret = 0;

	if (copy_from_user(&kprl, a, sizeof(kprl)))
		return -EFAULT;
	cmax = kprl.datalen / sizeof(kprl);
	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
		cmax = 1;

	/* For simple GET or for root users,
	 * we try harder to allocate.
	 */
	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
		NULL;

	rcu_read_lock();

	ca = t->prl_count < cmax ? t->prl_count : cmax;

	if (!kp) {
		/* We don't try hard to allocate much memory for
		 * non-root users.
		 * For root users, retry allocating enough memory for
		 * the answer.
		 */
		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
		if (!kp) {
			ret = -ENOMEM;
			goto out;
		}
	}

	c = 0;
	for_each_prl_rcu(t->prl) {
		if (c >= cmax)
			break;
		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
			continue;
		kp[c].addr = prl->addr;
		kp[c].flags = prl->flags;
		c++;
		if (kprl.addr != htonl(INADDR_ANY))
			break;
	}
out:
	rcu_read_unlock();

	len = sizeof(*kp) * c;
	ret = 0;
	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
		ret = -EFAULT;

	kfree(kp);

	return ret;
}
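
/*
 * Add a potential router list entry (or, with @chg set, change the
 * flags of an existing one).  New entries are published with
 * rcu_assign_pointer() under ipip6_prl_lock so that RCU readers such
 * as isatap_chksrc() never see a half-initialized entry.
 */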
static int
ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
{
	struct ip_tunnel_prl_entry *p;
	int err = 0;

	if (a->addr == htonl(INADDR_ANY))
		return -EINVAL;

	spin_lock(&ipip6_prl_lock);

	for (p = t->prl; p; p = p->next) {
		if (p->addr == a->addr) {
			if (chg) {
				p->flags = a->flags;
				goto out;
			}
			err = -EEXIST;
			goto out;
		}
	}

	if (chg) {
		err = -ENXIO;
		goto out;
	}

	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
	if (!p) {
		err = -ENOBUFS;
		goto out;
	}

	p->next = t->prl;
	p->addr = a->addr;
	p->flags = a->flags;
	t->prl_count++;
	rcu_assign_pointer(t->prl, p);
out:
	spin_unlock(&ipip6_prl_lock);
	return err;
}

static void prl_entry_destroy_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head));
}

static void prl_list_destroy_rcu(struct rcu_head *head)
{
	struct ip_tunnel_prl_entry *p, *n;

	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
	do {
		n = p->next;
		kfree(p);
		p = n;
	} while (p);
}
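
/*
 * Delete one PRL entry, or flush the whole list when @a is NULL or
 * names the wildcard address.  Entries are freed after a grace period
 * via call_rcu(), so concurrent RCU readers remain safe.
 */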
static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
	struct ip_tunnel_prl_entry *x, **p;
	int err = 0;

	spin_lock(&ipip6_prl_lock);

	if (a && a->addr != htonl(INADDR_ANY)) {
		for (p = &t->prl; *p; p = &(*p)->next) {
			if ((*p)->addr == a->addr) {
				x = *p;
				*p = x->next;
				call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
				t->prl_count--;
				goto out;
			}
		}
		err = -ENXIO;
	} else {
		if (t->prl) {
			t->prl_count = 0;
			x = t->prl;
			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
			t->prl = NULL;
		}
	}
out:
	spin_unlock(&ipip6_prl_lock);
	return err;
}
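
/*
 * ISATAP (RFC 4214) ingress check: accept a packet only if its outer
 * IPv4 source is on the potential router list, or if the inner IPv6
 * source is an ISATAP address embedding that same IPv4 address and
 * matching a prefix configured on this tunnel.  The PRL flags also
 * determine the ndisc node type recorded in the skb.
 */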
static int
isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
{
	struct ip_tunnel_prl_entry *p;
	int ok = 1;

	rcu_read_lock();
	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
	if (p) {
		if (p->flags & PRL_DEFAULT)
			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
		else
			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
	} else {
		struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
		if (ipv6_addr_is_isatap(addr6) &&
		    (addr6->s6_addr32[3] == iph->saddr) &&
		    ipv6_chk_prefix(addr6, t->dev))
			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
		else
			ok = 0;
	}
	rcu_read_unlock();
	return ok;
}

static void ipip6_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);

	if (dev == sitn->fb_tunnel_dev) {
		spin_lock_bh(&ipip6_lock);
		sitn->tunnels_wc[0] = NULL;
		spin_unlock_bh(&ipip6_lock);
		dev_put(dev);
	} else {
		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
		dev_put(dev);
	}
}
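
/*
 * ICMP error handler for the outer IPv4 header: look up the tunnel the
 * failed packet belonged to and update its rate-limited error counter,
 * which ipip6_tunnel_xmit() later turns into dst_link_failure() reports.
 */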
static int ipip6_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct iphdr *iph = (struct iphdr*)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	rcu_read_lock();
	t = ipip6_tunnel_lookup(dev_net(skb->dev),
				skb->dev,
				iph->daddr,
				iph->saddr);
	if (t == NULL || t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	rcu_read_unlock();
	return err;
}

static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}

static int ipip6_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	iph = ip_hdr(skb);

	rcu_read_lock();
	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
				     iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;

		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
		    !isatap_chksrc(skb, iph, tunnel)) {
			tunnel->dev->stats.rx_errors++;
			rcu_read_unlock();
			kfree_skb(skb);
			return 0;
		}

		skb_tunnel_rx(skb, tunnel->dev);

		ipip6_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		rcu_read_unlock();
		return 0;
	}

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	rcu_read_unlock();
out:
	kfree_skb(skb);
	return 0;
}

/*
 * Returns the embedded IPv4 address if the IPv6 address
 * comes from 6rd / 6to4 (RFC 3056) addr space.
 */
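/*
 * Illustrative example (not part of the original source): without 6rd,
 * the 6to4 destination 2002:c000:0204::1 carries the IPv4 address
 * 0xc0000204, i.e. 192.0.2.4, in bits 16..47 of the IPv6 address; with
 * 6rd the same kind of extraction is performed relative to the
 * configured prefix and relay prefix lengths.
 */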
static inline
__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
{
	__be32 dst = 0;

#ifdef CONFIG_IPV6_SIT_6RD
	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
			      tunnel->ip6rd.prefixlen)) {
		unsigned pbw0, pbi0;
		int pbi1;
		u32 d;

		pbw0 = tunnel->ip6rd.prefixlen >> 5;
		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;

		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
		    tunnel->ip6rd.relay_prefixlen;

		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
		if (pbi1 > 0)
			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
			     (32 - pbi1);

		dst = tunnel->ip6rd.relay_prefix | htonl(d);
	}
#else
	if (v6dst->s6_addr16[0] == htons(0x2002)) {
		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
		memcpy(&dst, &v6dst->s6_addr16[1], 4);
	}
#endif
	return dst;
}

/*
 * This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	struct iphdr *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = ipv6_hdr(skb);
	u8 tos = tunnel->parms.iph.tos;
	__be16 df = tiph->frag_off;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int mtu;
	struct in6_addr *addr6;
	int addr_type;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	/* ISATAP (RFC4214) - must come before 6to4 */
	if (dev->priv_flags & IFF_ISATAP) {
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if ((addr_type & IPV6_ADDR_UNICAST) &&
		    ipv6_addr_is_isatap(addr6))
			dst = addr6->s6_addr32[3];
		else
			goto tx_error;
	}

	if (!dst)
		dst = try_6rd(&iph6->daddr, tunnel);

	if (!dst) {
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY) {
			addr6 = &ipv6_hdr(skb)->daddr;
			addr_type = ipv6_addr_type(addr6);
		}

		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
			goto tx_error_icmp;

		dst = addr6->s6_addr32[3];
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	if (rt->rt_type != RTN_UNICAST) {
		ip_rt_put(rt);
		stats->tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		stats->collisions++;
		goto tx_error;
	}

	if (df) {
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);

		if (mtu < 68) {
			stats->collisions++;
			ip_rt_put(rt);
			goto tx_error;
		}

		if (mtu < IPV6_MIN_MTU) {
			mtu = IPV6_MIN_MTU;
			df = 0;
		}

		if (tunnel->parms.iph.daddr && skb_dst(skb))
			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

		if (skb->len > mtu) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			txq->tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		iph6 = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/*
	 * Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_IPV6;
	iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl = iph6->hop_limit;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
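
/*
 * Derive link parameters from the tunnel configuration: route the
 * configured destination to find the underlying device, then size
 * hard_header_len and the MTU to leave room for the outer IPv4 header,
 * never going below IPV6_MIN_MTU.
 */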
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		struct rtable *rt;
		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
		if (dev->mtu < IPV6_MIN_MTU)
			dev->mtu = IPV6_MIN_MTU;
	}
	dev->iflink = tunnel->parms.link;
}

static int
ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel_prl prl;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd ip6rd;
#endif

	switch (cmd) {
	case SIOCGETTUNNEL:
#ifdef CONFIG_IPV6_SIT_6RD
	case SIOCGET6RD:
#endif
		t = NULL;
		if (dev == sitn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip6_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);

		err = -EFAULT;
		if (cmd == SIOCGETTUNNEL) {
			memcpy(&p, &t->parms, sizeof(p));
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
					 sizeof(p)))
				goto done;
#ifdef CONFIG_IPV6_SIT_6RD
		} else {
			ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
			ip6rd.prefixlen = t->ip6rd.prefixlen;
			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
					 sizeof(ip6rd)))
				goto done;
#endif
		}
		err = 0;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip6_tunnel_unlink(sitn, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip6_tunnel_link(sitn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipip6_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == sitn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(sitn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	case SIOCGETPRL:
		err = -EINVAL;
		if (dev == sitn->fb_tunnel_dev)
			goto done;
		err = -ENOENT;
		if (!(t = netdev_priv(dev)))
			goto done;
		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
		break;

	case SIOCADDPRL:
	case SIOCDELPRL:
	case SIOCCHGPRL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;
		err = -EINVAL;
		if (dev == sitn->fb_tunnel_dev)
			goto done;
		err = -EFAULT;
		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
			goto done;
		err = -ENOENT;
		if (!(t = netdev_priv(dev)))
			goto done;

		switch (cmd) {
		case SIOCDELPRL:
			err = ipip6_tunnel_del_prl(t, &prl);
			break;
		case SIOCADDPRL:
		case SIOCCHGPRL:
			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
			break;
		}
		netdev_state_change(dev);
		break;

#ifdef CONFIG_IPV6_SIT_6RD
	case SIOCADD6RD:
	case SIOCCHG6RD:
	case SIOCDEL6RD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
				   sizeof(ip6rd)))
			goto done;

		t = netdev_priv(dev);

		if (cmd != SIOCDEL6RD) {
			struct in6_addr prefix;
			__be32 relay_prefix;

			err = -EINVAL;
			if (ip6rd.relay_prefixlen > 32 ||
			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
				goto done;

			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
					 ip6rd.prefixlen);
			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
				goto done;
			if (ip6rd.relay_prefixlen)
				relay_prefix = ip6rd.relay_prefix &
					       htonl(0xffffffffUL <<
						     (32 - ip6rd.relay_prefixlen));
			else
				relay_prefix = 0;
			if (relay_prefix != ip6rd.relay_prefix)
				goto done;

			ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
			t->ip6rd.relay_prefix = relay_prefix;
			t->ip6rd.prefixlen = ip6rd.prefixlen;
			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
		} else
			ipip6_tunnel_clone_6rd(dev, sitn);

		err = 0;
		break;
#endif

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ipip6_netdev_ops = {
	.ndo_uninit	= ipip6_tunnel_uninit,
	.ndo_start_xmit	= ipip6_tunnel_xmit,
	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
};
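
/*
 * Default device parameters for a SIT interface: ARPHRD_SIT link type,
 * no ARP, a 4-byte (IPv4) hardware address, and an MTU of the Ethernet
 * payload size minus the 20-byte outer IPv4 header (1500 - 20 = 1480).
 */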
static void ipip6_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipip6_netdev_ops;
	dev->destructor = free_netdev;

	dev->type = ARPHRD_SIT;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
	dev->flags = IFF_NOARP;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	dev->iflink = 0;
	dev->addr_len = 4;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static void ipip6_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	ipip6_tunnel_bind_dev(dev);
}
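
/*
 * Initialize the per-namespace fallback device ("sit0").  It has no
 * configured endpoints and occupies the wildcard hash slot, so it
 * catches protocol-41 traffic that matches no other tunnel.
 */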
static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_IPV6;
	iph->ihl = 5;
	iph->ttl = 64;

	dev_hold(dev);
	sitn->tunnels_wc[0] = tunnel;
}

static struct xfrm_tunnel sit_handler = {
	.handler	= ipip6_rcv,
	.err_handler	= ipip6_err,
	.priority	= 1,
};
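
/*
 * Queue every tunnel of a namespace (except the wildcard slot, which
 * holds the fallback device) for unregistration; sit_exit_net() then
 * tears them all down with one unregister_netdevice_many() call.
 */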
static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t = sitn->tunnels[prio][h];

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = t->next;
			}
		}
	}
}

static int __net_init sit_init_net(struct net *net)
{
	struct sit_net *sitn = net_generic(net, sit_net_id);
	int err;

	sitn->tunnels[0] = sitn->tunnels_wc;
	sitn->tunnels[1] = sitn->tunnels_l;
	sitn->tunnels[2] = sitn->tunnels_r;
	sitn->tunnels[3] = sitn->tunnels_r_l;

	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
					   ipip6_tunnel_setup);
	if (!sitn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(sitn->fb_tunnel_dev, net);

	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);

	if ((err = register_netdev(sitn->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	dev_put(sitn->fb_tunnel_dev);
	free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit sit_exit_net(struct net *net)
{
	struct sit_net *sitn = net_generic(net, sit_net_id);
	LIST_HEAD(list);

	rtnl_lock();
	sit_destroy_tunnels(sitn, &list);
	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations sit_net_ops = {
	.init = sit_init_net,
	.exit = sit_exit_net,
	.id   = &sit_net_id,
	.size = sizeof(struct sit_net),
};

static void __exit sit_cleanup(void)
{
	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);

	unregister_pernet_device(&sit_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

static int __init sit_init(void)
{
	int err;

	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");

	err = register_pernet_device(&sit_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
	if (err < 0) {
		unregister_pernet_device(&sit_net_ops);
		printk(KERN_INFO "sit init: Can't add protocol\n");
	}
	return err;
}

module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS("sit0");
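
/*
 * Usage note (not part of the original source): beyond the ioctl
 * interface above, a point-to-point SIT tunnel is commonly configured
 * from user space with iproute2, e.g.
 *
 *   ip tunnel add sit1 mode sit local 192.0.2.1 remote 198.51.100.1 ttl 64
 *   ip link set sit1 up
 *   ip -6 addr add 2001:db8::1/64 dev sit1
 *
 * The addresses here are documentation examples; substitute the real
 * tunnel endpoints.
 */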