ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
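
/*
 * Pick the next value of a global, spinlock-protected counter for the
 * Fragment header's Identification field (stored in network byte
 * order); the counter skips 0, so an identification of zero is never
 * handed out.
 */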
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
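
/*
 * Final output step: transmit via the cached hardware header if the
 * dst has one, else via the neighbour's output function; with neither
 * available the packet is unroutable and gets dropped.
 */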
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
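
/*
 * Second-stage output.  For multicast destinations, loop a copy of the
 * packet back to local listeners (unless the sending socket cleared
 * IPV6_MULTICAST_LOOP), and honour a hop limit of zero by delivering
 * the looped copy only, never putting the packet on the wire.
 */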
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}
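
/*
 * Main output entry point: oversized non-GSO packets, and packets on
 * dsts flagged "allfrag", are fragmented; everything else goes
 * straight to ip6_output2().
 */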
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
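
/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * registered on ip6_ra_chain with a matching selector; clones go to
 * all but the last match, which consumes the original skb.  Returns 1
 * if at least one socket took the packet.
 */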
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
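
/*
 * Classify a packet destined to a proxied address: 1 means an NDISC
 * message that must go to the local input path, -1 means a link-local
 * destination that cannot be proxied (reported via dst_link_failure()
 * and to be dropped), 0 means forward normally.
 */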
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reactions involving unicast neighbor discovery
			 * messages destined to the proxied address, pass them
			 * to the input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
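
/*
 * Forwarding path: verify that forwarding is enabled and policy allows
 * it, divert Router Alert packets to interested raw sockets, enforce
 * the hop limit, handle NDISC proxying, emit redirects and PKT_TOOBIG
 * errors where required, then decrement hop_limit and hand the packet
 * to the NF_IP6_FORWARD hook.
 */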
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
			goto error;
		}
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
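
/*
 * Compute the length of the unfragmentable part of the packet, i.e.
 * the offset at which a Fragment header would be inserted: past any
 * Hop-by-Hop header, any Destination Options header belonging to the
 * unfragmentable part, and the Routing header (cf. RFC 2460, section
 * 4.5).  *nexthdr is left pointing at the Next Header byte the caller
 * will overwrite with NEXTHDR_FRAGMENT.
 */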
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
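
/*
 * Split an oversized packet into fragments and push each one through
 * @output.  The fast path re-uses the existing frag_list buffers when
 * their geometry already fits; otherwise the slow path copies the
 * payload into freshly allocated skbs.  Packets from sockets doing
 * path MTU discovery (and any not generated by a local socket) are
 * never fragmented here; they draw an ICMPV6_PKT_TOOBIG error instead.
 */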
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.  (This last
	 * check should be redundant, but it's free.)
	 */
	if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
		skb->dev = skb->dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram into the new
		 *	fragment's buffer.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
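
/*
 * ip6_rt_check() returns nonzero when neither the cached (host) route
 * key nor the socket's cached peer address matches the flow address,
 * i.e. when the cached route can no longer be trusted for this flow.
 */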
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected case is not
	 * very simple. Take into account that we do not support routing
	 * by source, TOS, and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
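
/*
 * UDP fragmentation offload path for ip6_append_data(): build (or
 * extend) one large skb whose payload lives in page fragments, and let
 * the NETIF_F_UFO-capable device segment it.  gso_size carries the
 * per-fragment payload size and ip6_frag_id the pre-selected fragment
 * identification.
 */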
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
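
/*
 * Queue data for a corked IPv6 packet, splitting it into MTU-sized
 * buffers on the socket's write queue; ip6_push_pending_frames() later
 * glues the queue together and transmits it.  The first call sets up
 * the cork state (options, route, hop limit, traffic class, MTU);
 * subsequent calls while the queue is non-empty reuse that state.
 * Typical callers are the UDP, ICMPv6 and raw-socket sendmsg paths.
 */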
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) +
		    IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu &&
			!(inet->cork.flags & IPCORK_ALLFRAG) ?
			mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu &&
				       !(inet->cork.flags & IPCORK_ALLFRAG) ?
				       mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
						alloclen + hh_len, 1,
						sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 * Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen,
						       offset, copy, fraggap,
						       skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page,
							   sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    page_address(frag->page) +
				    frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
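
/*
 * Splice every buffer queued by ip6_append_data() into a single packet
 * (trailing skbs become the frag_list of the first), prepend the
 * extension headers and IPv6 header recorded in the cork state, and
 * send the result through the NF_IP6_LOCAL_OUT hook.  The cork state
 * is released on the way out.
 */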
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
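
/*
 * Abort a corked transmission: discard every skb still sitting on the
 * write queue and release the cork state, mirroring the cleanup in
 * ip6_push_pending_frames().
 */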
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}