ip6_output.c
/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
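
/*
 * Illustrative note, not part of the original source: the counter above
 * hands out IDs 1, 2, 3, ... in network byte order and skips 0 on 32-bit
 * wrap-around, so htonl(0xffffffff) is followed by htonl(1), never
 * htonl(0). A minimal caller, assuming fh is about to be written into
 * the packet as its Fragment header:
 *
 *	struct frag_hdr fh;
 *	ipv6_select_ident(skb, &fh);
 *	// fh.identification is now a non-zero __be32, shared by all
 *	// fragments of this datagram
 */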

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
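
/*
 * Illustrative example, not in the original source: with a route whose
 * dst_mtu() is 1280, a 1500-byte non-GSO skb takes the ip6_fragment()
 * path, while a 1280-byte skb (or a GSO skb, which the device will
 * segment itself) goes straight to ip6_output2().
 */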

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
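
/*
 * Usage sketch, not from the original file: a connection-oriented caller
 * such as TCP attaches a routed dst to the skb and then hands it off
 * here. The names below are illustrative only:
 *
 *	skb->dst = dst_clone(route);	// from an earlier ip6_dst_lookup()
 *	err = ip6_xmit(sk, skb, &fl, np->opt, 0);
 *
 * With ipfragok == 0, an over-MTU non-GSO packet is not fragmented; the
 * caller gets -EMSGSIZE and an ICMPV6_PKT_TOOBIG is delivered locally so
 * path-MTU discovery can react.
 */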

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
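
/*
 * Descriptive note, not in the original: the loop above delivers a
 * clone to every matching router-alert socket except the most recently
 * found one, which finally consumes the original skb itself. With
 * matching sockets A, B and C, A and B each receive an skb_clone() and
 * C gets skb; the function then returns 1 so the caller neither frees
 * nor forwards the packet again.
 */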

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *)dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
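
/*
 * Worked example, not in the original: the scan stops at the first
 * header that may not precede a Fragment header (RFC 2460 ordering).
 * For a chain of IPv6 (40 bytes) | Hop-by-Hop (8) | Routing (24) | TCP,
 * the loop walks past Hop-by-Hop (offset 48) and Routing (offset 72),
 * then hits TCP and returns 72 with *nexthdr pointing at the Routing
 * header's nexthdr byte, i.e. the Fragment header belongs at offset 72.
 */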

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag,
						sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len -
							sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev),
				      GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
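
/*
 * Worked arithmetic, not in the original: with a 1500-byte link MTU and
 * no extension headers (hlen = 40), the per-fragment payload budget is
 * mtu = 1500 - 40 - 8 = 1452 bytes; every non-final fragment is then
 * trimmed to a multiple of 8 by "len &= ~7", i.e. 1448 bytes, and only
 * the last fragment may carry an unaligned tail.
 */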

static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}
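
/*
 * Descriptive note, not in the original: the expression returns 0
 * ("cached route still usable") when either the route is a /128 host
 * route whose address equals fl_addr, or the cached peer address
 * (addr_cache) equals fl_addr; it returns nonzero only when both
 * checks fail, which forces a fresh route lookup below.
 */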

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account that
	 * we do not support routing by source, TOS, and
	 * MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
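
/*
 * Usage sketch, not from the original file; daddr is a hypothetical
 * destination and the flow fields shown are the ones the lookup
 * actually consults:
 *
 *	struct flowi fl = { .oif = sk->sk_bound_dev_if,
 *			    .proto = IPPROTO_UDP };
 *	struct dst_entry *dst;
 *	int err;
 *
 *	ipv6_addr_copy(&fl.fl6_dst, daddr);
 *	err = ip6_dst_lookup(sk, &dst, &fl);
 *	if (err)
 *		return err;		// dst is NULL on error
 *	// fl.fl6_src has been filled in if it was unspecified
 *	...
 *	dst_release(dst);
 */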

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}
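
/*
 * Worked arithmetic, not in the original: with mtu = 1500 and
 * fragheaderlen = 40 (a bare IPv6 header), gso_size becomes
 * 1500 - 40 - 8 = 1452, so a UFO-capable device will emit on-wire
 * fragments carrying 1452 payload bytes each, all sharing the
 * ip6_frag_id chosen above.
 */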

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags & MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) +
						 IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu &&
			!(inet->cork.flags & IPCORK_ALLFRAG) ?
			mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu &&
				       !(inet->cork.flags & IPCORK_ALLFRAG) ?
				       mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 * Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 &&
				   getfrag(from, data + transhdrlen,
					   offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page,
							   sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    page_address(frag->page) +
				    frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
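
/*
 * Usage sketch, not from the original file: datagram senders use the
 * corking pattern around the three calls below; getfrag, msg, hlimit,
 * tclass, opt, fl, rt and msg_flags are placeholders for the caller's
 * own state, and error handling is elided:
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, getfrag, msg, len,
 *			      sizeof(struct udphdr), hlimit, tclass,
 *			      opt, &fl, rt, msg_flags);
 *	if (err)
 *		ip6_flush_pending_frames(sk);	// drop everything queued
 *	else if (!(msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk); // build header, transmit
 *	release_sock(sk);
 */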

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *)skb_push(skb,
						sizeof(struct ipv6hdr));

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}