ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
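/*
 * Final link-layer step for outgoing packets: use the hardware header
 * cached on the route (hh_cache) when one is available, otherwise fall
 * back to the neighbour's output routine.  Packets with neither are
 * counted as OUTNOROUTES and dropped.
 */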
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
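/*
 * Post-routing output step.  For multicast destinations a clone is looped
 * back through ip6_dev_loopback_xmit when the socket has mc_loop enabled
 * and the outgoing (non-loopback) device is itself a member of the group;
 * the original packet then continues through the NF_IP6_POST_ROUTING hook
 * to ip6_output_finish.
 */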
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
}
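/*
 * dst_output() entry point for IPv6: fragment when the packet exceeds the
 * path MTU and is not GSO, or when the route requires fragmentation of
 * every packet (dst_allfrag); otherwise hand it straight to ip6_output2.
 */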
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
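/*
 * Hand a copy of a packet carrying a Router Alert option to every raw
 * socket registered on ip6_ra_chain for that alert value.  Clones are
 * delivered to all but the last matching socket, which consumes the
 * original; returns 1 if at least one listener took the packet.
 */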
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
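/*
 * Decide what to do with a packet that arrived for an address we proxy
 * neighbour discovery for: NDISC messages go to the local input path
 * (return 1), link-local destinations are rejected (return -1), anything
 * else may be forwarded normally (return 0).
 */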
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
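/*
 * Forward a packet that is not addressed to this host: check that
 * forwarding and policy allow it, deliver Router Alert packets to
 * interested raw sockets, enforce hop limit and path MTU, possibly emit
 * a redirect, then decrement hop_limit and pass the packet on to the
 * NF_IP6_FORWARD hook.
 */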
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
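/*
 * Copy per-packet metadata (priority, device, mark, conntrack and other
 * netfilter state) from the original skb to a freshly built fragment so
 * that each fragment is treated like the packet it came from.
 */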
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}
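/*
 * Walk the extension-header chain to find the offset at which a Fragment
 * header has to be inserted, i.e. the end of the unfragmentable part of
 * the header chain.  *nexthdr is left pointing at the Next Header field
 * that will be rewritten to NEXTHDR_FRAGMENT.
 */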
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default :
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
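/*
 * Fragment an outgoing packet.  The fast path reuses an existing frag_list
 * (built, for example, by ip6_append_data) and only prepends per-fragment
 * headers; when the geometry does not allow that, the slow path allocates
 * a new skb per fragment and copies the payload into it.
 */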
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
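/*
 * Helper for ip6_sk_dst_check(): returns non-zero when the cached route
 * can no longer be shown to match the flow, i.e. it is not a host route
 * for the flow address and the socket's cached peer address does not
 * match either.
 */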
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which has not this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
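/*
 * Common tail of the dst lookup helpers: perform the route lookup if the
 * caller did not already have a dst, and pick a source address for the
 * flow when none was specified.  On failure the dst reference is dropped
 * and *dst is reset to NULL.
 */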
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
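/*
 * UDP fragmentation offload path of ip6_append_data(): build (or extend)
 * one large skb holding the whole datagram, record the per-fragment size
 * in gso_size and pre-select the fragment id, and let the NETIF_F_UFO
 * device perform the actual fragmentation.
 */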
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}
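/*
 * Queue data for transmission on a corked socket.  The first call sets up
 * the cork state (options, route, hop limit, fragment size); subsequent
 * calls keep appending, splitting the data into MTU-sized skbs on the
 * write queue until ip6_push_pending_frames() finally sends them.
 */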
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 *        --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}
		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
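/*
 * Transmit everything queued by ip6_append_data(): splice the queued skbs
 * into one packet (via frag_list), prepend the extension headers and the
 * IPv6 header from the cork state, and hand the result to the
 * NF_IP6_LOCAL_OUT hook.  The cork state is released whether or not the
 * send succeeds.
 */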
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(__be32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
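/*
 * Abort a corked send: count and free everything still sitting on the
 * socket's write queue and release the cork state without transmitting.
 */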
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}