ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
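
/*
 * Pick a fragment identification value for the Fragment header.  A single
 * global counter is used, protected by a spinlock; zero is skipped so that
 * an ID of 0 can still mean "not yet selected" in the slow path below.
 */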
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
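
/*
 * Final transmission step: use the cached hardware header if the destination
 * has one, otherwise hand the packet to the neighbour output function for
 * address resolution.
 */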
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
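
/*
 * Post-routing output: for multicast destinations, loop a copy back to the
 * local stack when the sender has not disabled multicast loopback and the
 * host has joined the group, then pass the packet through the
 * NF_IP6_POST_ROUTING hook.
 */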
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
}
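
/*
 * Entry point from dst_output(): fragment if the packet exceeds the route
 * MTU (and is not being UFO-offloaded) or if the route demands
 * fragmentation of every packet, otherwise transmit directly.
 */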
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication, but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
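
/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * registered on ip6_ra_chain with a matching alert value (and, if bound,
 * a matching device).  Returns 1 if the packet was consumed.
 */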
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
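
/*
 * Forward a packet that is not addressed to this host: validate policy,
 * decrement the hop limit, generate redirects and Packet Too Big errors
 * where required, and hand the packet to the NF_IP6_FORWARD hook.
 */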
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT do any processing on RA packets;
	 *	we push them to user level AS IS
	 *	without any guarantee that the application will be
	 *	able to interpret them. The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
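
/*
 * Copy per-packet metadata (packet type, priority, route, traffic-control
 * and netfilter state) from the original skb to a freshly built fragment.
 */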
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
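
/*
 * Walk the extension header chain to find where the Fragment header must
 * be inserted.  Returns the offset of the first header that does not belong
 * to the unfragmentable part and sets *nexthdr to the "next header" field
 * that precedes it.
 */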
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING)
				found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr)
				return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
			break;
		default:
			return offset;
		}
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
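
/*
 * Split an oversized packet into fragments.  The fast path reuses the skbs
 * already hanging off frag_list when their geometry allows it; otherwise the
 * slow path allocates a new skb per fragment and copies the payload out in
 * MTU-sized, 8-byte-aligned chunks.
 */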
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
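
/*
 * Resolve the destination route for a flow.  A cached socket route is reused
 * when it is still valid for the flow's destination and output interface;
 * otherwise a fresh route is looked up, and a source address is selected if
 * the flow does not specify one.
 */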
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	int err = 0;

	*dst = NULL;
	if (sk) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		*dst = sk_dst_check(sk, np->dst_cookie);
		if (*dst) {
			struct rt6_info *rt = (struct rt6_info*)*dst;

			/* Yes, checking route validity in the not-connected
			 * case is not very simple. Take into account
			 * that we do not support routing by source, TOS,
			 * and MSG_DONTROUTE		--ANK (980726)
			 *
			 * 1. If route was host route, check that
			 *    cached destination is current.
			 *    If it is network route, we still may
			 *    check its validity using saved pointer
			 *    to the last used address: daddr_cache.
			 *    We do not want to save whole address now,
			 *    (because main consumer of this service
			 *    is tcp, which does not have this problem),
			 *    so that the last trick works only on connected
			 *    sockets.
			 * 2. oif also should be the same.
			 */

			if (((rt->rt6i_dst.plen != 128 ||
			      !ipv6_addr_equal(&fl->fl6_dst,
					       &rt->rt6i_dst.addr))
			     && (np->daddr_cache == NULL ||
				 !ipv6_addr_equal(&fl->fl6_dst,
						  np->daddr_cache)))
			    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
				dst_release(*dst);
				*dst = NULL;
			}
		}
	}

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);

		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
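
/*
 * UFO path of ip6_append_data(): build (or extend) a single large skb
 * holding the whole UDP datagram and record the per-fragment size so that
 * the device can segment it in hardware.
 */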
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
					    sizeof(struct frag_hdr);
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
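
/*
 * Queue data on the socket's write queue, building packets that fit the
 * corked route's MTU.  Payload goes into the skb head or, when the device
 * supports scatter/gather, into page fragments; the queued packets are
 * later combined and sent by ip6_push_pending_frames().
 */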
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
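
/*
 * Combine the packets queued by ip6_append_data() into one skb (with the
 * rest chained on frag_list), prepend extension headers and the IPv6
 * header, and hand the result to the NF_IP6_LOCAL_OUT hook.
 */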
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
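
/*
 * Drop everything queued by ip6_append_data() without sending it and
 * release the corking state.
 */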
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}