ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
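
/*
 * Hand out the next value of the global fragmentation-ID counter, under a
 * spinlock, storing it in network byte order in the fragment header.  The
 * counter wraps from 0 back to 1, so a zero ID is never issued; the slow
 * fragmentation path below relies on frag_id == 0 meaning "not yet chosen".
 */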
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
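
/*
 * Second stage of output: loop a copy of multicast packets back to local
 * listeners when required, then hand the packet to the NF_IP6_POST_ROUTING
 * hook with ip6_output_finish as the final output function.
 */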
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
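
/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * registered on ip6_ra_chain with a matching selector (and, if the socket
 * is bound to a device, a matching ifindex).  Returns 1 if the packet was
 * consumed by at least one socket, 0 otherwise.
 */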
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
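
/*
 * Decide what to do with a packet addressed to an NDP-proxied destination:
 * return 1 to pass unicast neighbour-discovery messages to the local input
 * path, -1 to drop (link-local destination the proxy must not forward),
 * and 0 to continue forwarding as usual.
 */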
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation would also be a mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
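
/*
 * Copy per-packet metadata (packet type, priority, protocol, dst reference,
 * device, mark, traffic-control index, netfilter and security state) from
 * the original skb to a freshly built fragment.
 */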
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
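
/*
 * Walk the extension-header chain and return the offset at which the
 * fragmentable part of the packet begins, i.e. where a Fragment header
 * would be inserted.  *nexthdr is left pointing at the nexthdr field that
 * must be rewritten to NEXTHDR_FRAGMENT by the caller.
 */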
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}

EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
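
/*
 * Split an oversized packet into fragments and pass each one to @output.
 * The fast path reuses an existing frag_list when every element already has
 * suitable geometry; otherwise the slow path copies the payload into newly
 * allocated fragment skbs.
 */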
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(skb_network_header(frag), skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram into the new fragment.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();

		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
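
/*
 * Helper for ip6_sk_dst_check(): returns true when the cached route can no
 * longer be trusted for this flow, i.e. it is neither a host route matching
 * the flow's address nor covered by the socket's cached address.
 */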
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which has not this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
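
/*
 * Append user data to the socket's pending (corked) output queue, allocating
 * new skbs sized to the path MTU as needed.  The queued packets are turned
 * into real IPv6 packets later by ip6_push_pending_frames().
 */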
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
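
/*
 * Turn the queue of skbs built by ip6_append_data() into one packet:
 * coalesce the queued buffers onto the head skb's frag_list, prepend the
 * extension headers and the IPv6 header, and send the result through the
 * NF_IP6_LOCAL_OUT hook to dst_output().
 */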
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
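
/*
 * Discard everything queued by ip6_append_data() without sending it,
 * counting each dropped skb as an output discard, and reset the
 * per-socket cork state.
 */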
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}