ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

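/*
 * Pick the Identification value for a fresh Fragment header.  A single
 * global counter is kept behind a spinlock; it wraps around but skips 0.
 */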
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

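/*
 * Deliver a locally generated packet to the POST_ROUTING hook.  For
 * multicast destinations a clone may first be looped back to the local
 * stack (when the sending socket allows mc_loop and we listen on the
 * group), and packets with a zero hop limit are dropped once that
 * loopback copy has been queued.
 */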
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

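/*
 * Entry point for locally generated packets: fragment when the packet is
 * larger than the path MTU (and not GSO), or when the route demands
 * fragmentation of every packet (dst_allfrag); otherwise hand the packet
 * straight to ip6_output2().
 */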
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = skb->nh.ipv6h;

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

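/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * that registered for this alert value (and, if bound, only on the
 * matching device).  Returns 1 if at least one socket consumed the skb.
 */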
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

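/*
 * Forwarding path: check policy, handle Router Alert packets and the hop
 * limit, honour proxy ND, send redirects or Packet Too Big errors where
 * required, then decrement the hop limit and pass the packet to the
 * FORWARD netfilter hook.
 */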
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

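/*
 * Walk the extension header chain and return the offset at which a
 * Fragment header has to be inserted; *nexthdr is left pointing at the
 * "next header" byte that must be rewritten to NEXTHDR_FRAGMENT.
 */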
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb_network_header(skb);
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);

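/*
 * Two fragmentation strategies: a fast path that reuses an skb whose data
 * already sits on a frag_list with the right geometry, and a slow path
 * that allocates a fresh skb per fragment and copies the payload into it.
 */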
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len -
								    sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(skb_network_header(frag), skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

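/*
 * Helper for ip6_sk_dst_check(): returns non-zero when the cached route
 * can no longer be trusted for this flow address, i.e. it is neither a
 * matching host route nor confirmed by the socket's cached address.
 */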
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

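/*
 * UDP fragmentation offload path for ip6_append_data(): build (or extend)
 * one large skb, record the GSO segment size and fragment ID, and let the
 * device split the datagram on transmit.
 */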
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}

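/*
 * Queue data on the socket's write queue for a corked/pending IPv6
 * packet.  The first call sets up the cork state (options, route, hop
 * limit, MTU); subsequent calls keep appending, growing page frags or
 * starting new fragments sized to maxfraglen, until
 * ip6_push_pending_frames() sends the whole queue.
 */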
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) +
						 IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    page_address(frag->page) +
				    frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

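/*
 * Collapse the socket's pending write queue into one skb chain, prepend
 * extension headers and the IPv6 header from the cork state, and hand the
 * result to the LOCAL_OUT hook.  The cork state is cleared on the way out.
 */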
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = skb->nh.ipv6h;

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
		      dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}