/*
 *      IPv6 output functions
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *      Based on linux/net/ipv4/ip_output.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Changes:
 *      A.N.Kuznetsov   :       arithmetic in fragmentation.
 *                              extension headers are implemented.
 *                              route changes now work.
 *                              ip6_forward does not confuse sniffers.
 *                              etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *      Imran Patel     :       frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *                      :       add ip6_append_data and related functions
 *                              for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
        static u32 ipv6_fragmentation_id = 1;
        static DEFINE_SPINLOCK(ip6_id_lock);

        spin_lock_bh(&ip6_id_lock);
        fhdr->identification = htonl(ipv6_fragmentation_id);
        if (++ipv6_fragmentation_id == 0)
                ipv6_fragmentation_id = 1;
        spin_unlock_bh(&ip6_id_lock);
}
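
/*
 * Note: the fragment ID above is one global counter shared by all flows,
 * written in network byte order (hence the htonl()).  Zero is skipped on
 * wrap, so a valid ID never collides with the "no ID chosen yet" value
 * that ip6_fragment()'s slow path keeps in frag_id.
 */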

static inline int ip6_output_finish(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;

        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);

        netif_rx(newskb);
        return 0;
}

static int ip6_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct net_device *dev = dst->dev;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
                struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
                struct inet6_dev *idev = ip6_dst_idev(skb->dst);

                if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
                    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
                                        &skb->nh.ipv6h->saddr)) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

                        /* Do not check for IFF_ALLMULTI; multicast routing
                           is not supported in any case.
                         */
                        if (newskb)
                                NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
                                        NULL, newskb->dev,
                                        ip6_dev_loopback_xmit);

                        if (skb->nh.ipv6h->hop_limit == 0) {
                                IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }

                IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
        }

        return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
                       ip6_output_finish);
}

int ip6_output(struct sk_buff *skb)
{
        if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
            dst_allfrag(skb->dst))
                return ip6_fragment(skb, ip6_output2);
        else
                return ip6_output2(skb);
}
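
/*
 * ip6_output() only decides *whether* to fragment; ip6_output2() does the
 * actual transmission.  For example (illustrative numbers): with a path
 * MTU of 1280, a 1500-byte skb takes the ip6_fragment() path while a
 * 1000-byte skb goes straight to ip6_output2().  GSO packets are exempt
 * because they are segmented to MTU-sized frames later in the stack, and
 * dst_allfrag() (broadly, set when the reported path MTU fell below the
 * IPv6 minimum) forces a fragment header on every packet.
 */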

/*
 *      xmit an sk_buff (used by TCP)
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
             struct ipv6_txoptions *opt, int ipfragok)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl->fl6_dst;
        struct dst_entry *dst = skb->dst;
        struct ipv6hdr *hdr;
        u8 proto = fl->proto;
        int seg_len = skb->len;
        int hlimit, tclass;
        u32 mtu;

        if (opt) {
                int head_room;

                /* First: exthdrs may take lots of space (~8K for now)
                   MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (skb2 == NULL) {
                                IP6_INC_STATS(ip6_dst_idev(skb->dst),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        kfree_skb(skb);
                        skb = skb2;
                        if (sk)
                                skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = skb->nh.ipv6h;

        /*
         *      Fill in the IPv6 header
         */

        hlimit = -1;
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = dst_metric(dst, RTAX_HOPLIMIT);
        if (hlimit < 0)
                hlimit = ipv6_get_hoplimit(dst->dev);

        tclass = -1;
        if (np)
                tclass = np->tclass;
        if (tclass < 0)
                tclass = 0;

        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
        ipv6_addr_copy(&hdr->daddr, first_hop);

        skb->priority = sk->sk_priority;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst),
                              IPSTATS_MIB_OUTREQUESTS);
                return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
                               dst_output);
        }

        if (net_ratelimit())
                printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
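
/*
 * Layout of the first 32-bit word written by ip6_xmit() above: 4 bits of
 * version, 8 bits of traffic class, 20 bits of flow label.  A worked
 * example (illustrative values): with tclass = 0x28 and a zero flow label,
 *
 *      htonl(0x60000000 | (0x28 << 20)) == htonl(0x62800000)
 *
 * which puts the bytes 62 80 00 00 on the wire.
 */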

/*
 *      To avoid extra problems ND packets are sent through this
 *      routine. It's code duplication but I really want to avoid
 *      extra checks since ipv6_build_header is used by TCP (which
 *      is performance critical for us).
 */
int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
               struct in6_addr *saddr, struct in6_addr *daddr,
               int proto, int len)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
        int totlen;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        totlen = len + sizeof(struct ipv6hdr);

        hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
        skb->nh.ipv6h = hdr;

        *(__be32 *)hdr = htonl(0x60000000);

        hdr->payload_len = htons(len);
        hdr->nexthdr = proto;
        hdr->hop_limit = np->hop_limit;

        ipv6_addr_copy(&hdr->saddr, saddr);
        ipv6_addr_copy(&hdr->daddr, daddr);

        return 0;
}

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
        struct ip6_ra_chain *ra;
        struct sock *last = NULL;

        read_lock(&ip6_ra_lock);
        for (ra = ip6_ra_chain; ra; ra = ra->next) {
                struct sock *sk = ra->sk;
                if (sk && ra->sel == sel &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == skb->dev->ifindex)) {
                        if (last) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        rawv6_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                rawv6_rcv(last, skb);
                read_unlock(&ip6_ra_lock);
                return 1;
        }
        read_unlock(&ip6_ra_lock);
        return 0;
}
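
/*
 * Delivery pattern above: every matching router-alert socket except the
 * last receives a clone of the skb, and the last one consumes the original.
 * This saves one skb_clone() in the common single-listener case, and the
 * function returns 1 only if some socket actually took the packet.
 */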

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
        struct ipv6hdr *hdr = skb->nh.ipv6h;
        u8 nexthdr = hdr->nexthdr;
        int offset;

        if (ipv6_ext_hdr(nexthdr)) {
                offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
                if (offset < 0)
                        return 0;
        } else
                offset = sizeof(struct ipv6hdr);

        if (nexthdr == IPPROTO_ICMPV6) {
                struct icmp6hdr *icmp6;

                if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
                        return 0;

                icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

                switch (icmp6->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
                case NDISC_NEIGHBOUR_SOLICITATION:
                case NDISC_NEIGHBOUR_ADVERTISEMENT:
                case NDISC_REDIRECT:
                        /* For reaction involving unicast neighbour discovery
                         * message destined to the proxied address, pass it to
                         * input function.
                         */
                        return 1;
                default:
                        break;
                }
        }

        /*
         * The proxying router can't forward traffic sent to a link-local
         * address, so signal the sender and discard the packet. This
         * behavior is clarified by the MIPv6 specification.
         */
        if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
                dst_link_failure(skb);
                return -1;
        }

        return 0;
}
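
/*
 * Return values of ip6_forward_proxy_check(), as interpreted by
 * ip6_forward():
 *       1  - unicast neighbour discovery message for a proxied address;
 *            deliver it locally via ip6_input().
 *       0  - nothing special; continue normal forwarding.
 *      -1  - link-local destination; the caller discards the packet.
 */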

static inline int ip6_forward_finish(struct sk_buff *skb)
{
        return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct ipv6hdr *hdr = skb->nh.ipv6h;
        struct inet6_skb_parm *opt = IP6CB(skb);

        if (ipv6_devconf.forwarding == 0)
                goto error;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        skb->ip_summed = CHECKSUM_NONE;

        /*
         *      We do no processing on RA packets, pushing them to
         *      user level AS IS without any warranty that the
         *      application will be able to interpret them. The reason
         *      is that we cannot make anything clever here.
         *
         *      We are not the end node, so if the packet contains
         *      AH/ESP we cannot do anything. Defragmentation would
         *      also be a mistake; RA packets cannot be fragmented,
         *      because there is no guarantee that different fragments
         *      will travel along the same path. --ANK
         */
        if (opt->ra) {
                u8 *ptr = skb->nh.raw + opt->ra;
                if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
                        return 0;
        }

        /*
         *      check and decrement ttl
         */
        if (hdr->hop_limit <= 1) {
                /* Force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                            0, skb->dev);
                IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

                kfree_skb(skb);
                return -ETIMEDOUT;
        }

        /* XXX: idev->cnf.proxy_ndp? */
        if (ipv6_devconf.proxy_ndp &&
            pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
                if (proxied > 0)
                        return ip6_input(skb);
                else if (proxied < 0) {
                        IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
        }

        if (!xfrm6_route_forward(skb)) {
                IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
        dst = skb->dst;

        /* IPv6 specs say nothing about it, but it is clear that we cannot
           send redirects to source routed frames.
         */
        if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
                struct neighbour *n = dst->neighbour;

                /*
                 *      incoming and outgoing devices are the same
                 *      send a redirect.
                 */

                rt = (struct rt6_info *) dst;
                if ((rt->rt6i_flags & RTF_GATEWAY))
                        target = (struct in6_addr *)&n->primary_key;
                else
                        target = &hdr->daddr;

                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
                 */
                if (xrlim_allow(dst, 1*HZ))
                        ndisc_send_redirect(skb, n, target);
        } else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
                                                  IPV6_ADDR_LOOPBACK |
                                                  IPV6_ADDR_LINKLOCAL)) {
                /* This check is security critical. */
                goto error;
        }

        if (skb->len > dst_mtu(dst)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
                IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
                IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (skb_cow(skb, dst->dev->hard_header_len)) {
                IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
                goto drop;
        }

        hdr = skb->nh.ipv6h;

        /* Mangling hops number delayed to point after skb COW */

        hdr->hop_limit--;

        IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);

error:
        IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
        kfree_skb(skb);
        return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        dst_release(to->dst);
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;
        to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
        /* Connection association is same as pre-frag packet */
        nf_conntrack_put(to->nfct);
        to->nfct = from->nfct;
        nf_conntrack_get(to->nfct);
        to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(to->nfct_reasm);
        to->nfct_reasm = from->nfct_reasm;
        nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(to->nf_bridge);
        to->nf_bridge = from->nf_bridge;
        nf_bridge_get(to->nf_bridge);
#endif
#endif
        skb_copy_secmark(to, from);
}

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
        u16 offset = sizeof(struct ipv6hdr);
        struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
        unsigned int packet_len = skb->tail - skb->nh.raw;
        int found_rhdr = 0;
        *nexthdr = &skb->nh.ipv6h->nexthdr;

        while (offset + 1 <= packet_len) {

                switch (**nexthdr) {

                case NEXTHDR_HOP:
                        break;
                case NEXTHDR_ROUTING:
                        found_rhdr = 1;
                        break;
                case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
                        if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
                                break;
#endif
                        if (found_rhdr)
                                return offset;
                        break;
                default:
                        return offset;
                }

                offset += ipv6_optlen(exthdr);
                *nexthdr = &exthdr->nexthdr;
                exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
        }

        return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
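
/*
 * ip6_find_1stfragopt() above returns the length of the "unfragmentable
 * part" of the packet: the IPv6 header plus the extension headers that
 * must be repeated in every fragment (roughly hop-by-hop, routing, and
 * the destination options that travel with them).  ip6_fragment() below
 * inserts the fragment header right after that point.  It has two paths:
 * a fast path that reuses an existing frag_list when every chunk already
 * fits the MTU and is 8-byte aligned, and a slow path that allocates
 * fresh skbs and copies the payload into them.
 */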

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct net_device *dev;
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb->dst;
        struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
        __be32 frag_id = 0;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;

        dev = rt->u.dst.dev;
        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;

        mtu = dst_mtu(&rt->u.dst);
        if (np && np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
        }
        mtu -= hlen + sizeof(struct frag_hdr);

        if (skb_shinfo(skb)->frag_list) {
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                sock_hold(skb->sk);
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                                skb->truesize -= frag->truesize;
                        }
                }

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                /* BUILD HEADER */

                *prevhdr = NEXTHDR_FRAGMENT;
                tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
                }

                __skb_pull(skb, hlen);
                fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
                __skb_push(skb, hlen);
                skb_reset_network_header(skb);
                memcpy(skb->nh.raw, tmp_hdr, hlen);

                ipv6_select_ident(skb, fh);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
                frag_id = fh->identification;

                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                skb->nh.ipv6h->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));

                dst_hold(&rt->u.dst);

                for (;;) {
                        /* Prepare header of the next frame,
                         * before previous one went down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                frag->h.raw = frag->data;
                                fh = (struct frag_hdr *)__skb_push(frag,
                                                sizeof(struct frag_hdr));
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(frag->nh.raw, tmp_hdr, hlen);
                                offset += skb->len - hlen - sizeof(struct frag_hdr);
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
                                fh->frag_off = htons(offset);
                                if (frag->next != NULL)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
                                frag->nh.ipv6h->payload_len = htons(frag->len -
                                                sizeof(struct ipv6hdr));
                                ip6_copy_metadata(frag, skb);
                        }

                        err = output(skb);
                        if (!err)
                                IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
                                              IPSTATS_MIB_FRAGCREATES);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                kfree(tmp_hdr);

                if (err == 0) {
                        IP6_INC_STATS(ip6_dst_idev(&rt->u.dst),
                                      IPSTATS_MIB_FRAGOKS);
                        dst_release(&rt->u.dst);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }

                IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
                dst_release(&rt->u.dst);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;                     /* Where to start from */

        /*
         *      Fragment the datagram.
         */

        *prevhdr = NEXTHDR_FRAGMENT;

        /*
         *      Keep copying data until we run out.
         */
        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
                                      LL_RESERVED_SPACE(rt->u.dst.dev),
                                      GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(ip6_dst_idev(skb->dst),
                                      IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip6_copy_metadata(frag, skb);
                skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                skb_reset_network_header(frag);
                fh = (struct frag_hdr *)(frag->data + hlen);
                frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */
                if (skb->sk)
                        skb_set_owner_w(frag, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */
                memcpy(frag->nh.raw, skb->data, hlen);

                /*
                 *      Build fragment header.
                 */
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                if (!frag_id) {
                        ipv6_select_ident(skb, fh);
                        frag_id = fh->identification;
                } else
                        fh->identification = frag_id;

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, frag->h.raw, len))
                        BUG();
                left -= len;

                fh->frag_off = htons(offset);
                if (left > 0)
                        fh->frag_off |= htons(IP6_MF);
                frag->nh.ipv6h->payload_len = htons(frag->len -
                                                    sizeof(struct ipv6hdr));

                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                err = output(frag);
                if (err)
                        goto fail;

                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
        }
        IP6_INC_STATS(ip6_dst_idev(skb->dst),
                      IPSTATS_MIB_FRAGOKS);
        kfree_skb(skb);
        return err;

fail:
        IP6_INC_STATS(ip6_dst_idev(skb->dst),
                      IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return err;
}

static inline int ip6_rt_check(struct rt6key *rt_key,
                               struct in6_addr *fl_addr,
                               struct in6_addr *addr_cache)
{
        return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
                (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}
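
/*
 * ip6_rt_check() returns nonzero when the cached route can no longer be
 * trusted for fl_addr: the route is neither a /128 host route to that
 * address, nor does the socket's last-used address cache match it.
 */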

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          struct dst_entry *dst,
                                          struct flowi *fl)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct rt6_info *rt = (struct rt6_info *)dst;

        if (!dst)
                goto out;

        /* Yes, checking route validity in the unconnected case is not
         * very simple. Take into account that we do not support routing
         * by source, TOS, and MSG_DONTROUTE            --ANK (980726)
         *
         * 1. ip6_rt_check(): If route was host route,
         *    check that cached destination is current.
         *    If it is network route, we still may
         *    check its validity using saved pointer
         *    to the last used address: daddr_cache.
         *    We do not want to save whole address now,
         *    (because main consumer of this service
         *    is tcp, which does not have this problem),
         *    so that the last trick works only on connected
         *    sockets.
         * 2. oif also should be the same.
         */
        if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
            (fl->oif && fl->oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }

out:
        return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
                               struct dst_entry **dst, struct flowi *fl)
{
        int err;

        if (*dst == NULL)
                *dst = ip6_route_output(sk, fl);

        if ((err = (*dst)->error))
                goto out_err_release;

        if (ipv6_addr_any(&fl->fl6_src)) {
                err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
                if (err)
                        goto out_err_release;
        }

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        /*
         * Here if the dst entry we've looked up
         * has a neighbour entry that is in the INCOMPLETE
         * state and the src address from the flow is
         * marked as OPTIMISTIC, we release the found
         * dst entry and replace it instead with the
         * dst entry of the nexthop router
         */
        if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
                struct flowi fl_gw;
                int redirect;

                ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);

                redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
                if (ifp)
                        in6_ifa_put(ifp);

                if (redirect) {
                        /*
                         * We need to get the dst entry for the
                         * default router instead
                         */
                        dst_release(*dst);
                        memcpy(&fl_gw, fl, sizeof(struct flowi));
                        memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
                        *dst = ip6_route_output(sk, &fl_gw);
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
        }
#endif

        return 0;

out_err_release:
        dst_release(*dst);
        *dst = NULL;
        return err;
}

/**
 *      ip6_dst_lookup - perform route lookup on flow
 *      @sk: socket which provides route info
 *      @dst: pointer to dst_entry * for result
 *      @fl: flow to lookup
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
        *dst = NULL;
        return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *      ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *      @sk: socket which provides the dst cache and route info
 *      @dst: pointer to dst_entry * for result
 *      @fl: flow to lookup
 *
 *      This function performs a route lookup on the given flow with the
 *      possibility of using the cached route in the socket if it is valid.
 *      It will take the socket dst lock when operating on the dst cache.
 *      As a result, this function can only be used in process context.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
        *dst = NULL;
        if (sk) {
                *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
                *dst = ip6_sk_dst_check(sk, *dst, fl);
        }

        return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
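
/*
 * A minimal caller sketch (illustrative only; the variables oif and daddr
 * are made up and not part of this file):
 *
 *      struct flowi fl = { .proto = IPPROTO_UDP, .oif = oif };
 *      struct dst_entry *dst;
 *      int err;
 *
 *      ipv6_addr_copy(&fl.fl6_dst, daddr);
 *      err = ip6_sk_dst_lookup(sk, &dst, &fl);
 *      if (err)
 *              return err;     (dst was released and set to NULL)
 *
 * On success the caller owns a reference on dst, and fl.fl6_src has been
 * filled in if it was unspecified.
 */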

static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                                    int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* There is support for UDP large send offload by the network
         * device, so create one single skb packet containing the complete
         * udp datagram
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
                if (skb == NULL)
                        return -ENOMEM;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->h.raw = skb->data + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;
        }

        err = skb_append_datato_frags(sk, skb, getfrag, from,
                                      (length - transhdrlen));
        if (!err) {
                struct frag_hdr fhdr;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
                                            sizeof(struct frag_hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                ipv6_select_ident(skb, &fhdr);
                skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);

                return 0;
        }
        /* There is not enough support to do UDP LSO,
         * so follow normal path
         */
        kfree_skb(skb);

        return err;
}
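
/*
 * Illustrative arithmetic for the gso_size set above (example numbers,
 * not from the original source): with mtu = 1500 and fragheaderlen = 40
 * (a bare IPv6 header), each UFO-generated fragment carries
 * 1500 - 40 - 8 = 1452 bytes of UDP payload, leaving room for the
 * per-fragment IPv6 and fragment headers.
 */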

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
        int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
        struct rt6_info *rt, unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        unsigned int maxfraglen, fragheaderlen;
        int exthdrlen;
        int hh_len;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        int csummode = CHECKSUM_NONE;

        if (flags & MSG_PROBE)
                return 0;
        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking
                 */
                if (opt) {
                        if (np->cork.opt == NULL) {
                                np->cork.opt = kmalloc(opt->tot_len,
                                                       sk->sk_allocation);
                                if (unlikely(np->cork.opt == NULL))
                                        return -ENOBUFS;
                        } else if (np->cork.opt->tot_len < opt->tot_len) {
                                printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
                                return -EINVAL;
                        }
                        memcpy(np->cork.opt, opt, opt->tot_len);
                        inet->cork.flags |= IPCORK_OPT;
                        /* need source address above miyazawa*/
                }
                dst_hold(&rt->u.dst);
                np->cork.rt = rt;
                inet->cork.fl = *fl;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
                mtu = dst_mtu(rt->u.dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
                }
                inet->cork.fragsize = mtu;
                if (dst_allfrag(rt->u.dst.path))
                        inet->cork.flags |= IPCORK_ALLFRAG;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
                length += exthdrlen;
                transhdrlen += exthdrlen;
        } else {
                rt = np->cork.rt;
                fl = &inet->cork.fl;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = np->cork.opt;
                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
                        (opt ? opt->opt_nflen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
                     sizeof(struct frag_hdr);
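
        /*
         * Illustrative arithmetic (example numbers, not from the original
         * source): with mtu = 1500 and fragheaderlen = 40,
         * maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488, so each
         * non-final fragment carries 1488 - 40 = 1448 payload bytes, a
         * multiple of 8 as required for fragment offsets.
         */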
        if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
                if (inet->cork.length + length > sizeof(struct ipv6hdr) +
                    IPV6_MAXPLEN - fragheaderlen) {
                        ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
                        return -EMSGSIZE;
                }
        }

        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
         * Otherwise, we need to reserve fragment header and
         * fragment alignment (= 8-15 octets, in total).
         *
         * Note that we may need to "move" the data from the tail
         * of the buffer to the new fragment when we split
         * the message.
         *
         * FIXME: It may be fragmented into multiple chunks
         *        at once if non-fragmentable extension headers
         *        are too large.
         * --yoshfuji
         */

        inet->cork.length += length;
        if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {

                err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
                                          fragheaderlen, transhdrlen, mtu,
                                          flags);
                if (err)
                        goto error;
                return 0;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = (inet->cork.length <= mtu &&
                        !(inet->cork.flags & IPCORK_ALLFRAG) ?
                        mtu : maxfraglen) - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;

                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;

                        /* There's no room in the current skb */
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > (inet->cork.length <= mtu &&
                                       !(inet->cork.flags & IPCORK_ALLFRAG) ?
                                       mtu : maxfraglen) - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;

                        fraglen = datalen + fragheaderlen;
                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features & NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /*
                         * The last fragment gets additional space at tail.
                         * Note: we overallocate on fragments with MSG_MORE
                         * because we have no idea if we're the last one.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->u.dst.trailer_len;

                        /*
                         * We just reserve space for fragment header.
                         * Note: this may be overallocation if the message
                         * (without MSG_MORE) fits into the MTU.
                         */
                        alloclen += sizeof(struct frag_hdr);

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;
                        /*
                         * Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        /* reserve for fragmentation */
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

                        /*
                         * Find where to start putting bytes
                         */
                        data = skb_put(skb, fraglen);
                        skb->nh.raw = data + exthdrlen;
                        data += fragheaderlen;
                        skb->h.raw = data + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }
                        copy = datalen - transhdrlen - fraggap;
                        if (copy < 0) {
                                err = -EINVAL;
                                kfree_skb(skb);
                                goto error;
                        } else if (copy > 0 &&
                                   getfrag(from, data + transhdrlen, offset,
                                           copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                    offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page,
                                                        sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                                skb->truesize += PAGE_SIZE;
                                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from,
                                    page_address(frag->page) +
                                    frag->page_offset + frag->size,
                                    offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                }
                offset += copy;
                length -= copy;
        }
        return 0;
error:
        inet->cork.length -= length;
        IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
        return err;
}
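
/*
 * The corking workflow: ip6_append_data() above may be called repeatedly
 * to queue data on sk->sk_write_queue; ip6_push_pending_frames() below
 * then builds the IPv6 header and hands the packet (with any extra queued
 * skbs attached as a frag_list) to dst_output() via netfilter; and
 * ip6_flush_pending_frames() discards the queue after an error.
 */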

int ip6_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
        struct ipv6_txoptions *opt = np->cork.opt;
        struct rt6_info *rt = np->cork.rt;
        struct flowi *fl = &inet->cork.fl;
        unsigned char proto = fl->proto;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb->nh.raw)
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        ipv6_addr_copy(final_dst, &fl->fl6_dst);
        __skb_pull(skb, skb->h.raw - skb->nh.raw);
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
        if (opt && opt->opt_nflen)
                ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = skb->nh.ipv6h;

        *(__be32 *)hdr = fl->fl6_flowlabel |
                         htonl(0x60000000 | ((int)np->cork.tclass << 20));

        if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
                hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        else
                hdr->payload_len = 0;
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
        ipv6_addr_copy(&hdr->daddr, final_dst);

        skb->priority = sk->sk_priority;

        skb->dst = dst_clone(&rt->u.dst);
        IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev,
                      dst_output);
        if (err) {
                if (err > 0)
                        err = np->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(np->cork.opt);
        np->cork.opt = NULL;
        if (np->cork.rt) {
                dst_release(&np->cork.rt->u.dst);
                np->cork.rt = NULL;
                inet->cork.flags &= ~IPCORK_ALLFRAG;
        }
        memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
        return err;
error:
        goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst),
                              IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
        }

        inet->cork.flags &= ~IPCORK_OPT;

        kfree(np->cork.opt);
        np->cork.opt = NULL;
        if (np->cork.rt) {
                dst_release(&np->cork.rt->u.dst);
                np->cork.rt = NULL;
                inet->cork.flags &= ~IPCORK_ALLFRAG;
        }
        memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}