ip_output.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
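
/* Note on the likely(err == 1) test above: nf_hook() returns 1 when the
 * LOCAL_OUT hook chain accepts the packet without stealing or queueing
 * it, meaning "netfilter let it through; the caller should continue to
 * dst_output()".  Any other value is either an error code or a sign that
 * netfilter now owns the skb.
 */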
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
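
/* ip_build_and_send_pkt() serves callers that already hold a routed skb
 * and only need a minimal header stamped on it; TCP, for example, uses it
 * to emit SYN-ACK segments.  Note that with a source route option the
 * wire daddr becomes opt->opt.faddr, while ip_options_build() records the
 * final destination inside the option itself.
 */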
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res = dst_neigh_output(dst, neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
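
/* GSO packets are deliberately not fragmented here: an skb_is_gso() frame
 * carries a gso_size no larger than the path MTU and is split into
 * MTU-sized segments later on the transmit path, by the GSO code or the
 * NIC itself.  Only over-MTU, non-GSO packets take the ip_fragment()
 * detour.
 */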
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back to us after forwarding; they will be
		   dropped by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer has set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
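
/* The single 16-bit store above packs version (4), header length (5
 * 32-bit words) and TOS into one write: htons((4 << 12) | (5 << 8) | tos)
 * yields the first two bytes of the IP header in network byte order.
 * ip_select_ident_more() is passed "segments - 1" so that a GSO frame
 * reserves an IP ID for every segment it will later be split into.
 */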
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments: we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame,
			 * before the previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/* for offloaded checksums cleanup checksum before fragmentation */
	if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
		goto fail;
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);
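
/* Two strategies coexist above.  The fast path reuses an existing,
 * well-formed frag_list: each element already holds one fragment's worth
 * of data, so only IP headers need to be stamped on.  The slow path
 * allocates a fresh skb per fragment and copies data into it.  Fragment
 * payloads (except the last) stay multiples of 8 bytes, since frag_off is
 * expressed in 8-byte units.
 */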
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
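
/* ip_generic_getfrag() is the getfrag callback most datagram senders pass
 * to ip_append_data(): 'from' is the user iovec, and the copy folds the
 * payload checksum into skb->csum on the fly unless the device will
 * compute it (CHECKSUM_PARTIAL).  udp_sendmsg() and raw_sendmsg() are
 * typical users.
 */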
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so build
	 * one single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
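
/* With UFO the over-sized datagram is queued as a single SKB_GSO_UDP skb;
 * gso_size tells the driver (or the software GSO fallback) where to cut
 * it into on-the-wire IP fragments, so ip_fragment() is never involved
 * for this packet.
 */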
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we want
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb,
	 * each segment of which is an IP fragment ready for sending to the
	 * network after adding an appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
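
/* Queue invariant maintained above: every skb on the cork queue except
 * the last is exactly maxfraglen bytes (fragheaderlen plus a multiple of
 * 8 bytes of payload), so later code can stamp valid fragment offsets
 * without moving data.  'fraggap' covers the bytes that spilled past
 * maxfraglen in the previous skb: they are checksum-copied into the new
 * skb and trimmed from the old one.
 */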
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP: other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
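
/* A minimal sketch of the calling convention, loosely following what
 * udp_sendmsg() does (error handling trimmed; fl4, ipc and rt are assumed
 * to have been set up by the caller):
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip_push_pending_frames(sk, &fl4);
 */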
ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here.  No matter what transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(iph, &rt->dst, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
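
/* The queued skbs are not merged by copying: the first skb becomes the
 * head and the rest are chained onto its frag_list, which is exactly the
 * layout ip_fragment()'s fast path expects if the datagram later turns
 * out to exceed the MTU.
 */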
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, unfragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}
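
/* ip_make_skb() is the lock-free counterpart of the append/push pair: it
 * runs the same __ip_append_data()/__ip_make_skb() machinery against a
 * queue and cork that live on the caller's stack instead of on the
 * socket, so an uncorked sender can build a datagram without touching
 * sk->sk_write_queue.
 */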
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 *
 *	Use a fake percpu inet socket to avoid false sharing and contention.
 */
static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
	.sk = {
		.__sk_common = {
			.skc_refcnt = ATOMIC_INIT(1),
		},
		.sk_wmem_alloc	= ATOMIC_INIT(1),
		.sk_allocation	= GFP_ATOMIC,
		.sk_flags	= (1UL << SOCK_USE_WRITE_QUEUE),
	},
	.pmtudisc	= IP_PMTUDISC_WANT,
	.uc_ttl		= -1,
};

void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
			   __be32 saddr, const struct ip_reply_arg *arg,
			   unsigned int len)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct sk_buff *nskb;
	struct sock *sk;
	struct inet_sock *inet;

	if (ip_options_echo(&replyopts.opt.opt, skb))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet = &get_cpu_var(unicast_sock);

	inet->tos = arg->tos;
	sk = &inet->sk;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sock_net_set(sk, net);
	__skb_queue_head_init(&sk->sk_write_queue);
	sk->sk_sndbuf = sysctl_wmem_default;
	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		skb_orphan(nskb);
		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
		ip_push_pending_frames(sk, &fl4);
	}
	put_cpu_var(unicast_sock);

	ip_rt_put(rt);
}
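
/* get_cpu_var() disables preemption until the matching put_cpu_var(), so
 * the per-cpu unicast_sock is never used by two senders at once; that is
 * what makes it safe to scribble tos/priority/protocol on it for each
 * reply.  TCP's RST/ACK generation (tcp_v4_send_reset and friends) is the
 * main caller of this path.
 */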
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}