/*
 * ip_vs_xmit.c: various packet transmitters for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

#include <linux/kernel.h>
#include <linux/tcp.h>                  /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>                    /* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h>                   /* for icmp_send */
#include <net/route.h>                  /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

#include <net/ip_vs.h>


/*
 *      Destination cache to speed up outgoing route lookup
 */
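/*
 * The two helpers below implement that cache: __ip_vs_dst_set()
 * installs a new route in dest->dst_cache and releases the old one,
 * while __ip_vs_dst_check() hands back the cached route unless it
 * looks stale (obsolete, or cached for a different TOS on IPv4) and
 * dst->ops->check() can no longer revalidate it.  Callers such as
 * __ip_vs_get_out_rt() take dest->dst_lock around the
 * check-and-refill sequence.
 */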
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = dest->dst_cache;
	dest->dst_cache = dst;
	dest->dst_rtos = rtos;
	dst_release(old_dst);
}

static inline struct dst_entry *
__ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
{
	struct dst_entry *dst = dest->dst_cache;

	if (!dst)
		return NULL;
	if ((dst->obsolete
	     || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
	    dst->ops->check(dst, cookie) == NULL) {
		dest->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}
	dst_hold(dst);
	return dst;
}
static struct rtable *
__ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
{
	struct rtable *rt;			/* Route to the other host */
	struct ip_vs_dest *dest = cp->dest;

	if (dest) {
		spin_lock(&dest->dst_lock);
		if (!(rt = (struct rtable *)
		      __ip_vs_dst_check(dest, rtos, 0))) {
			struct flowi fl = {
				.oif = 0,
				.nl_u = {
					.ip4_u = {
						.daddr = dest->addr.ip,
						.saddr = 0,
						.tos = rtos, } },
			};

			if (ip_route_output_key(&init_net, &rt, &fl)) {
				spin_unlock(&dest->dst_lock);
				IP_VS_DBG_RL("ip_route_output error, "
					     "dest: %u.%u.%u.%u\n",
					     NIPQUAD(dest->addr.ip));
				return NULL;
			}
			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
			IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n",
				  NIPQUAD(dest->addr.ip),
				  atomic_read(&rt->u.dst.__refcnt), rtos);
		}
		spin_unlock(&dest->dst_lock);
	} else {
		struct flowi fl = {
			.oif = 0,
			.nl_u = {
				.ip4_u = {
					.daddr = cp->daddr.ip,
					.saddr = 0,
					.tos = rtos, } },
		};

		if (ip_route_output_key(&init_net, &rt, &fl)) {
			IP_VS_DBG_RL("ip_route_output error, dest: "
				     "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip));
			return NULL;
		}
	}

	return rt;
}
#ifdef CONFIG_IP_VS_IPV6
static struct rt6_info *
__ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
{
	struct rt6_info *rt;			/* Route to the other host */
	struct ip_vs_dest *dest = cp->dest;

	if (dest) {
		spin_lock(&dest->dst_lock);
		rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
		if (!rt) {
			struct flowi fl = {
				.oif = 0,
				.nl_u = {
					.ip6_u = {
						.daddr = dest->addr.in6,
						.saddr = {
							.s6_addr32 =
								{ 0, 0, 0, 0 },
						},
					},
				},
			};

			rt = (struct rt6_info *)ip6_route_output(&init_net,
								 NULL, &fl);
			if (!rt) {
				spin_unlock(&dest->dst_lock);
				IP_VS_DBG_RL("ip6_route_output error, "
					     "dest: " NIP6_FMT "\n",
					     NIP6(dest->addr.in6));
				return NULL;
			}
			__ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
			IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n",
				  NIP6(dest->addr.in6),
				  atomic_read(&rt->u.dst.__refcnt));
		}
		spin_unlock(&dest->dst_lock);
	} else {
		struct flowi fl = {
			.oif = 0,
			.nl_u = {
				.ip6_u = {
					.daddr = cp->daddr.in6,
					.saddr = {
						.s6_addr32 = { 0, 0, 0, 0 },
					},
				},
			},
		};

		rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
		if (!rt) {
			IP_VS_DBG_RL("ip6_route_output error, dest: "
				     NIP6_FMT "\n", NIP6(cp->daddr.in6));
			return NULL;
		}
	}

	return rt;
}
#endif
/*
 *	Release dest->dst_cache before a dest is removed
 */
void
ip_vs_dst_reset(struct ip_vs_dest *dest)
{
	struct dst_entry *old_dst;

	old_dst = dest->dst_cache;
	dest->dst_cache = NULL;
	dst_release(old_dst);
}
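
/*
 * Common tail for the transmitters below: flag the skb as IPVS
 * property so it is not processed by IPVS again, let
 * skb_forward_csum() fix up the checksum state for a forwarded
 * packet, and re-inject it on the LOCAL_OUT hook so it still
 * traverses netfilter before dst_output() sends it out via the
 * route's device.
 */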
#define IP_VS_XMIT(pf, skb, rt)				\
do {							\
	(skb)->ipvs_property = 1;			\
	skb_forward_csum(skb);				\
	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
		(rt)->u.dst.dev, dst_output);		\
} while (0)

/*
 *      NULL transmitter (do nothing except return NF_ACCEPT)
 */
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp)
{
	/* we do not touch skb and do not need pskb ptr */
	return NF_ACCEPT;
}

/*
 *      Bypass transmitter
 *      Let packets bypass the destination when the destination is not
 *      available; it may only be used in a transparent cache cluster.
 */
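/*
 * In practice the function below just routes the packet to its
 * original destination address (iph->daddr) instead of a real
 * server, with the usual MTU check and route swap.
 */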
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct iphdr  *iph = ip_hdr(skb);
	u8     tos = iph->tos;
	int    mtu;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = iph->daddr,
				.saddr = 0,
				.tos = RT_TOS(tos), } },
	};

	EnterFunction(10);

	if (ip_route_output_key(&init_net, &rt, &fl)) {
		IP_VS_DBG_RL("ip_vs_bypass_xmit(): ip_route_output error, "
			     "dest: %u.%u.%u.%u\n", NIPQUAD(iph->daddr));
		goto tx_error_icmp;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
		ip_rt_put(rt);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("ip_vs_bypass_xmit(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;			/* Route to the other host */
	struct ipv6hdr  *iph = ipv6_hdr(skb);
	int    mtu;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip6_u = {
				.daddr = iph->daddr,
				.saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
	};

	EnterFunction(10);

	rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
	if (!rt) {
		IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, "
			     "dest: " NIP6_FMT "\n", NIP6(iph->daddr));
		goto tx_error_icmp;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if (skb->len > mtu) {
		dst_release(&rt->u.dst);
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(skb == NULL)) {
		dst_release(&rt->u.dst);
		return NF_STOLEN;
	}

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET6, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif

/*
 *      NAT transmitter (only for outside-to-inside nat forwarding)
 *      Not used for related ICMP
 */
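/*
 * Rough shape of the DNAT path below: resolve (or reuse) the cached
 * route to the real server, check the MTU, make the headers writable,
 * let the protocol's dnat_handler rewrite the transport header (ports,
 * checksum), rewrite the destination address to cp->daddr, refresh the
 * IP checksum and hand the result to IP_VS_XMIT().
 */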
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	       struct ip_vs_protocol *pp)
{
	struct rtable *rt;		/* Route to the other host */
	int mtu;
	struct iphdr *iph = ip_hdr(skb);

	EnterFunction(10);

	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
		__be16 _pt, *p;
		p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
		ip_rt_put(rt);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct iphdr)))
		goto tx_error_put;

	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
		goto tx_error_put;

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
		goto tx_error;
	ip_hdr(skb)->daddr = cp->daddr.ip;
	ip_send_check(ip_hdr(skb));

	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");

	/* FIXME: when an application helper enlarges the packet and its
	   length becomes larger than the MTU of the outgoing device,
	   there will still be an MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	LeaveFunction(10);
	kfree_skb(skb);
	return NF_STOLEN;
tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;		/* Route to the other host */
	int mtu;

	EnterFunction(10);

	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
		__be16 _pt, *p;
		p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
				       sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	rt = __ip_vs_get_out_rt_v6(cp);
	if (!rt)
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if (skb->len > mtu) {
		dst_release(&rt->u.dst);
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP_VS_DBG_RL_PKT(0, pp, skb, 0,
				 "ip_vs_nat_xmit_v6(): frag needed for");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
		goto tx_error_put;

	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
		goto tx_error_put;

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
		goto tx_error;
	ipv6_hdr(skb)->daddr = cp->daddr.in6;

	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");

	/* FIXME: when an application helper enlarges the packet and its
	   length becomes larger than the MTU of the outgoing device,
	   there will still be an MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET6, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	LeaveFunction(10);
	kfree_skb(skb);
	return NF_STOLEN;
tx_error_put:
	dst_release(&rt->u.dst);
	goto tx_error;
}
#endif

/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet whose
 *   destination will be set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing through the load balancer. This can greatly
 *   increase the scalability of the virtual server.
 *
 *   Used for ANY protocol
 */
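/*
 * Sketch of the result (an illustration, not taken verbatim from this
 * file): the datagram that leaves ip_vs_tunnel_xmit() is
 *
 *	+------------------------------+
 *	| outer IP header              |  saddr = rt->rt_src,
 *	|   protocol = IPPROTO_IPIP    |  daddr = rt->rt_dst (real server)
 *	+------------------------------+
 *	| original IP header + payload |  daddr still the virtual IP
 *	+------------------------------+
 *
 * The real server strips the outer header with its own tunnel device
 * and replies to the client directly.
 */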
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	u8     tos = old_iph->tos;
	__be16 df = old_iph->frag_off;
	sk_buff_data_t old_transport_header = skb->transport_header;
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    mtu;

	EnterFunction(10);

	if (skb->protocol != htons(ETH_P_IP)) {
		IP_VS_DBG_RL("ip_vs_tunnel_xmit(): protocol error, "
			     "ETH_P_IP: %d, skb protocol: %d\n",
			     htons(ETH_P_IP), skb->protocol);
		goto tx_error;
	}

	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
		goto tx_error_icmp;

	tdev = rt->u.dst.dev;

	mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	if (mtu < 68) {
		ip_rt_put(rt);
		IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n");
		goto tx_error;
	}
	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	df |= (old_iph->frag_off & htons(IP_DF));

	if ((old_iph->frag_off & htons(IP_DF))
	    && mtu < ntohs(old_iph->tot_len)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		IP_VS_DBG_RL("ip_vs_tunnel_xmit(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom
	    || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			kfree_skb(skb);
			IP_VS_ERR_RL("ip_vs_tunnel_xmit(): no memory\n");
			return NF_STOLEN;
		}
		kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = old_transport_header;

	/* fix old IP header checksum */
	ip_send_check(old_iph);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 * Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_IPIP;
	iph->tos = tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = old_iph->ttl;
	ip_select_ident(iph, &rt->u.dst, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_local_out(skb);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
	sk_buff_data_t old_transport_header = skb->transport_header;
	struct ipv6hdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    mtu;

	EnterFunction(10);

	if (skb->protocol != htons(ETH_P_IPV6)) {
		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, "
			     "ETH_P_IPV6: %d, skb protocol: %d\n",
			     htons(ETH_P_IPV6), skb->protocol);
		goto tx_error;
	}

	rt = __ip_vs_get_out_rt_v6(cp);
	if (!rt)
		goto tx_error_icmp;

	tdev = rt->u.dst.dev;

	mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
	/* TODO IPv6: do we need this check in IPv6? */
	if (mtu < 1280) {
		dst_release(&rt->u.dst);
		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
		goto tx_error;
	}
	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		dst_release(&rt->u.dst);
		IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

	if (skb_headroom(skb) < max_headroom
	    || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			dst_release(&rt->u.dst);
			kfree_skb(skb);
			IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n");
			return NF_STOLEN;
		}
		kfree_skb(skb);
		skb = new_skb;
		old_iph = ipv6_hdr(skb);
	}

	skb->transport_header = old_transport_header;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 * Push down and install the new IPv6 header.
	 */
	iph = ipv6_hdr(skb);
	iph->version = 6;
	iph->nexthdr = IPPROTO_IPV6;
	/* the outer header adds a full struct ipv6hdr, in network byte order */
	iph->payload_len = old_iph->payload_len;
	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
	iph->priority = old_iph->priority;
	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
	iph->daddr = rt->rt6i_dst.addr;
	iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */
	iph->hop_limit = old_iph->hop_limit;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip6_local_out(skb);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif

/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 */
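/*
 * Unlike NAT and tunnelling, direct routing does not touch the packet
 * at all: only the attached route changes, so the datagram is emitted
 * towards the chosen real server while still carrying the virtual IP
 * as its destination.  The real server is expected to accept that
 * address locally (typically on a non-ARPing interface).
 */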
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	      struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct iphdr  *iph = ip_hdr(skb);
	int    mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		IP_VS_DBG_RL("ip_vs_dr_xmit(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		 struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;			/* Route to the other host */
	int    mtu;

	EnterFunction(10);

	rt = __ip_vs_get_out_rt_v6(cp);
	if (!rt)
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		dst_release(&rt->u.dst);
		IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n");
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(skb == NULL)) {
		dst_release(&rt->u.dst);
		return NF_STOLEN;
	}

	/* drop old route */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET6, skb, rt);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif

/*
 *	ICMP packet transmitter
 *	called by the ip_vs_in_icmp
 */
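/*
 * Only VS/NAT connections need mangling here: the ICMP error embeds
 * the header of the original datagram, so ip_vs_nat_icmp() (or its
 * IPv6 counterpart) translates the addresses before the error is sent
 * on.  For all other forwarding methods the packet is simply handed
 * to the connection's packet_xmit callback, as the code below shows.
 */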
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp, int offset)
{
	struct rtable	*rt;	/* Route to the other host */
	int mtu;
	int rc;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */
	if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
		ip_rt_put(rt);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error_put;

	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
		goto tx_error_put;

	/* drop the old route when skb is not shared */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	ip_vs_nat_icmp(skb, pp, cp, 0);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET, skb, rt);

	rc = NF_STOLEN;
	goto out;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev_kfree_skb(skb);
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;

tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, int offset)
{
	struct rt6_info	*rt;	/* Route to the other host */
	int mtu;
	int rc;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */
	rt = __ip_vs_get_out_rt_v6(cp);
	if (!rt)
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->u.dst);
	if (skb->len > mtu) {
		dst_release(&rt->u.dst);
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error_put;

	if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
		goto tx_error_put;

	/* drop the old route when skb is not shared */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	ip_vs_nat_icmp_v6(skb, pp, cp, 0);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(PF_INET6, skb, rt);

	rc = NF_STOLEN;
	goto out;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev_kfree_skb(skb);
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;

tx_error_put:
	dst_release(&rt->u.dst);
	goto tx_error;
}
#endif