/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static int sysctl_ipfrag_max_dist __read_mostly = 64;
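/*
 * Per-fragment reassembly state, stored in the skb control buffer
 * (skb->cb) and accessed via FRAG_CB().  The inet_skb_parm member must
 * stay first so that IPCB() continues to work on queued fragments.
 */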
struct ipfrag_skb_cb
{
        struct inet_skb_parm    h;
        int                     offset;
};

#define FRAG_CB(skb)    ((struct ipfrag_skb_cb *)((skb)->cb))
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
        struct inet_frag_queue q;

        u32             user;
        __be32          saddr;
        __be32          daddr;
        __be16          id;
        u8              protocol;
        u8              ecn;            /* RFC3168 support */
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
};

#define IPFRAG_ECN_CLEAR  0x01          /* one frag had INET_ECN_NOT_ECT */
#define IPFRAG_ECN_SET_CE 0x04          /* one frag had INET_ECN_CE */
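/*
 * Map the ECN field of a fragment's TOS byte to the IPFRAG_ECN_* flags
 * accumulated in ipq->ecn: INET_ECN_NOT_ECT yields IPFRAG_ECN_CLEAR,
 * INET_ECN_CE yields IPFRAG_ECN_SET_CE, and both ECT codepoints yield 0.
 */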
static inline u8 ip4_frag_ecn(u8 tos)
{
        tos = (tos & INET_ECN_MASK) + 1;
        /*
         * After the last operation we have (in binary):
         * INET_ECN_NOT_ECT => 001
         * INET_ECN_ECT_1   => 010
         * INET_ECN_ECT_0   => 011
         * INET_ECN_CE      => 100
         */
        return (tos & 2) ? 0 : tos;
}
static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
        return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev);

struct ip4_create_arg {
        struct iphdr *iph;
        u32 user;
};
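/*
 * Hash a fragment's (id, protocol, saddr, daddr) into one of
 * INETFRAGS_HASHSZ buckets.  ip4_frags.rnd seeds the jhash and is
 * regenerated periodically (every ipfrag_secret_interval) by the
 * generic inet_frag code to frustrate hash-collision attacks.
 */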
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
        struct ipq *ipq;

        ipq = container_of(q, struct ipq, q);
        return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}
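/*
 * Fragments belong to the same datagram iff their (id, saddr, daddr,
 * protocol) tuples match.  'user' additionally separates independent
 * defragmentation contexts (e.g. conntrack vs. local delivery), so the
 * same packet can be reassembled independently by each user.
 */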
static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp;
        struct ip4_create_arg *arg = a;

        qp = container_of(q, struct ipq, q);
        return qp->id == arg->iph->id &&
               qp->saddr == arg->iph->saddr &&
               qp->daddr == arg->iph->daddr &&
               qp->protocol == arg->iph->protocol &&
               qp->user == arg->user;
}
/* Memory Tracking Functions. */
static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp = container_of(q, struct ipq, q);
        struct ip4_create_arg *arg = a;

        qp->protocol = arg->iph->protocol;
        qp->id = arg->iph->id;
        qp->ecn = ip4_frag_ecn(arg->iph->tos);
        qp->saddr = arg->iph->saddr;
        qp->daddr = arg->iph->daddr;
        qp->user = arg->user;
        qp->peer = sysctl_ipfrag_max_dist ?
                inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
}
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
        struct ipq *qp;

        qp = container_of(q, struct ipq, q);
        if (qp->peer)
                inet_putpeer(qp->peer);
}

/* Destruction primitives. */
static __inline__ void ipq_put(struct ipq *ipq)
{
        inet_frag_put(&ipq->q, &ip4_frags);
}
/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
        inet_frag_kill(&ipq->q, &ip4_frags);
}
/* Memory limiting on fragments. The evictor trashes the oldest
 * fragment queues until we are back under the low threshold.
 */
static void ip_evictor(struct net *net)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
        if (evicted)
                IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}
/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
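/*
 * Runs from the per-queue timer.  Marks the queue dead and bumps the
 * timeout/failure counters.  An ICMP "Fragment Reassembly Timeout"
 * message is sent only if the first fragment has arrived (we need it
 * as the ICMP payload) and, for conntrack defragmentation at
 * PRE_ROUTING, only if this host is the packet's final destination.
 */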
static void ip_expire(unsigned long arg)
{
        struct ipq *qp;
        struct net *net;

        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);

        spin_lock(&qp->q.lock);

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        ipq_kill(qp);

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

        if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;

                rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
                        goto out_rcu_unlock;

                /*
                 * Only search the routing table for the head fragment
                 * when defragmentation times out at the PRE_ROUTING hook.
                 */
                if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
                        const struct iphdr *iph = ip_hdr(head);
                        int err = ip_route_input(head, iph->daddr, iph->saddr,
                                                 iph->tos, head->dev);
                        if (unlikely(err))
                                goto out_rcu_unlock;

                        /*
                         * Only an end host needs to send an ICMP
                         * "Fragment Reassembly Timeout" message, per RFC792.
                         */
                        if (skb_rtable(head)->rt_type != RTN_LOCAL)
                                goto out_rcu_unlock;
                }

                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
                rcu_read_unlock();
        }
out:
        spin_unlock(&qp->q.lock);
        ipq_put(qp);
}
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
        struct inet_frag_queue *q;
        struct ip4_create_arg arg;
        unsigned int hash;

        arg.iph = iph;
        arg.user = user;

        read_lock(&ip4_frags.lock);
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

        q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
        if (q == NULL)
                goto out_nomem;

        return container_of(q, struct ipq, q);

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
        return NULL;
}

/* Is the fragment too far ahead to be part of ipq? */
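/*
 * Each inet_peer carries a counter (peer->rid) of fragments seen from
 * that source address.  If more than sysctl_ipfrag_max_dist fragments
 * from the same peer arrived since this queue last received one, the
 * IP id space has likely wrapped or the datagram is stale, so the
 * queue is flushed rather than risk gluing unrelated fragments.
 */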
static inline int ip_frag_too_far(struct ipq *qp)
{
        struct inet_peer *peer = qp->peer;
        unsigned int max = sysctl_ipfrag_max_dist;
        unsigned int start, end;
        int rc;

        if (!peer || !max)
                return 0;

        start = qp->rid;
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;

        rc = qp->q.fragments && (end - start) > max;

        if (rc) {
                struct net *net;

                net = container_of(qp->q.net, struct net, ipv4.frags);
                IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        }

        return rc;
}
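/*
 * Drop every fragment collected so far and re-arm the queue's timer so
 * reassembly can restart cleanly; used when ip_frag_too_far() fires.
 * Returns -ETIMEDOUT if the timer could not be re-armed because expiry
 * is already in progress.
 */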
static int ip_frag_reinit(struct ipq *qp)
{
        struct sk_buff *fp;

        if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
                atomic_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }

        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;
                frag_kfree_skb(qp->q.net, fp);
                fp = xp;
        } while (fp);

        qp->q.last_in = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        qp->iif = 0;
        qp->ecn = 0;

        return 0;
}
/* Add new segment to existing queue. */
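/*
 * Returns 0 once the final fragment completes the datagram (the queue
 * is then reassembled via ip_frag_reasm()), -EINPROGRESS while more
 * fragments are awaited, or a negative error; on every failure path
 * the skb is freed here.
 */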
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;
        u8 ecn;

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
                ipq_kill(qp);
                goto err;
        }

        ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
        ihl = ip_hdrlen(skb);

        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;
        err = -EINVAL;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
                    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
                qp->q.last_in |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end&7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
        }
        if (end == offset)
                goto err;

        err = -ENOMEM;
        if (pskb_pull(skb, ihl) == NULL)
                goto err;

        err = pskb_trim_rcsum(skb, end - offset);
        if (err)
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = qp->q.fragments_tail;
        if (!prev || FRAG_CB(prev)->offset < offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        err = -EINVAL;
                        if (end <= offset)
                                goto err;
                        err = -ENOMEM;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        err = -ENOMEM;
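        /* Now walk the fragments that start at or after 'offset':
         * trim the head of a partially overlapped successor, and drop
         * any successor the new fragment covers completely.
         */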
        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG_CB(next)->offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overlapped by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->q.fragments = next;

                        qp->q.meat -= free_it->len;
                        frag_kfree_skb(qp->q.net, free_it);
                }
        }
        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                qp->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                qp->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                qp->iif = dev->ifindex;
                skb->dev = NULL;
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        qp->ecn |= ecn;
        atomic_add(skb->truesize, &qp->q.net->mem);
        if (offset == 0)
                qp->q.last_in |= INET_FRAG_FIRST_IN;

        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);

        write_lock(&ip4_frags.lock);
        list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
        write_unlock(&ip4_frags.lock);
        return -EINPROGRESS;

err:
        kfree_skb(skb);
        return err;
}
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;

        ipq_kill(qp);

        /* Make the one we just received the head. */
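        /* The skb just handed to ip_defrag() is prev->next.  Clone it
         * so its data keeps its place in the fragment list, then morph
         * the caller's skb into the old head: the pointer the caller
         * passed in stays valid and ends up holding the whole datagram.
         */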
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;

                fp->next = head->next;
                if (!fp->next)
                        qp->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, qp->q.fragments);
                head->next = qp->q.fragments->next;

                kfree_skb(qp->q.fragments);
                qp->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG_CB(head)->offset != 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
        len = ihlen + qp->q.len;

        err = -E2BIG;
        if (len > 65535)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &qp->q.net->mem);
        }

        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
        atomic_sub(head->truesize, &qp->q.net->mem);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;

        iph = ip_hdr(head);
        iph->frag_off = 0;
        iph->tot_len = htons(len);
        /* RFC3168 5.3 Fragmentation support
         * If one fragment had INET_ECN_NOT_ECT,
         *	the reassembled frame also has INET_ECN_NOT_ECT.
         * Otherwise, if one fragment had INET_ECN_CE,
         *	the reassembled frame also has INET_ECN_CE.
         */
        if (qp->ecn & IPFRAG_ECN_CLEAR)
                iph->tos &= ~INET_ECN_MASK;
        else if (qp->ecn & IPFRAG_ECN_SET_CE)
                iph->tos |= INET_ECN_CE;

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        return 0;

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
                       "queue %p\n", qp);
        err = -ENOMEM;
        goto out_fail;
out_oversize:
        if (net_ratelimit())
                printk(KERN_INFO "Oversized IP packet from %pI4.\n",
                       &qp->saddr);
out_fail:
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        return err;
}
/* Process an incoming IP datagram fragment. */
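/*
 * Returns 0 when reassembly is complete (the skb now carries the full
 * datagram), -EINPROGRESS if more fragments are needed, or a negative
 * error; in the latter two cases the skb has been consumed.  A sketch
 * of the typical caller (cf. ip_local_deliver() in ip_input.c):
 *
 *	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
 *		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;
 *	}
 */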
int ip_defrag(struct sk_buff *skb, u32 user)
{
        struct ipq *qp;
        struct net *net;

        net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

        /* Start by cleaning up the memory. */
        if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
                ip_evictor(net);

        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
                int ret;

                spin_lock(&qp->q.lock);

                ret = ip_frag_queue(qp, skb);

                spin_unlock(&qp->q.lock);
                ipq_put(qp);
                return ret;
        }

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
#ifdef CONFIG_SYSCTL
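/*
 * Two sysctl tables: ip4_frags_ns_ctl_table holds the per-namespace
 * knobs (thresholds and timeout), ip4_frags_ctl_table the global ones
 * (hash secret interval and max_dist).
 */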
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
        {
                .procname       = "ipfrag_high_thresh",
                .data           = &init_net.ipv4.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ipfrag_low_thresh",
                .data           = &init_net.ipv4.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ipfrag_time",
                .data           = &init_net.ipv4.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip4_frags_ctl_table[] = {
        {
                .procname       = "ipfrag_secret_interval",
                .data           = &ip4_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "ipfrag_max_dist",
                .data           = &sysctl_ipfrag_max_dist,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero
        },
        { }
};
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv4.frags.high_thresh;
                table[1].data = &net->ipv4.frags.low_thresh;
                table[2].data = &net->ipv4.frags.timeout;
        }

        hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv4.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv4.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.frags_hdr);
        kfree(table);
}

static void ip4_frags_ctl_register(void)
{
        register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
        return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif
static int __net_init ipv4_frags_init_net(struct net *net)
{
        /*
         * Fragment cache limits. We will commit 256K at one time. Should we
         * cross that limit we will prune down to 192K. This should cope with
         * even the most extreme cases without allowing an attacker to
         * measurably harm machine performance.
         */
        net->ipv4.frags.high_thresh = 256 * 1024;
        net->ipv4.frags.low_thresh = 192 * 1024;

        /*
         * Important NOTE! Fragment queue must be destroyed before MSL expires.
         * RFC 791 is wrong in proposing to prolong the timer by each arriving
         * fragment's TTL.
         */
        net->ipv4.frags.timeout = IP_FRAG_TIME;

        inet_frags_init_net(&net->ipv4.frags);

        return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
        ip4_frags_ns_ctl_unregister(net);
        inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
        .init = ipv4_frags_init_net,
        .exit = ipv4_frags_exit_net,
};
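/*
 * Wire the IPv4-specific callbacks into the generic inet_frag
 * machinery and rekey the hash secret every ten minutes.
 */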
void __init ipfrag_init(void)
{
        ip4_frags_ctl_register();
        register_pernet_subsys(&ip4_frags_ops);
        ip4_frags.hashfn = ip4_hashfn;
        ip4_frags.constructor = ip4_frag_init;
        ip4_frags.destructor = ip4_frag_free;
        ip4_frags.skb_free = NULL;
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.match = ip4_frag_match;
        ip4_frags.frag_expire = ip_expire;
        ip4_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip4_frags);
}