/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
        struct inet_skb_parm h;
        int offset;
};

#define FRAG_CB(skb) ((struct ipfrag_skb_cb*)((skb)->cb))
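/*
 * Illustrative note (an addition, not original code): skb->cb is a small
 * scratch area private to whichever layer currently owns the skb, so the
 * reassembly code overlays its own structure on it to remember where each
 * fragment sits in the original datagram, e.g.
 *
 *        FRAG_CB(skb)->offset = 1480;   // second fragment on a 1500-MTU path
 */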
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
        struct inet_frag_queue q;

        u32 user;
        __be32 saddr;
        __be32 daddr;
        __be16 id;
        u8 protocol;
        int iif;
        unsigned int rid;
        struct inet_peer *peer;
};

struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
        /*
         * Fragment cache limits. We will commit 256K at one time. Should we
         * cross that limit we will prune down to 192K. This should cope with
         * even the most extreme cases without allowing an attacker to
         * measurably harm machine performance.
         */
        .high_thresh     = 256 * 1024,
        .low_thresh      = 192 * 1024,

        /*
         * Important NOTE! The fragment queue must be destroyed before the MSL
         * expires. RFC 791 is wrong in proposing to prolong the timer on each
         * fragment arrival by that fragment's TTL.
         */
        .timeout         = IP_FRAG_TIME,
        .secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(void)
{
        return ip4_frags.nqueues;
}

int ip_frag_mem(void)
{
        return atomic_read(&ip4_frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev);

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}
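/*
 * Illustrative note (an addition, not original code): the lookup key folds
 * the 16-bit IP ID and the 8-bit protocol into a single 32-bit word and
 * mixes it with the addresses and the random seed ip4_frags.rnd, so remote
 * senders cannot precompute colliding buckets. For id = 0x1234 and
 * prot = 17 (UDP), the first jhash word is (0x1234 << 16) | 17 = 0x12340011.
 */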
static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
        struct ipq *ipq;

        ipq = container_of(q, struct ipq, q);
        return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &ip4_frags.mem);
        kfree_skb(skb);
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
        struct ipq *qp;

        qp = container_of(q, struct ipq, q);
        if (qp->peer)
                inet_putpeer(qp->peer);
        kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
        struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

        if (!qp)
                return NULL;
        atomic_add(sizeof(struct ipq), &ip4_frags.mem);
        return qp;
}

/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
        inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill an ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
        inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments. The evictor trashes the oldest
 * fragment queues until we are back under the threshold.
 */
static void ip_evictor(void)
{
        int evicted;

        evicted = inet_frag_evictor(&ip4_frags);
        if (evicted)
                IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
        struct ipq *qp = (struct ipq *) arg;

        spin_lock(&qp->q.lock);

        if (qp->q.last_in & COMPLETE)
                goto out;

        ipq_kill(qp);

        IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
        IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

        if ((qp->q.last_in & FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;

                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
                        icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
                        dev_put(head->dev);
                }
        }
out:
        spin_unlock(&qp->q.lock);
        ipq_put(qp);
}

/* Creation primitives. */

static struct ipq *ip_frag_intern(struct ipq *qp_in)
{
        struct ipq *qp;
#ifdef CONFIG_SMP
        struct hlist_node *n;
#endif
        unsigned int hash;

        write_lock(&ip4_frags.lock);
        hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
                         qp_in->protocol);
#ifdef CONFIG_SMP
        /* In the SMP case we must recheck the hash table, because such an
         * entry could have been created on another CPU while we promoted
         * the read lock to a write lock.
         */
        hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
                if (qp->id == qp_in->id &&
                    qp->saddr == qp_in->saddr &&
                    qp->daddr == qp_in->daddr &&
                    qp->protocol == qp_in->protocol &&
                    qp->user == qp_in->user) {
                        atomic_inc(&qp->q.refcnt);
                        write_unlock(&ip4_frags.lock);
                        qp_in->q.last_in |= COMPLETE;
                        ipq_put(qp_in);
                        return qp;
                }
        }
#endif
        qp = qp_in;

        if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
                atomic_inc(&qp->q.refcnt);

        atomic_inc(&qp->q.refcnt);
        hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
        INIT_LIST_HEAD(&qp->q.lru_list);
        list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
        ip4_frags.nqueues++;
        write_unlock(&ip4_frags.lock);
        return qp;
}
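/*
 * Illustrative note (an addition, not original code): a freshly interned
 * queue leaves this function with three references: the caller's initial
 * one from ip_frag_create(), one owned by the expiry timer (mod_timer()
 * returns 0 when the timer was not already pending, so that reference is
 * taken explicitly), and one owned by the hash table itself.
 */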
/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
{
        struct ipq *qp;

        if ((qp = frag_alloc_queue()) == NULL)
                goto out_nomem;

        qp->protocol = iph->protocol;
        qp->q.last_in = 0;
        qp->id = iph->id;
        qp->saddr = iph->saddr;
        qp->daddr = iph->daddr;
        qp->user = user;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->iif = 0;
        qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;

        /* Initialize a timer for this entry. */
        init_timer(&qp->q.timer);
        qp->q.timer.data = (unsigned long) qp;  /* pointer to queue */
        qp->q.timer.function = ip_expire;       /* expire function */
        spin_lock_init(&qp->q.lock);
        atomic_set(&qp->q.refcnt, 1);

        return ip_frag_intern(qp);

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
        return NULL;
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
        __be16 id = iph->id;
        __be32 saddr = iph->saddr;
        __be32 daddr = iph->daddr;
        __u8 protocol = iph->protocol;
        unsigned int hash;
        struct ipq *qp;
        struct hlist_node *n;

        read_lock(&ip4_frags.lock);
        hash = ipqhashfn(id, saddr, daddr, protocol);
        hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
                if (qp->id == id &&
                    qp->saddr == saddr &&
                    qp->daddr == daddr &&
                    qp->protocol == protocol &&
                    qp->user == user) {
                        atomic_inc(&qp->q.refcnt);
                        read_unlock(&ip4_frags.lock);
                        return qp;
                }
        }
        read_unlock(&ip4_frags.lock);

        return ip_frag_create(iph, user);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
        struct inet_peer *peer = qp->peer;
        unsigned int max = sysctl_ipfrag_max_dist;
        unsigned int start, end;

        int rc;

        if (!peer || !max)
                return 0;

        start = qp->rid;
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;

        rc = qp->q.fragments && (end - start) > max;

        if (rc) {
                IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
        }

        return rc;
}
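/*
 * Illustrative worked example (an addition, not original code): peer->rid
 * counts fragments seen from this source address, and qp->rid remembers
 * the count when this queue last received one. If start = 100 and the
 * counter has meanwhile advanced to end = 170, then with the default
 * max = 64 we get end - start = 70 > 64: too many unrelated fragments
 * from the same source arrived in between, so the queue is judged stale.
 * The unsigned subtraction keeps the distance correct across counter
 * wraparound.
 */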
static int ip_frag_reinit(struct ipq *qp)
{
        struct sk_buff *fp;

        if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
                atomic_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }

        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;
                frag_kfree_skb(fp, NULL);
                fp = xp;
        } while (fp);

        qp->q.last_in = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->iif = 0;

        return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;

        if (qp->q.last_in & COMPLETE)
                goto err;

        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
                ipq_kill(qp);
                goto err;
        }

        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
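        /*
         * Illustrative worked example (an addition, not original code):
         * for a middle fragment arriving with frag_off = 0x20b9, we get
         * flags = 0x2000 (IP_MF set, more fragments follow) and
         * offset = 0x0b9 * 8 = 1480 bytes, i.e. the second fragment of a
         * datagram split on a 1500-byte MTU with a 20-byte header.
         */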
        ihl = ip_hdrlen(skb);

        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;
        err = -EINVAL;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
                    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
                        goto err;
                qp->q.last_in |= LAST_IN;
                qp->q.len = end;
        } else {
                if (end & 7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->q.last_in & LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
        }
        if (end == offset)
                goto err;

        err = -ENOMEM;
        if (pskb_pull(skb, ihl) == NULL)
                goto err;

        err = pskb_trim_rcsum(skb, end - offset);
        if (err)
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far. We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one. Check for overlap with the
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        err = -EINVAL;
                        if (end <= offset)
                                goto err;
                        err = -ENOMEM;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        err = -ENOMEM;

        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat the head of the next overlapping fragment
                         * and leave the loop. The following ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG_CB(next)->offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely covered by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->q.fragments = next;

                        qp->q.meat -= free_it->len;
                        frag_kfree_skb(free_it, NULL);
                }
        }
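        /*
         * Illustrative worked example (an addition, not original code):
         * with a queued fragment covering bytes [0, 24) and a new one
         * covering [16, 48), the preceding-overlap check computes
         * i = 24 - 16 = 8 and pulls 8 bytes off the new fragment so it
         * covers [24, 48). Conversely, a queued fragment covering
         * [24, 32) lies entirely inside the new one, so the loop above
         * unlinks and frees it.
         */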
        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                qp->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                qp->iif = dev->ifindex;
                skb->dev = NULL;
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        atomic_add(skb->truesize, &ip4_frags.mem);
        if (offset == 0)
                qp->q.last_in |= FIRST_IN;

        if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);

        write_lock(&ip4_frags.lock);
        list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
        write_unlock(&ip4_frags.lock);
        return -EINPROGRESS;

err:
        kfree_skb(skb);
        return err;
}

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;

        ipq_kill(qp);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, qp->q.fragments);
                head->next = qp->q.fragments->next;

                kfree_skb(qp->q.fragments);
                qp->q.fragments = head;
        }

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG_CB(head)->offset == 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
        len = ihlen + qp->q.len;

        err = -E2BIG;
        if (len > 65535)
                goto out_oversize;

        /* The head of the list must not be cloned. */
        err = -ENOMEM;
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and the paged part,
         * and the second holding only fragments.
         */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip4_frags.mem);
        }

        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip4_frags.mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip4_frags.mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;

        iph = ip_hdr(head);
        iph->frag_off = 0;
        iph->tot_len = htons(len);
        IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        return 0;

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
                        "queue %p\n", qp);
        goto out_fail;
out_oversize:
        if (net_ratelimit())
                printk(KERN_INFO
                        "Oversized IP packet from %d.%d.%d.%d.\n",
                        NIPQUAD(qp->saddr));
out_fail:
        IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
        return err;
}
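/*
 * Illustrative note (an addition, not original code): the reassembled
 * datagram is never copied into one flat buffer; the trailing fragments
 * are chained on skb_shinfo(head)->frag_list and head's len, data_len and
 * truesize are grown to cover them. For three 1480-byte payload fragments
 * the final head has len = 20 + 3 * 1480 = 4460, and tot_len is rewritten
 * to match.
 */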
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
        struct ipq *qp;

        IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

        /* Start by cleaning up the memory. */
        if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
                ip_evictor();

        /* Lookup (or create) queue header */
        if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
                int ret;

                spin_lock(&qp->q.lock);

                ret = ip_frag_queue(qp, skb);

                spin_unlock(&qp->q.lock);
                ipq_put(qp);
                return ret;
        }

        IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
}
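/*
 * Usage sketch (an addition, assuming the ip_local_deliver() caller of
 * this era): ip_defrag() consumes the skb unless it returns 0, either
 * freeing it on error or queueing it and returning -EINPROGRESS while
 * fragments are still outstanding. A caller therefore only proceeds on a
 * zero return, at which point the passed skb holds the whole datagram:
 *
 *        if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
 *                if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *                        return 0;
 *        }
 */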
void __init ipfrag_init(void)
{
        ip4_frags.ctl = &ip4_frags_ctl;
        ip4_frags.hashfn = ip4_hashfn;
        ip4_frags.destructor = ip4_frag_free;
        ip4_frags.skb_free = NULL;
        ip4_frags.qsize = sizeof(struct ipq);
        inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);