/*
 * inet_frag.h — shared inet fragment-reassembly definitions
 * (used by IPv4, IPv6 and netfilter reassembly).
 */
  1. #ifndef __NET_FRAG_H__
  2. #define __NET_FRAG_H__
  3. #include <linux/percpu_counter.h>
  4. struct netns_frags {
  5. int nqueues;
  6. struct list_head lru_list;
  7. spinlock_t lru_lock;
  8. /* The percpu_counter "mem" need to be cacheline aligned.
  9. * mem.count must not share cacheline with other writers
  10. */
  11. struct percpu_counter mem ____cacheline_aligned_in_smp;
  12. /* sysctls */
  13. int timeout;
  14. int high_thresh;
  15. int low_thresh;
  16. };
  17. struct inet_frag_queue {
  18. spinlock_t lock;
  19. struct timer_list timer; /* when will this queue expire? */
  20. struct list_head lru_list; /* lru list member */
  21. struct hlist_node list;
  22. atomic_t refcnt;
  23. struct sk_buff *fragments; /* list of received fragments */
  24. struct sk_buff *fragments_tail;
  25. ktime_t stamp;
  26. int len; /* total length of orig datagram */
  27. int meat;
  28. __u8 last_in; /* first/last segment arrived? */
  29. #define INET_FRAG_COMPLETE 4
  30. #define INET_FRAG_FIRST_IN 2
  31. #define INET_FRAG_LAST_IN 1
  32. u16 max_size;
  33. struct netns_frags *net;
  34. };
  35. #define INETFRAGS_HASHSZ 1024
  36. /* averaged:
  37. * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
  38. * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
  39. * struct frag_queue))
  40. */
  41. #define INETFRAGS_MAXDEPTH 128
  42. struct inet_frag_bucket {
  43. struct hlist_head chain;
  44. spinlock_t chain_lock;
  45. };
  46. struct inet_frags {
  47. struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
  48. /* This rwlock is a global lock (seperate per IPv4, IPv6 and
  49. * netfilter). Important to keep this on a seperate cacheline.
  50. * Its primarily a rebuild protection rwlock.
  51. */
  52. rwlock_t lock ____cacheline_aligned_in_smp;
  53. int secret_interval;
  54. struct timer_list secret_timer;
  55. u32 rnd;
  56. int qsize;
  57. unsigned int (*hashfn)(struct inet_frag_queue *);
  58. bool (*match)(struct inet_frag_queue *q, void *arg);
  59. void (*constructor)(struct inet_frag_queue *q,
  60. void *arg);
  61. void (*destructor)(struct inet_frag_queue *);
  62. void (*skb_free)(struct sk_buff *);
  63. void (*frag_expire)(unsigned long data);
  64. };
  65. void inet_frags_init(struct inet_frags *);
  66. void inet_frags_fini(struct inet_frags *);
  67. void inet_frags_init_net(struct netns_frags *nf);
  68. void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
  69. void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
  70. void inet_frag_destroy(struct inet_frag_queue *q,
  71. struct inet_frags *f, int *work);
  72. int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
  73. struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
  74. struct inet_frags *f, void *key, unsigned int hash)
  75. __releases(&f->lock);
  76. void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
  77. const char *prefix);
  78. static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
  79. {
  80. if (atomic_dec_and_test(&q->refcnt))
  81. inet_frag_destroy(q, f, NULL);
  82. }
  83. /* Memory Tracking Functions. */
  84. /* The default percpu_counter batch size is not big enough to scale to
  85. * fragmentation mem acct sizes.
  86. * The mem size of a 64K fragment is approx:
  87. * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
  88. */
  89. static unsigned int frag_percpu_counter_batch = 130000;
  90. static inline int frag_mem_limit(struct netns_frags *nf)
  91. {
  92. return percpu_counter_read(&nf->mem);
  93. }
  94. static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
  95. {
  96. __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
  97. }
  98. static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
  99. {
  100. __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
  101. }
  102. static inline void init_frag_mem_limit(struct netns_frags *nf)
  103. {
  104. percpu_counter_init(&nf->mem, 0);
  105. }
  106. static inline int sum_frag_mem_limit(struct netns_frags *nf)
  107. {
  108. int res;
  109. local_bh_disable();
  110. res = percpu_counter_sum_positive(&nf->mem);
  111. local_bh_enable();
  112. return res;
  113. }
  114. static inline void inet_frag_lru_move(struct inet_frag_queue *q)
  115. {
  116. spin_lock(&q->net->lru_lock);
  117. if (!list_empty(&q->lru_list))
  118. list_move_tail(&q->lru_list, &q->net->lru_list);
  119. spin_unlock(&q->net->lru_lock);
  120. }
  121. static inline void inet_frag_lru_del(struct inet_frag_queue *q)
  122. {
  123. spin_lock(&q->net->lru_lock);
  124. list_del_init(&q->lru_list);
  125. q->net->nqueues--;
  126. spin_unlock(&q->net->lru_lock);
  127. }
  128. static inline void inet_frag_lru_add(struct netns_frags *nf,
  129. struct inet_frag_queue *q)
  130. {
  131. spin_lock(&nf->lru_lock);
  132. list_add_tail(&q->lru_list, &nf->lru_list);
  133. q->net->nqueues++;
  134. spin_unlock(&nf->lru_lock);
  135. }
  136. /* RFC 3168 support :
  137. * We want to check ECN values of all fragments, do detect invalid combinations.
  138. * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
  139. */
  140. #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
  141. #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
  142. #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
  143. #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
  144. extern const u8 ip_frag_ecn_table[16];
  145. #endif