/*
 * inet_frag.h - shared declarations for inet fragment reassembly
 * (used by IPv4, IPv6 and netfilter defragmentation).
 */
  1. #ifndef __NET_FRAG_H__
  2. #define __NET_FRAG_H__
  3. #include <linux/percpu_counter.h>
/* Per-network-namespace fragment reassembly state. */
struct netns_frags {
	int			nqueues;	/* number of frag queues in this netns */
	struct list_head	lru_list;	/* LRU of frag queues; evicted from head */
	spinlock_t		lru_lock;	/* protects lru_list */

	/* The percpu_counter "mem" need to be cacheline aligned.
	 *  mem.count must not share cacheline with other writers
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* reassembly timeout */
	int			high_thresh;	/* NOTE(review): presumably mem ceiling that triggers eviction — confirm in evictor */
	int			low_thresh;	/* NOTE(review): presumably target mem after eviction — confirm in evictor */
};
/* One in-progress datagram reassembly (a single fragmented packet). */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects the queue contents */
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;		/* hash-bucket linkage */
	atomic_t		refcnt;		/* dropped via inet_frag_put() */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;		/* NOTE(review): presumably bytes gathered so far — confirm in reassembly code */
	__u8			last_in;	/* first/last segment arrived? (flags below) */
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1
	u16			max_size;
	struct netns_frags	*net;		/* owning namespace state */
};
#define INETFRAGS_HASHSZ	64

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

/* Per-protocol fragment-handling descriptor: one instance each for
 * IPv4, IPv6 and netfilter defragmentation.
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* period for re-keying the hash */
	struct timer_list	secret_timer;
	u32			rnd;			/* hash seed */
	int			qsize;			/* NOTE(review): presumably allocation size of the protocol's queue struct — confirm */

	/* protocol-specific callbacks */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
/* Register/unregister a protocol's fragment descriptor. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Per-netns setup/teardown of reassembly state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* Remove a queue from hash/LRU; destroy frees it and its fragments. */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);

/* Look up (or create) the queue for @key; drops f->lock before returning. */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
	__releases(&f->lock);

/* Rate-limited warning when a hash chain exceeds INETFRAGS_MAXDEPTH. */
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
  73. static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
  74. {
  75. if (atomic_dec_and_test(&q->refcnt))
  76. inet_frag_destroy(q, f, NULL);
  77. }
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 *
 * NOTE(review): being `static` in a header, each translation unit gets
 * its own copy; harmless here since the value is only ever read.
 */
static unsigned int frag_percpu_counter_batch = 130000;
  85. static inline int frag_mem_limit(struct netns_frags *nf)
  86. {
  87. return percpu_counter_read(&nf->mem);
  88. }
  89. static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
  90. {
  91. __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
  92. }
  93. static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
  94. {
  95. __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
  96. }
/* Initialize the namespace's fragment memory counter to zero. */
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}
  101. static inline int sum_frag_mem_limit(struct netns_frags *nf)
  102. {
  103. int res;
  104. local_bh_disable();
  105. res = percpu_counter_sum_positive(&nf->mem);
  106. local_bh_enable();
  107. return res;
  108. }
  109. static inline void inet_frag_lru_move(struct inet_frag_queue *q)
  110. {
  111. spin_lock(&q->net->lru_lock);
  112. list_move_tail(&q->lru_list, &q->net->lru_list);
  113. spin_unlock(&q->net->lru_lock);
  114. }
  115. static inline void inet_frag_lru_del(struct inet_frag_queue *q)
  116. {
  117. spin_lock(&q->net->lru_lock);
  118. list_del(&q->lru_list);
  119. spin_unlock(&q->net->lru_lock);
  120. }
/* Append @q to @nf's LRU list (newest entries live at the tail). */
static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}
  128. #endif