/*
 * inet_frag.h — shared declarations for inet fragment reassembly
 * (used by the IPv4, IPv6 and netfilter reassembly engines).
 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

/* Per-network-namespace fragment reassembly state. */
struct netns_frags {
	int			nqueues;	/* number of live frag queues in this netns */
	struct list_head	lru_list;	/* LRU of frag queues, oldest at the head */
	spinlock_t		lru_lock;	/* protects lru_list (see inet_frag_lru_* below) */

	/* The percpu_counter "mem" need to be cacheline aligned.
	 *  mem.count must not share cacheline with other writers
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* reassembly timeout — presumably jiffies; confirm against frag_expire users */
	int			high_thresh;	/* NOTE(review): upper mem bound for eviction — confirm in inet_frag_evictor */
	int			low_thresh;	/* NOTE(review): eviction target — confirm in inet_frag_evictor */
};
/* One in-flight reassembly: the state and collected fragments of a
 * single original datagram.
 */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects this queue's fields */
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member (netns_frags.lru_list) */
	struct hlist_node	list;		/* hash bucket linkage (inet_frags.hash) */
	atomic_t		refcnt;		/* dropped via inet_frag_put() */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;		/* timestamp — NOTE(review): presumably of last fragment; confirm */
	int			len;		/* total length of orig datagram */
	int			meat;		/* NOTE(review): looks like bytes received so far; confirm in reassembly code */
	__u8			last_in;	/* first/last segment arrived? (flags below) */
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1
	u16			max_size;	/* NOTE(review): presumably max fragment size seen; confirm */
	struct netns_frags	*net;		/* owning namespace's frag state */
};
#define INETFRAGS_HASHSZ	64

/* Per-protocol reassembly engine: the shared hash of queues plus the
 * callbacks each protocol (IPv4, IPv6, netfilter) supplies.
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* NOTE(review): period of secret_timer; confirm */
	struct timer_list	secret_timer;		/* NOTE(review): presumably re-seeds "rnd"; confirm */
	u32			rnd;			/* hash seed used by hashfn implementations */
	int			qsize;			/* NOTE(review): allocation size of the protocol's queue struct; confirm */

	/* protocol-supplied operations */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);	/* timer callback */
};
/* Register / unregister a protocol's reassembly engine. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Per-netns setup / teardown of fragment accounting state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* NOTE(review): semantics inferred from names; implementations are not
 * visible in this header — confirm in net/ipv4/inet_fragment.c.
 */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);

/* Look up the queue matching @key; releases f->lock before returning
 * (per the sparse annotation below).
 */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
	__releases(&f->lock);
  65. static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
  66. {
  67. if (atomic_dec_and_test(&q->refcnt))
  68. inet_frag_destroy(q, f, NULL);
  69. }
  70. /* Memory Tracking Functions. */
  71. /* The default percpu_counter batch size is not big enough to scale to
  72. * fragmentation mem acct sizes.
  73. * The mem size of a 64K fragment is approx:
  74. * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
  75. */
  76. static unsigned int frag_percpu_counter_batch = 130000;
  77. static inline int frag_mem_limit(struct netns_frags *nf)
  78. {
  79. return percpu_counter_read(&nf->mem);
  80. }
  81. static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
  82. {
  83. __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
  84. }
  85. static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
  86. {
  87. __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
  88. }
  89. static inline void init_frag_mem_limit(struct netns_frags *nf)
  90. {
  91. percpu_counter_init(&nf->mem, 0);
  92. }
  93. static inline int sum_frag_mem_limit(struct netns_frags *nf)
  94. {
  95. int res;
  96. local_bh_disable();
  97. res = percpu_counter_sum_positive(&nf->mem);
  98. local_bh_enable();
  99. return res;
  100. }
  101. static inline void inet_frag_lru_move(struct inet_frag_queue *q)
  102. {
  103. spin_lock(&q->net->lru_lock);
  104. list_move_tail(&q->lru_list, &q->net->lru_list);
  105. spin_unlock(&q->net->lru_lock);
  106. }
  107. static inline void inet_frag_lru_del(struct inet_frag_queue *q)
  108. {
  109. spin_lock(&q->net->lru_lock);
  110. list_del(&q->lru_list);
  111. spin_unlock(&q->net->lru_lock);
  112. }
/* Insert a new queue at the tail (most-recent end) of @nf's LRU list. */
static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}

#endif /* __NET_FRAG_H__ */