/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
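
/* One cached lookup result.  Entries hang off per-CPU hash chains;
 * 'genid' records the value of flow_cache_genid at resolve time, so
 * bumping the global counter lazily invalidates every cached object. */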
struct flow_cache_entry {
        struct flow_cache_entry *next;
        u16                     family;
        u8                      dir;
        struct flowi            key;
        u32                     genid;
        void                    *object;
        atomic_t                *object_ref;
};
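
/* Global generation counter.  Writers (e.g. security policy updates)
 * bump this to invalidate all cached objects without touching the
 * per-CPU tables directly. */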
atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size  (1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static kmem_cache_t *flow_cachep;

static int flow_lwm, flow_hwm;

struct flow_percpu_info {
        int hash_rnd_recalc;
        u32 hash_rnd;
        int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
        (per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD    (10 * 60 * HZ)

struct flow_flush_info {
        atomic_t cpuleft;
        struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
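
/* Timer callback: every FLOW_HASH_RND_PERIOD, flag each CPU to pick a
 * fresh jhash seed on its next lookup, bounding how long any one hash
 * distribution stays in use.  The timer re-arms itself. */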
static void flow_cache_new_hashrnd(unsigned long arg)
{
        int i;

        for_each_cpu(i)
                flow_hash_rnd_recalc(i) = 1;

        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
}
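
/* Trim every hash chain on 'cpu' down to at most 'shrink_to' entries,
 * dropping the object reference of each evicted entry.  With
 * shrink_to == 0 this empties the table. */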
static void __flow_cache_shrink(int cpu, int shrink_to)
{
        struct flow_cache_entry *fle, **flp;
        int i;

        for (i = 0; i < flow_hash_size; i++) {
                int k = 0;

                flp = &flow_table(cpu)[i];
                while ((fle = *flp) != NULL && k < shrink_to) {
                        k++;
                        flp = &fle->next;
                }
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
                        if (fle->object)
                                atomic_dec(fle->object_ref);
                        kmem_cache_free(flow_cachep, fle);
                        flow_count(cpu)--;
                }
        }
}

static void flow_cache_shrink(int cpu)
{
        int shrink_to = flow_lwm / flow_hash_size;

        __flow_cache_shrink(cpu, shrink_to);
}

static void flow_new_hash_rnd(int cpu)
{
        get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
        flow_hash_rnd_recalc(cpu) = 0;

        __flow_cache_shrink(cpu, 0);
}
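
/* Hash the flow key with this CPU's seed, treating struct flowi as a
 * flat array of u32 words, then mask down to a bucket index. */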
static u32 flow_hash_code(struct flowi *key, int cpu)
{
        u32 *k = (u32 *) key;

        return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
                (flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif
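
/* Deliberately defined nowhere: if sizeof(struct flowi) is not a
 * multiple of sizeof(flow_compare_t), the call below survives constant
 * folding and the build breaks at link time.  A poor man's
 * compile-time assertion. */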
extern void flowi_is_missized(void);

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        if (sizeof(struct flowi) % sizeof(flow_compare_t))
                flowi_is_missized();

        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}
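
/*
 * Main entry point.  With BHs disabled, probe this CPU's table for
 * (key, family, dir):
 *
 *   - hit with a current genid: take a reference and return the object;
 *   - hit with a stale genid, or a miss: call the resolver, then
 *     install the fresh object in the (possibly newly allocated) entry.
 *
 * The table is strictly per-CPU, so no locking is needed beyond
 * local_bh_disable().
 */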
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                        flow_resolve_t resolver)
{
        struct flow_cache_entry *fle, **head;
        unsigned int hash;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();

        fle = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!flow_table(cpu))
                goto nocache;

        if (flow_hash_rnd_recalc(cpu))
                flow_new_hash_rnd(cpu);
        hash = flow_hash_code(key, cpu);

        head = &flow_table(cpu)[hash];
        for (fle = *head; fle; fle = fle->next) {
                if (fle->family == family &&
                    fle->dir == dir &&
                    flow_key_compare(key, &fle->key) == 0) {
                        if (fle->genid == atomic_read(&flow_cache_genid)) {
                                void *ret = fle->object;

                                if (ret)
                                        atomic_inc(fle->object_ref);
                                local_bh_enable();

                                return ret;
                        }
                        break;
                }
        }

        if (!fle) {
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);

                fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        flow_count(cpu)++;
                }
        }

nocache:
        {
                void *obj;
                atomic_t *obj_ref;

                resolver(key, family, dir, &obj, &obj_ref);

                if (fle) {
                        fle->genid = atomic_read(&flow_cache_genid);

                        if (fle->object)
                                atomic_dec(fle->object_ref);

                        fle->object = obj;
                        fle->object_ref = obj_ref;
                        if (obj)
                                atomic_inc(fle->object_ref);
                }
                local_bh_enable();

                return obj;
        }
}
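
/* Per-CPU half of flow_cache_flush(): runs as a tasklet on each CPU
 * and drops the object of every entry whose genid is stale.  The
 * entries themselves stay allocated; only references are released.
 * Signals the completion once the last CPU finishes. */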
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        int i;
        int cpu;

        cpu = smp_processor_id();
        for (i = 0; i < flow_hash_size; i++) {
                struct flow_cache_entry *fle;

                fle = flow_table(cpu)[i];
                for (; fle; fle = fle->next) {
                        unsigned genid = atomic_read(&flow_cache_genid);

                        if (!fle->object || fle->genid == genid)
                                continue;

                        fle->object = NULL;
                        atomic_dec(fle->object_ref);
                }
        }

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}
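
/* smp_call_function() handler: schedule this CPU's flush tasklet so
 * the actual table walk runs in softirq context, like lookups do. */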
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();

        tasklet = flow_flush_tasklet(cpu);
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}
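
/* Flush stale objects on every online CPU and wait until all of them
 * are done.  Serialized by a local semaphore, with CPU hotplug locked
 * out so num_online_cpus() stays accurate while we wait.  Note that
 * smp_call_function() skips the calling CPU, so the caller runs its
 * own flush tasklet directly. */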
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DECLARE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        lock_cpu_hotplug();
        down(&flow_flush_sem);

        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

        up(&flow_flush_sem);
        unlock_cpu_hotplug();
}
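
/* Allocate and zero one CPU's hash table, using the smallest page
 * order that fits flow_hash_size chain-head pointers, and initialize
 * that CPU's flush tasklet. */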
static void __devinit flow_cache_cpu_prepare(int cpu)
{
        struct tasklet_struct *tasklet;
        unsigned long order;

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct flow_cache_entry *)*flow_hash_size);
             order++)
                /* NOTHING */;

        flow_table(cpu) = (struct flow_cache_entry **)
                __get_free_pages(GFP_KERNEL, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);

        memset(flow_table(cpu), 0, PAGE_SIZE << order);

        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;

        tasklet = flow_flush_tasklet(cpu);
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}
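
/* Hotplug notifier: when a CPU goes offline, empty its table so the
 * cached object references are not leaked. */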
#ifdef CONFIG_HOTPLUG_CPU
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        if (action == CPU_DEAD)
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
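
/* Boot-time setup: the entry slab, a 2^10-bucket table per CPU,
 * low/high watermarks of 2x/4x the bucket count, the periodic
 * seed-rotation timer, and the hotplug notifier. */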
static int __init flow_cache_init(void)
{
        int i;

        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_HWCACHE_ALIGN,
                                        NULL, NULL);

        if (!flow_cachep)
                panic("NET: failed to allocate flow cache slab\n");

        flow_hash_shift = 10;
        flow_lwm = 2 * flow_hash_size;
        flow_hwm = 4 * flow_hash_size;

        init_timer(&flow_hash_rnd_timer);
        flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);

        for_each_cpu(i)
                flow_cache_cpu_prepare(i);

        hotcpu_notifier(flow_cache_cpu, 0);
        return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);
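
/*
 * Usage sketch (illustrative only; 'my_resolver' and the calling code
 * are hypothetical).  A caller supplies a flow_resolve_t whose shape
 * matches the resolver() invocation in flow_cache_lookup() above:
 *
 *      static void my_resolver(struct flowi *key, u16 family, u8 dir,
 *                              void **objp, atomic_t **obj_refp)
 *      {
 *              slow path: find the object, set *objp and *obj_refp
 *      }
 *
 *      obj = flow_cache_lookup(&fl, AF_INET, FLOW_DIR_OUT, my_resolver);
 *
 * The returned object carries a reference (atomic_inc above); the
 * caller drops it through the object's own refcount when finished.
 */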