/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

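/* One cached flow: the lookup key plus the resolved object and the
 * generation count (genid) at which the entry was last resolved. */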
struct flow_cache_entry {
        union {
                struct hlist_node hlist;
                struct list_head gc_list;
        } u;
        struct net *net;
        u16 family;
        u8 dir;
        u32 genid;
        struct flowi key;
        struct flow_cache_object *object;
};

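/* Per-cpu state: each CPU owns a private hash table keyed with its own
 * jhash seed, so lookups never take a cross-CPU lock. */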
struct flow_cache_percpu {
        struct hlist_head *hash_table;
        int hash_count;
        u32 hash_rnd;
        int hash_rnd_recalc;
        struct tasklet_struct flush_tasklet;
};

struct flow_flush_info {
        struct flow_cache *cache;
        atomic_t cpuleft;
        struct completion completion;
};

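/* Cache-wide state.  low_watermark/high_watermark bound the per-cpu
 * entry count: crossing high_watermark on a miss triggers a shrink
 * back toward low_watermark. */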
struct flow_cache {
        u32 hash_shift;
        struct flow_cache_percpu __percpu *percpu;
        struct notifier_block hotcpu_notifier;
        int low_watermark;
        int high_watermark;
        struct timer_list rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

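/* Timer handler: ask every CPU to pick a fresh hash seed on its next
 * lookup, then re-arm the timer for the next period. */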
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

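/* An entry is stale if the global generation count has moved past it,
 * or if its object no longer passes the owner's ->check(). */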
static int flow_entry_valid(struct flow_cache_entry *fle)
{
        if (atomic_read(&flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

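/* Workqueue handler: splice the global gc list under the lock, then
 * free the dead entries outside it. */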
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&flow_cache_gc_lock);
        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&flow_cache_gc_lock);
                list_splice_tail(gc_list, &flow_cache_gc_list);
                spin_unlock_bh(&flow_cache_gc_lock);
                schedule_work(&flow_cache_gc_work);
        }
}

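/* Walk every bucket, keeping at most shrink_to valid entries per bucket
 * and queueing the rest for garbage collection.  shrink_to == 0 empties
 * the table. */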
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

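/* Re-seed this CPU's hash and empty its table, since existing entries
 * would land in the wrong buckets under the new seed. */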
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

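/* Hash the key words with the per-cpu seed and mask the result down to
 * a bucket index. */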
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          size_t keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            size_t keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

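/* Look up @key in this CPU's cache.  On a miss or a stale entry the
 * @resolver callback is invoked (with BHs disabled) to produce a fresh
 * object, which is then cached under the current generation count.
 * Returns the object, an ERR_PTR() from the resolver, or NULL. */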
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

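        /* Miss: shrink first if we are over the high watermark, then
         * try to allocate a new entry for this key. */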
        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

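        /* Miss, early-init call, or stale entry: let the resolver build
         * (or refresh) the object.  On error the genid is stepped back
         * so the cached entry stays invalid. */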
nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

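/* Per-cpu half of a flush: evict every entry that is no longer valid on
 * this CPU, then signal completion when the last CPU finishes. */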
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

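/* Flush the cache on all online CPUs and wait for them to finish.  The
 * mutex serializes concurrent flushers; get_online_cpus() keeps the CPU
 * set stable while the per-cpu tasklets run. */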
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        info.cache = &flow_cache_global;
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}

static void flow_cache_flush_task(struct work_struct *work)
{
        flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
        schedule_work(&flow_cache_flush_work);
}

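/* Allocate and initialize one CPU's hash table; called at init time and
 * again from the hotplug notifier when a CPU comes up. */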
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
                                    unsigned long action,
                                    void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache,
                                             hotcpu_notifier);
        int res, cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                res = flow_cache_cpu_prepare(fc, cpu);
                if (res)
                        return notifier_from_errno(res);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __flow_cache_shrink(fc, fcp, 0);
                break;
        }
        return NOTIFY_OK;
}

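/* Set up the global cache: 2^10 buckets per CPU, watermarks scaled to
 * the table size, the CPU hotplug notifier, and the periodic re-seed
 * timer. */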
static int __init flow_cache_init(struct flow_cache *fc)
{
        int i;

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        for_each_online_cpu(i) {
                if (flow_cache_cpu_prepare(fc, i))
                        goto err;
        }
        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        register_hotcpu_notifier(&fc->hotcpu_notifier);

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC, NULL);

        return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);
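
/* Usage sketch (illustrative, not part of this file): a minimal
 * flow_resolve_t callback, loosely modeled on the xfrm policy resolver
 * that is this cache's main user.  The "example_" names are
 * hypothetical.  The resolver runs with BHs disabled, receives the
 * previously cached object (or NULL), and returns the object to cache,
 * NULL for a cacheable negative result, or an ERR_PTR() on failure:
 *
 *      static struct flow_cache_object *
 *      example_resolve(struct net *net, const struct flowi *key,
 *                      u16 family, u8 dir,
 *                      struct flow_cache_object *old_obj, void *ctx)
 *      {
 *              if (old_obj)
 *                      old_obj->ops->delete(old_obj);  // stale; drop it
 *              // ... look the flow up in the owner's tables here ...
 *              return ERR_PTR(-ENOENT);
 *      }
 *
 *      flo = flow_cache_lookup(net, key, family, dir,
 *                              example_resolve, ctx);
 */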