/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address.  Each node contains long-living
 * information about the peer which doesn't depend on routes.
 * At this moment this information consists only of the ID field for the next
 * outgoing IP packet.  This field is incremented with each packet as encoded
 * in the inet_getid() function (include/net/inetpeer.h).
 * At the time these notes were written, the identifier of an IP packet is
 * generated to be unpredictable using this code only for packets subjected
 * (actually or potentially) to defragmentation.  I.e. DF packets smaller than
 * the PMTU use a constant ID and do not use this code (see
 * ip_select_ident() in include/net/ip.h).
 *
 * Route cache entries hold references to our nodes.
 * New cache entries get references via lookup by destination IP address in
 * the avl tree.  The reference is grabbed only when it's needed, i.e. only
 * when we try to output an IP packet which needs an unpredictable ID (see
 * __ip_select_ident() in net/ipv4/route.c).
 * Nodes are removed only when the reference counter goes to 0.
 * Once that has happened, the node may be removed once a sufficient amount
 * of time has passed since its last use.  The less-recently-used entry can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an AVL tree.
 * Such an implementation has been chosen not just for fun.  It's a way to
 * prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 * number of long-living nodes in a single hash slot would significantly delay
 * lookups performed with disabled BHs.
 *
 * Serialisation issues.
 * 1.  Nodes may appear in the tree only with the pool lock held.
 * 2.  Nodes may disappear from the tree only with the pool lock held
 *     AND the reference count being 0.
 * 3.  Nodes appear on and disappear from the unused node list only under
 *     the unused-list lock (unused_peers.lock).
 * 4.  The global entry counter (peers.total) is modified under the pool lock.
 * 5.  struct inet_peer fields modification:
 *      avl_left, avl_right, avl_parent, avl_height: pool lock
 *      unused: unused node list lock
 *      refcnt: atomically against modifications on other CPUs;
 *              usually under some other lock to prevent node disappearing
 *      dtime: unused node list lock
 *      v4daddr: unchangeable
 *      ip_id_count: atomic value (no lock needed)
 */

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
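
/*
 * peer_fake_node is a sentinel used in place of NULL child pointers:
 * its avl_height of 0 lets node_height() work uniformly at the leaves,
 * and tree walks stop when they reach peer_avl_empty.
 */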
static const struct inet_peer peer_fake_node = {
        .avl_left       = peer_avl_empty_rcu,
        .avl_right      = peer_avl_empty_rcu,
        .avl_height     = 0
};
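
/*
 * The peer pool: an AVL tree of inet_peer nodes keyed by IPv4 address.
 * Writers insert and delete under 'lock'; lockless readers walk the tree
 * under rcu_read_lock_bh() (see lookup_rcu_bh() below).
 */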
static struct {
        struct inet_peer __rcu *root;
        spinlock_t      lock;
        int             total;
} peers = {
        .root           = peer_avl_empty_rcu,
        .lock           = __SPIN_LOCK_UNLOCKED(peers.lock),
        .total          = 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128;   /* start to throw entries more
                                                         * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;          /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static struct {
        struct list_head        list;
        spinlock_t              lock;
} unused_peers = {
        .list   = LIST_HEAD_INIT(unused_peers.list),
        .lock   = __SPIN_LOCK_UNLOCKED(unused_peers.lock),
};
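
/*
 * Periodic garbage-collection timer; peer_check_expire() prunes expired
 * unused entries and re-arms the timer with an interval that shrinks as
 * the pool fills up.
 */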
static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
        struct sysinfo si;

        /* Use the straight interface to information about memory. */
        si_meminfo(&si);
        /* The values below were suggested by Alexey Kuznetsov
         * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
         * myself.  --SAW
         */
        if (si.totalram <= (32768*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
        if (si.totalram <= (16384*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* about 512KB */
        if (si.totalram <= (8192*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 2; /* about 128KB */

        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);

        /* All timers started at system startup tend to synchronize.
         * Perturb this one a bit.
         */
        peer_periodic_timer.expires = jiffies
                + net_random() % inet_peer_gc_maxtime
                + inet_peer_gc_maxtime;
        add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
        if (!list_empty(&p->unused)) {
                spin_lock_bh(&unused_peers.lock);
                list_del_init(&p->unused);
                spin_unlock_bh(&unused_peers.lock);
        }
}
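
/*
 * lookup() is written as a macro (rather than a function), which lets it
 * hand the traversal path back to the caller through 'stackptr'; the
 * recorded path of node pointers is what peer_avl_rebalance() later walks
 * to restore the AVL balance after an insertion or deletion.
 */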

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack)                                  \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &peers.root;                              \
        for (u = rcu_dereference_protected(peers.root,          \
                        lockdep_is_held(&peers.lock));          \
             u != peer_avl_empty; ) {                           \
                if (_daddr == u->v4daddr)                       \
                        break;                                  \
                if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
                u = rcu_dereference_protected(*v,               \
                        lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
})

/*
 * Called with rcu_read_lock_bh().
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links followed exceeds
 * PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu_bh(__be32 daddr)
{
        struct inet_peer *u = rcu_dereference_bh(peers.root);
        int count = 0;

        while (u != peer_avl_empty) {
                if (daddr == u->v4daddr) {
                        /* Before taking a reference, check if this entry was
                         * deleted: unlink_from_pool() sets refcnt=-1 to make
                         * the distinction between an unused entry (refcnt=0)
                         * and a freed one.
                         */
                        if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
                                u = NULL;
                        return u;
                }
                if ((__force __u32)daddr < (__force __u32)u->v4daddr)
                        u = rcu_dereference_bh(u->avl_left);
                else
                        u = rcu_dereference_bh(u->avl_right);
                if (unlikely(++count == PEER_MAXDEPTH))
                        break;
        }
        return NULL;
}
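
/*
 * lookup_rightempty() finds the rightmost node of start's left subtree
 * (i.e. start's in-order predecessor), again recording the path in
 * 'stackptr'.  unlink_from_pool() uses it to pick the replacement node
 * when deleting a node that has a non-empty left subtree.
 */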

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start)                                \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
        for (u = rcu_dereference_protected(*v,                  \
                        lockdep_is_held(&peers.lock));          \
             u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
                u = rcu_dereference_protected(*v,               \
                        lockdep_is_held(&peers.lock));          \
        }                                                       \
        u;                                                      \
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                struct inet_peer __rcu ***stackend)
{
        struct inet_peer __rcu **nodep;
        struct inet_peer *node, *l, *r;
        int lh, rh;

        while (stackend > stack) {
                nodep = *--stackend;
                node = rcu_dereference_protected(*nodep,
                                lockdep_is_held(&peers.lock));
                l = rcu_dereference_protected(node->avl_left,
                                lockdep_is_held(&peers.lock));
                r = rcu_dereference_protected(node->avl_right,
                                lockdep_is_held(&peers.lock));
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
                        ll = rcu_dereference_protected(l->avl_left,
                                        lockdep_is_held(&peers.lock));
                        lr = rcu_dereference_protected(l->avl_right,
                                        lockdep_is_held(&peers.lock));
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
                                RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = lrh + 1;     /* RH+1 or RH+2 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH+1 */
                                RCU_INIT_POINTER(l->avl_right, node);   /* node: RH+1 or RH+2 */
                                l->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, l);
                        } else {        /* ll: RH, lr: RH+1 */
                                lrl = rcu_dereference_protected(lr->avl_left,
                                        lockdep_is_held(&peers.lock));  /* lrl: RH or RH-1 */
                                lrr = rcu_dereference_protected(lr->avl_right,
                                        lockdep_is_held(&peers.lock));  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1;      /* node: RH+1 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH */
                                RCU_INIT_POINTER(l->avl_right, lrl);    /* lrl: RH or RH-1 */
                                l->avl_height = rh + 1;         /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_left, l);      /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_right, node);  /* node: RH+1 */
                                lr->avl_height = rh + 2;
                                RCU_INIT_POINTER(*nodep, lr);
                        }
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
                        rr = rcu_dereference_protected(r->avl_right,
                                        lockdep_is_held(&peers.lock));
                        rl = rcu_dereference_protected(r->avl_left,
                                        lockdep_is_held(&peers.lock));
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
                                RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = rlh + 1;     /* LH+1 or LH+2 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH+1 */
                                RCU_INIT_POINTER(r->avl_left, node);    /* node: LH+1 or LH+2 */
                                r->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, r);
                        } else {        /* rr: LH, rl: LH+1 */
                                rlr = rcu_dereference_protected(rl->avl_right,
                                        lockdep_is_held(&peers.lock));  /* rlr: LH or LH-1 */
                                rll = rcu_dereference_protected(rl->avl_left,
                                        lockdep_is_held(&peers.lock));  /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1;      /* node: LH+1 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH */
                                RCU_INIT_POINTER(r->avl_left, rlr);     /* rlr: LH or LH-1 */
                                r->avl_height = lh + 1;         /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_right, r);     /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_left, node);   /* node: LH+1 */
                                rl->avl_height = lh + 2;
                                RCU_INIT_POINTER(*nodep, rl);
                        }
                } else {
                        node->avl_height = (lh > rh ? lh : rh) + 1;
                }
        }
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n)                                         \
do {                                                            \
        n->avl_height = 1;                                      \
        n->avl_left = peer_avl_empty_rcu;                       \
        n->avl_right = peer_avl_empty_rcu;                      \
        /* lockless readers can catch us now */                 \
        rcu_assign_pointer(**--stackptr, n);                    \
        peer_avl_rebalance(stack, stackptr);                    \
} while (0)
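
/*
 * RCU callback: runs only after a grace period, when no rcu_read_lock_bh()
 * section started before the unlink can still hold a pointer to the node.
 */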
static void inetpeer_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
        int do_free;

        do_free = 0;

        spin_lock_bh(&peers.lock);
        /* Check the reference counter.  It was artificially incremented by 1
         * in cleanup_once() to prevent the node from disappearing suddenly.
         * If we can atomically (because of lockless readers) take this last
         * reference, it's safe to remove the node and free it later.
         * We use refcnt=-1 to alert lockless readers that this entry is
         * deleted.
         */
        if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
                struct inet_peer __rcu **stack[PEER_MAXDEPTH];
                struct inet_peer __rcu ***stackptr, ***delp;
                if (lookup(p->v4daddr, stack) != p)
                        BUG();
                delp = stackptr - 1; /* *delp[0] == p */
                if (p->avl_left == peer_avl_empty_rcu) {
                        *delp[0] = p->avl_right;
                        --stackptr;
                } else {
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p);
                        BUG_ON(rcu_dereference_protected(*stackptr[-1],
                                        lockdep_is_held(&peers.lock)) != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->v4daddr > x->v4daddr for any
                         * x in p->avl_left subtree.
                         * Put t in the old place of p. */
                        RCU_INIT_POINTER(*delp[0], t);
                        t->avl_left = p->avl_left;
                        t->avl_right = p->avl_right;
                        t->avl_height = p->avl_height;
                        BUG_ON(delp[1] != &p->avl_left);
                        delp[1] = &t->avl_left; /* was &p->avl_left */
                }
                peer_avl_rebalance(stack, stackptr);
                peers.total--;
                do_free = 1;
        }
        spin_unlock_bh(&peers.lock);

        if (do_free)
                call_rcu_bh(&p->rcu, inetpeer_free_rcu);
        else
                /* The node is used again.  Decrease the reference counter
                 * back.  The loop "cleanup -> unlink_from_unused
                 *   -> unlink_from_pool -> putpeer -> link_to_unused
                 *   -> cleanup (for the same node)"
                 * doesn't really exist because the entry will have a
                 * recent deletion time and will not be cleaned again soon.
                 */
                inet_putpeer(p);
}
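
/*
 * cleanup_once() tries to reclaim one entry from the head of the unused
 * list.  'ttl' is the minimum idle time (in jiffies) since the entry was
 * last put before it may be pruned.  Returns 0 if an entry was removed,
 * -1 if the list was empty or its oldest entry is still too fresh.
 */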

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
        struct inet_peer *p = NULL;

        /* Remove the first entry from the list of unused nodes. */
        spin_lock_bh(&unused_peers.lock);
        if (!list_empty(&unused_peers.list)) {
                __u32 delta;

                p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
                delta = (__u32)jiffies - p->dtime;

                if (delta < ttl) {
                        /* Do not prune fresh entries. */
                        spin_unlock_bh(&unused_peers.lock);
                        return -1;
                }

                list_del_init(&p->unused);

                /* Grab an extra reference to prevent node disappearing
                 * before unlink_from_pool() call. */
                atomic_inc(&p->refcnt);
        }
        spin_unlock_bh(&unused_peers.lock);

        if (p == NULL)
                /* It means that the total number of USED entries has
                 * grown over inet_peer_threshold.  It shouldn't really
                 * happen because of entry limits in route cache. */
                return -1;

        unlink_from_pool(p);
        return 0;
}

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
        struct inet_peer *p;
        struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;

        /* Look up the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock_bh();
        p = lookup_rcu_bh(daddr);
        rcu_read_unlock_bh();

        if (p) {
                /* The existing node has been found.
                 * Remove the entry from the unused list if it was there.
                 */
                unlink_from_unused(p);
                return p;
        }

        /* Retry an exact lookup, this time taking the lock.
         * At least the nodes should be hot in our cache.
         */
        spin_lock_bh(&peers.lock);
        p = lookup(daddr, stack);
        if (p != peer_avl_empty) {
                atomic_inc(&p->refcnt);
                spin_unlock_bh(&peers.lock);
                /* Remove the entry from the unused list if it was there. */
                unlink_from_unused(p);
                return p;
        }
        p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
        if (p) {
                p->v4daddr = daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
                atomic_set(&p->ip_id_count, secure_ip_id(daddr));
                p->tcp_ts_stamp = 0;
                INIT_LIST_HEAD(&p->unused);

                /* Link the node. */
                link_to_pool(p);
                peers.total++;
        }
        spin_unlock_bh(&peers.lock);

        if (peers.total >= inet_peer_threshold)
                /* Remove one less-recently-used entry. */
                cleanup_once(0);

        return p;
}
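
/*
 * Entries' effective time-to-live scales linearly with pool occupancy:
 * an almost-empty pool keeps entries for inet_peer_maxttl, while a pool
 * at or above inet_peer_threshold drops them after inet_peer_minttl.
 */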

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
        unsigned long now = jiffies;
        int ttl;

        if (peers.total >= inet_peer_threshold)
                ttl = inet_peer_minttl;
        else
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        peers.total / inet_peer_threshold * HZ;
        while (!cleanup_once(ttl)) {
                if (jiffies != now)
                        break;
        }

        /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
         * interval depending on the total number of entries (more entries,
         * less interval). */
        if (peers.total >= inet_peer_threshold)
                peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
        else
                peer_periodic_timer.expires = jiffies
                        + inet_peer_gc_maxtime
                        - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
                                peers.total / inet_peer_threshold * HZ;
        add_timer(&peer_periodic_timer);
}
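
/*
 * Drop a reference.  When the last reference goes away the node is put on
 * the tail of the unused (LRU) list and stamped with its deletion time;
 * cleanup_once() may reclaim it later if it stays unused long enough.
 */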
void inet_putpeer(struct inet_peer *p)
{
        local_bh_disable();

        if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
                list_add_tail(&p->unused, &unused_peers.list);
                p->dtime = (__u32)jiffies;
                spin_unlock(&unused_peers.lock);
        }

        local_bh_enable();
}