gc.c

/* Key garbage collector
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"

/*
 * Delay, in seconds, between a key being revoked or expiring and links to it
 * being garbage collected.
 */
unsigned key_gc_delay = 5 * 60;

/*
 * Reaper for unused keys.
 */
static void key_gc_unused_keys(struct work_struct *work);
DECLARE_WORK(key_gc_unused_work, key_gc_unused_keys);
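/* key_gc_unused_work is not static: key_put() schedules it when the last
 * reference to a key goes away (see the comment on key_gc_unused_keys()
 * below).
 */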

/*
 * Reaper for links from keyrings to dead keys.
 */
static void key_gc_timer_func(unsigned long);
static void key_gc_dead_links(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static DECLARE_WORK(key_gc_work, key_gc_dead_links);
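
/* State shared between runs of the dead-link reaper: the cursor lets an
 * interrupted scan resume where it left off, key_gc_again notes that links
 * were discarded (so another pass is warranted), bit 0 of key_gc_executing
 * keeps the reaper non-reentrant, and the two times track when the next run
 * is due.
 */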
static key_serial_t key_gc_cursor;      /* the last key the gc considered */
static bool key_gc_again;
static unsigned long key_gc_executing;
static time_t key_gc_next_run = LONG_MAX;
static time_t key_gc_new_timer;

/*
 * Schedule a garbage collection run.
 * - time precision isn't particularly important
 */
void key_schedule_gc(time_t gc_at)
{
        unsigned long expires;
        time_t now = current_kernel_time().tv_sec;

        kenter("%ld", gc_at - now);
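
        /* If the requested time has already passed, run the dead-link reaper
         * now; otherwise only rearm the timer if this run is due sooner than
         * the one currently scheduled.
         */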
        if (gc_at <= now) {
                schedule_work(&key_gc_work);
        } else if (gc_at < key_gc_next_run) {
                expires = jiffies + (gc_at - now) * HZ;
                mod_timer(&key_gc_timer, expires);
        }
}

/*
 * The garbage collector timer kicked off
 */
static void key_gc_timer_func(unsigned long data)
{
        kenter("");
        key_gc_next_run = LONG_MAX;
        schedule_work(&key_gc_work);
}

/*
 * Garbage collect pointers from a keyring.
 *
 * Return true if we altered the keyring.
 */
static bool key_gc_keyring(struct key *keyring, time_t limit)
        __releases(key_serial_lock)
{
        struct keyring_list *klist;
        struct key *key;
        int loop;

        kenter("%x", key_serial(keyring));

        if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
                goto dont_gc;

        /* scan the keyring looking for dead keys */
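        /* (one dead or sufficiently expired link is enough - the keyring is
         * then passed to keyring_gc() in the do_gc path below) */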
        rcu_read_lock();
        klist = rcu_dereference(keyring->payload.subscriptions);
        if (!klist)
                goto unlock_dont_gc;

        for (loop = klist->nkeys - 1; loop >= 0; loop--) {
                key = klist->keys[loop];
                if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
                    (key->expiry > 0 && key->expiry <= limit))
                        goto do_gc;
        }

unlock_dont_gc:
        rcu_read_unlock();
dont_gc:
        kleave(" = false");
        return false;

do_gc:
        rcu_read_unlock();
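
        /* Remember where we got to so a rescan can resume from here, take a
         * reference on the keyring, then drop key_serial_lock so that
         * keyring_gc() can modify the keyring (the caller notes that the lock
         * gets released here).
         */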
        key_gc_cursor = keyring->serial;
        key_get(keyring);
        spin_unlock(&key_serial_lock);
        keyring_gc(keyring, limit);
        key_put(keyring);
        kleave(" = true");
        return true;
}

/*
 * Garbage collector for links to dead keys.
 *
 * This involves scanning the keyrings for dead, expired and revoked keys that
 * have overstayed their welcome
 */
static void key_gc_dead_links(struct work_struct *work)
{
        struct rb_node *rb;
        key_serial_t cursor;
        struct key *key, *xkey;
        time_t new_timer = LONG_MAX, limit, now;

        now = current_kernel_time().tv_sec;
        kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);

        if (test_and_set_bit(0, &key_gc_executing)) {
                key_schedule_gc(current_kernel_time().tv_sec + 1);
                kleave(" [busy; deferring]");
                return;
        }
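
        /* Work out the cut-off time: links to keys whose expiry passed more
         * than key_gc_delay seconds ago are eligible for collection.
         */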
        limit = now;
        if (limit > key_gc_delay)
                limit -= key_gc_delay;
        else
                limit = key_gc_delay;

        spin_lock(&key_serial_lock);

        if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
                spin_unlock(&key_serial_lock);
                clear_bit(0, &key_gc_executing);
                return;
        }
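
        /* A non-zero cursor means a previous scan stopped part way through,
         * so resume from there and keep the next-run estimate gathered so
         * far; a fresh scan starts with the "do it again" flag cleared.
         */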
        cursor = key_gc_cursor;
        if (cursor < 0)
                cursor = 0;
        if (cursor > 0)
                new_timer = key_gc_new_timer;
        else
                key_gc_again = false;

        /* find the first key above the cursor */
        key = NULL;
        rb = key_serial_tree.rb_node;
        while (rb) {
                xkey = rb_entry(rb, struct key, serial_node);
                if (cursor < xkey->serial) {
                        key = xkey;
                        rb = rb->rb_left;
                } else if (cursor > xkey->serial) {
                        rb = rb->rb_right;
                } else {
                        rb = rb_next(rb);
                        if (!rb)
                                goto reached_the_end;
                        key = rb_entry(rb, struct key, serial_node);
                        break;
                }
        }

        if (!key)
                goto reached_the_end;

        /* trawl through the keys looking for keyrings */
        for (;;) {
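                /* Note the earliest expiry beyond the cut-off so the next run
                 * can be scheduled for when it will actually matter.
                 */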
                if (key->expiry > limit && key->expiry < new_timer) {
                        kdebug("will expire %x in %ld",
                               key_serial(key), key->expiry - limit);
                        new_timer = key->expiry;
                }

                if (key->type == &key_type_keyring &&
                    key_gc_keyring(key, limit))
                        /* the gc had to release our lock so that the keyring
                         * could be modified, so we have to get it again */
                        goto gc_released_our_lock;

                rb = rb_next(&key->serial_node);
                if (!rb)
                        goto reached_the_end;
                key = rb_entry(rb, struct key, serial_node);
        }

gc_released_our_lock:
        kdebug("gc_released_our_lock");
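
        /* The scan stopped part way through because key_gc_keyring() dropped
         * the serial lock.  Save the timer estimate, note that another pass
         * is needed and requeue the work item so the scan resumes from
         * key_gc_cursor.
         */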
        key_gc_new_timer = new_timer;
        key_gc_again = true;
        clear_bit(0, &key_gc_executing);
        schedule_work(&key_gc_work);
        kleave(" [continue]");
        return;

        /* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
        kdebug("reached_the_end");
        spin_unlock(&key_serial_lock);
        key_gc_new_timer = new_timer;
        key_gc_cursor = 0;
        clear_bit(0, &key_gc_executing);

        if (key_gc_again) {
                /* there may have been a key that expired whilst we were
                 * scanning, so if we discarded any links we should do another
                 * scan */
                new_timer = now + 1;
                key_schedule_gc(new_timer);
        } else if (new_timer < LONG_MAX) {
                new_timer += key_gc_delay;
                key_schedule_gc(new_timer);
        }

        kleave(" [end]");
}

/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place. key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_gc_unused_keys(struct work_struct *work)
{
        struct rb_node *_n;
        struct key *key;

go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);
                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);
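
        /* The lock was dropped and this node has been erased, so any tree
         * position we held is stale - go back and search from the top.
         */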
        /* there may, of course, be more than one key to destroy */
        goto go_again;
}