key.c

/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;     /* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;  /* root's key space quota */
unsigned int key_quota_maxkeys = 200;          /* general key count quota */
unsigned int key_quota_maxbytes = 20000;       /* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
        .name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid < user->uid)
                        p = &(*p)->rb_left;
                else if (uid > user->uid)
                        p = &(*p)->rb_right;
                else if (user_ns < user->user_ns)
                        p = &(*p)->rb_left;
                else if (user_ns > user->user_ns)
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->user_ns = get_user_ns(user_ns);
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        mutex_init(&candidate->cons_lock);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);
                put_user_ns(user->user_ns);

                kfree(user);
        }

} /* end key_user_put() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

attempt_insertion:
        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);
        return;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 3) {
                        key->serial = 3;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto attempt_insertion;
        }

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      uid_t uid, gid_t gid, const struct cred *cred,
                      key_perm_t perm, unsigned long flags)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        desclen = strlen(desc) + 1;
        quotalen = desclen + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid, cred->user->user_ns);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                unsigned maxkeys = (uid == 0) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
                unsigned maxbytes = (uid == 0) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= maxkeys ||
                            user->qnbytes + quotalen >= maxbytes ||
                            user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_alloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        if (desc) {
                key->description = kmemdup(desc, desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;
        }

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        key->type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->flags = 0;
        key->expiry = 0;
        key->payload.data = NULL;
        key->security = NULL;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;

        memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, cred, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
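
/*
 * Illustrative sketch (not part of the original file): how an in-kernel
 * caller might use key_alloc() and key_instantiate_and_link() from this API.
 * The helper name example_make_key, the "example:demo" description and the
 * chosen permission mask are hypothetical; key_type_user is the standard
 * "user" key type added in key_init() below.
 */
#if 0
static struct key *example_make_key(struct key *dest_keyring,
                                    const void *payload, size_t plen)
{
        const struct cred *cred = current_cred();
        struct key *key;
        int ret;

        /* allocate an as-yet-uninstantiated key owned by current fs[ug]id */
        key = key_alloc(&key_type_user, "example:demo",
                        cred->fsuid, cred->fsgid, cred,
                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                        KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(key))
                return key;

        /* instantiate it and link it into the destination keyring */
        ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
        if (ret < 0) {
                key_put(key);
                return ERR_PTR(ret);
        }
        return key;
}
#endif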

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int)datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                unsigned maxbytes = (key->user->uid == 0) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    (key->user->qnbytes + delta >= maxbytes ||
                     key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }
                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
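
/*
 * Illustrative sketch (not part of the original file): a key type's
 * ->update() op would typically call key_payload_reserve() to charge or
 * refund quota before replacing the payload.  example_update and the bare
 * kmalloc'd blob payload are hypothetical; real types whose payloads are
 * read locklessly would replace the payload under RCU instead.
 */
#if 0
static int example_update(struct key *key, const void *data, size_t datalen)
{
        char *copy;
        int ret;

        /* adjust the owner's byte quota for the new payload size first */
        ret = key_payload_reserve(key, datalen);
        if (ret < 0)
                return ret;

        copy = kmemdup(data, datalen, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        /* swap in the new payload; the caller holds key->sem for writing */
        kfree(key->payload.data);
        key->payload.data = copy;
        return 0;
}
#endif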

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
                                      const void *data,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *authkey,
                                      struct keyring_list **_prealloc)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, data, datalen);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring)
                                __key_link(keyring, key, _prealloc);

                        /* disable the authorisation key */
                        if (authkey)
                                key_revoke(authkey);
                }
        }

        mutex_unlock(&key_construction_mutex);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *authkey)
{
        struct keyring_list *prealloc;
        int ret;

        if (keyring) {
                ret = __key_link_begin(keyring, key->type, key->description,
                                       &prealloc);
                if (ret < 0)
                        return ret;
        }

        ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
                                         &prealloc);

        if (keyring)
                __key_link_end(keyring, key->type, prealloc);

        return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
                        unsigned timeout,
                        struct key *keyring,
                        struct key *authkey)
{
        struct keyring_list *prealloc;
        struct timespec now;
        int ret, awaken, link_ret = 0;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring)
                link_ret = __key_link_begin(keyring, key->type,
                                            key->description, &prealloc);

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring && link_ret == 0)
                        __key_link(keyring, key, &prealloc);

                /* disable the authorisation key */
                if (authkey)
                        key_revoke(authkey);
        }

        mutex_unlock(&key_construction_mutex);

        if (keyring)
                __key_link_end(keyring, key->type, prealloc);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret == 0 ? link_ret : ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
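
/*
 * Illustrative sketch (not part of the original file): a construction routine
 * that failed to obtain a key could cache the failure as a negative key so
 * that repeated lookups fail quickly for a while.  The helper name and the
 * 60-second timeout are hypothetical.
 */
#if 0
static void example_construction_failed(struct key *key, struct key *authkey)
{
        /* the negative result is cached for 60 seconds and then the key is
         * garbage collected; the authorisation key is revoked as a side
         * effect */
        key_negate_and_link(key, 60, NULL, authkey);
}
#endif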

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
        struct rb_node *_n;
        struct key *key;

go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);

        /* there may, of course, be more than one key to destroy */
        goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_cleanup_task);
        }

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* pretend it doesn't exist if it is awaiting deletion */
        if (atomic_read(&key->usage) == 0)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        atomic_inc(&key->usage);

error:
        spin_unlock(&key_serial_lock);
        return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, payload, plen);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               key_perm_t perm,
                               unsigned long flags)
{
        struct keyring_list *prealloc;
        const struct cred *cred = current_cred();
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
        key_ref_t key_ref;
        int ret;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        ktype = key_type_lookup(type);
        if (IS_ERR(ktype)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!ktype->match || !ktype->instantiate)
                goto error_2;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_2;

        ret = __key_link_begin(keyring, ktype, description, &prealloc);
        if (ret < 0)
                goto error_2;

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (ktype->update) {
                key_ref = __keyring_search_one(keyring_ref, ktype, description,
                                               0);
                if (!IS_ERR(key_ref))
                        goto found_matching_key;
        }

        /* if the client doesn't provide, decide on the permissions we want */
        if (perm == KEY_PERM_UNDEF) {
                perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
                perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

                if (ktype->read)
                        perm |= KEY_POS_READ | KEY_USR_READ;

                if (ktype == &key_type_keyring || ktype->update)
                        perm |= KEY_USR_WRITE;
        }

        /* allocate a new key */
        key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
                        perm, flags);
        if (IS_ERR(key)) {
                key_ref = ERR_CAST(key);
                goto error_3;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
                                         &prealloc);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
        __key_link_end(keyring, ktype, prealloc);
error_2:
        key_type_put(ktype);
error:
        return key_ref;

found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        __key_link_end(keyring, ktype, prealloc);
        key_type_put(ktype);

        key_ref = __key_update(key_ref, payload, plen);
        goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
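
/*
 * Illustrative sketch (not part of the original file): adding or updating a
 * "user" key in a keyring the caller already holds a reference to, letting
 * key_create_or_update() pick default permissions.  The helper name, the
 * "example:blob" description and the payload are hypothetical.
 */
#if 0
static int example_add_key(struct key *keyring, const void *blob, size_t len)
{
        key_ref_t kref;

        kref = key_create_or_update(make_key_ref(keyring, 1), "user",
                                    "example:blob", blob, len,
                                    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        key_ref_put(kref);
        return 0;
}
#endif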

/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        /* attempt to update it if supported */
        ret = -EOPNOTSUPP;
        if (key->type->update) {
                down_write(&key->sem);

                ret = key->type->update(key, payload, plen);
                if (ret == 0)
                        /* updating a negative key instantiates it */
                        clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

                up_write(&key->sem);
        }

error:
        return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
        struct timespec now;
        time_t time;

        key_check(key);

        /* make sure no one's trying to change or use the key when we mark it
         * - we tell lockdep that we might nest because we might be revoking an
         *   authorisation key whilst holding the sem on a key we've just
         *   instantiated
         */
        down_write_nested(&key->sem, 1);
        if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
            key->type->revoke)
                key->type->revoke(key);

        /* set the death time to no more than the expiry time */
        now = current_kernel_time();
        time = now.tv_sec;
        if (key->revoked_at == 0 || key->revoked_at > time) {
                key->revoked_at = time;
                key_schedule_gc(key->revoked_at + key_gc_delay);
        }

        up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
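
/*
 * Illustrative sketch (not part of the original file): a minimal key type as
 * a module might define and register it.  The "example" type name and its
 * trivial instantiate op are hypothetical; user_match() is the stock exact
 * description matcher used by the "user" key type.
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
                               size_t datalen)
{
        /* a real type would copy the payload; this sketch stores nothing and
         * merely bounds the payload size */
        return datalen <= 32767 ? 0 : -EINVAL;
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = user_match,
};

static int __init example_init(void)
{
        return register_key_type(&key_type_example);
}

static void __exit example_exit(void)
{
        unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
#endif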

/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
        struct rb_node *_n;
        struct key *key;

        down_write(&key_types_sem);

        /* withdraw the key type */
        list_del_init(&ktype->link);

        /* mark all the keys of this type dead */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        key->type = &key_type_dead;
                        set_bit(KEY_FLAG_DEAD, &key->flags);
                }
        }

        spin_unlock(&key_serial_lock);

        /* make sure everyone revalidates their keys */
        synchronize_rcu();

        /* we should now be able to destroy the payloads of all the keys of
         * this type with impunity */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        if (ktype->destroy)
                                ktype->destroy(key);
                        memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
                }
        }

        spin_unlock(&key_serial_lock);
        up_write(&key_types_sem);

        key_schedule_gc(0);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);

} /* end key_init() */