/* security/keys/key.c */
  1. /* Basic authentication token and access key management
  2. *
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/init.h>
  13. #include <linux/poison.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/security.h>
  17. #include <linux/workqueue.h>
  18. #include <linux/random.h>
  19. #include <linux/err.h>
  20. #include <linux/user_namespace.h>
  21. #include "internal.h"
static struct kmem_cache *key_jar;	/* slab cache from which all keys are allocated */
struct rb_root key_serial_tree;		/* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree;		/* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* deferred reaping of zero-usage keys runs in process context (see key_cleanup()) */
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};
#ifdef KEY_DEBUGGING
/*
 * Report a key whose magic number doesn't match KEY_DEBUG_MAGIC, then halt.
 * Reached via the key_check() macro when a key structure appears corrupt or
 * already freed.
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 *
 * The returned record has its usage count incremented; returns NULL if the
 * allocation of a new record fails.  Because kmalloc() may sleep, the tree
 * search is restarted after allocating a candidate in case someone else
 * inserted the same record in the meantime.
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		/* same UID: disambiguate by namespace pointer value */
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	/* candidate is NULL or an unused second-pass allocation */
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
/*****************************************************************************/
/*
 * Dispose of a reference to a user's key quota record.
 *
 * When the last reference goes away, the record is unlinked from
 * key_user_tree and freed.  atomic_dec_and_lock() only takes key_user_lock
 * if the count actually reaches zero.
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);
		put_user_ns(user->user_ns);

		kfree(user);
	}

} /* end key_user_put() */
/*****************************************************************************/
/*
 * Assign a key the next unique serial number.
 *
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 * - serials below 3 are reserved (and negative serials are not permitted),
 *   so the random proposal is constrained to be >= 3
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* serial counter wrapped round: restart from the
			 * lowest permitted serial */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			/* ran off the top of the tree - the incremented
			 * serial is free */
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			/* found a gap below the next occupied serial */
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */
  180. /*****************************************************************************/
  181. /*
  182. * allocate a key of the specified type
  183. * - update the user's quota to reflect the existence of the key
  184. * - called from a key-type operation with key_types_sem read-locked by
  185. * key_create_or_update()
  186. * - this prevents unregistration of the key type
  187. * - upon return the key is as yet uninstantiated; the caller needs to either
  188. * instantiate the key or discard it before returning
  189. */
  190. struct key *key_alloc(struct key_type *type, const char *desc,
  191. uid_t uid, gid_t gid, const struct cred *cred,
  192. key_perm_t perm, unsigned long flags)
  193. {
  194. struct key_user *user = NULL;
  195. struct key *key;
  196. size_t desclen, quotalen;
  197. int ret;
  198. key = ERR_PTR(-EINVAL);
  199. if (!desc || !*desc)
  200. goto error;
  201. desclen = strlen(desc) + 1;
  202. quotalen = desclen + type->def_datalen;
  203. /* get hold of the key tracking for this user */
  204. user = key_user_lookup(uid, cred->user->user_ns);
  205. if (!user)
  206. goto no_memory_1;
  207. /* check that the user's quota permits allocation of another key and
  208. * its description */
  209. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  210. unsigned maxkeys = (uid == 0) ?
  211. key_quota_root_maxkeys : key_quota_maxkeys;
  212. unsigned maxbytes = (uid == 0) ?
  213. key_quota_root_maxbytes : key_quota_maxbytes;
  214. spin_lock(&user->lock);
  215. if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
  216. if (user->qnkeys + 1 >= maxkeys ||
  217. user->qnbytes + quotalen >= maxbytes ||
  218. user->qnbytes + quotalen < user->qnbytes)
  219. goto no_quota;
  220. }
  221. user->qnkeys++;
  222. user->qnbytes += quotalen;
  223. spin_unlock(&user->lock);
  224. }
  225. /* allocate and initialise the key and its description */
  226. key = kmem_cache_alloc(key_jar, GFP_KERNEL);
  227. if (!key)
  228. goto no_memory_2;
  229. if (desc) {
  230. key->description = kmemdup(desc, desclen, GFP_KERNEL);
  231. if (!key->description)
  232. goto no_memory_3;
  233. }
  234. atomic_set(&key->usage, 1);
  235. init_rwsem(&key->sem);
  236. key->type = type;
  237. key->user = user;
  238. key->quotalen = quotalen;
  239. key->datalen = type->def_datalen;
  240. key->uid = uid;
  241. key->gid = gid;
  242. key->perm = perm;
  243. key->flags = 0;
  244. key->expiry = 0;
  245. key->payload.data = NULL;
  246. key->security = NULL;
  247. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
  248. key->flags |= 1 << KEY_FLAG_IN_QUOTA;
  249. memset(&key->type_data, 0, sizeof(key->type_data));
  250. #ifdef KEY_DEBUGGING
  251. key->magic = KEY_DEBUG_MAGIC;
  252. #endif
  253. /* let the security module know about the key */
  254. ret = security_key_alloc(key, cred, flags);
  255. if (ret < 0)
  256. goto security_error;
  257. /* publish the key by giving it a serial number */
  258. atomic_inc(&user->nkeys);
  259. key_alloc_serial(key);
  260. error:
  261. return key;
  262. security_error:
  263. kfree(key->description);
  264. kmem_cache_free(key_jar, key);
  265. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  266. spin_lock(&user->lock);
  267. user->qnkeys--;
  268. user->qnbytes -= quotalen;
  269. spin_unlock(&user->lock);
  270. }
  271. key_user_put(user);
  272. key = ERR_PTR(ret);
  273. goto error;
  274. no_memory_3:
  275. kmem_cache_free(key_jar, key);
  276. no_memory_2:
  277. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  278. spin_lock(&user->lock);
  279. user->qnkeys--;
  280. user->qnbytes -= quotalen;
  281. spin_unlock(&user->lock);
  282. }
  283. key_user_put(user);
  284. no_memory_1:
  285. key = ERR_PTR(-ENOMEM);
  286. goto error;
  287. no_quota:
  288. spin_unlock(&user->lock);
  289. key_user_put(user);
  290. key = ERR_PTR(-EDQUOT);
  291. goto error;
  292. } /* end key_alloc() */
  293. EXPORT_SYMBOL(key_alloc);
  294. /*****************************************************************************/
  295. /*
  296. * reserve an amount of quota for the key's payload
  297. */
  298. int key_payload_reserve(struct key *key, size_t datalen)
  299. {
  300. int delta = (int) datalen - key->datalen;
  301. int ret = 0;
  302. key_check(key);
  303. /* contemplate the quota adjustment */
  304. if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
  305. unsigned maxbytes = (key->user->uid == 0) ?
  306. key_quota_root_maxbytes : key_quota_maxbytes;
  307. spin_lock(&key->user->lock);
  308. if (delta > 0 &&
  309. (key->user->qnbytes + delta >= maxbytes ||
  310. key->user->qnbytes + delta < key->user->qnbytes)) {
  311. ret = -EDQUOT;
  312. }
  313. else {
  314. key->user->qnbytes += delta;
  315. key->quotalen += delta;
  316. }
  317. spin_unlock(&key->user->lock);
  318. }
  319. /* change the recorded data length if that didn't generate an error */
  320. if (ret == 0)
  321. key->datalen = datalen;
  322. return ret;
  323. } /* end key_payload_reserve() */
  324. EXPORT_SYMBOL(key_payload_reserve);
/*****************************************************************************/
/*
 * Instantiate a key and link it into the target keyring atomically.
 *
 * - called with the target keyring's semaphore writelocked
 * - key_construction_mutex ensures a key cannot be instantiated twice
 * - on successful instantiation the optional authorisation key is revoked
 *   and anyone sleeping on KEY_FLAG_USER_CONSTRUCT is woken
 *
 * Returns 0 on success, -EBUSY if the key was already instantiated, or the
 * error from the type's instantiate op or from __key_link().
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */
  366. /*****************************************************************************/
  367. /*
  368. * instantiate a key and link it into the target keyring atomically
  369. */
  370. int key_instantiate_and_link(struct key *key,
  371. const void *data,
  372. size_t datalen,
  373. struct key *keyring,
  374. struct key *authkey)
  375. {
  376. int ret;
  377. if (keyring)
  378. down_write(&keyring->sem);
  379. ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);
  380. if (keyring)
  381. up_write(&keyring->sem);
  382. return ret;
  383. } /* end key_instantiate_and_link() */
  384. EXPORT_SYMBOL(key_instantiate_and_link);
/*****************************************************************************/
/*
 * Negatively instantiate a key and link it into the target keyring
 * atomically.
 *
 * A negative key causes lookups to fail until it expires, 'timeout' seconds
 * from now.  As with __key_instantiate_and_link(), the construction mutex
 * prevents double instantiation, the optional authorisation key is revoked
 * and anyone sleeping on KEY_FLAG_USER_CONSTRUCT is woken.
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
/*****************************************************************************/
/*
 * Reap keys whose usage count has fallen to zero.
 *
 * Runs from the key_cleanup_task work item so that destruction happens in
 * process context and we don't have to disable interrupts all over the
 * place.  key_serial_lock is dropped while each dead key is torn down, so
 * the tree scan restarts from the top after every reclamation.
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	/* poison the magic so use-after-free trips __key_check() */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */
  478. /*****************************************************************************/
  479. /*
  480. * dispose of a reference to a key
  481. * - when all the references are gone, we schedule the cleanup task to come and
  482. * pull it out of the tree in definite process context
  483. */
  484. void key_put(struct key *key)
  485. {
  486. if (key) {
  487. key_check(key);
  488. if (atomic_dec_and_test(&key->usage))
  489. schedule_work(&key_cleanup_task);
  490. }
  491. } /* end key_put() */
  492. EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * Find a key by its serial number.
 *
 * On success the key's usage count is incremented and the key returned; a
 * key that is dead (zero usage count, KEY_FLAG_DEAD set, or of the dead
 * type) is treated as absent.  Returns ERR_PTR(-ENOKEY) when no live key
 * has the given serial.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */
  530. /*****************************************************************************/
  531. /*
  532. * find and lock the specified key type against removal
  533. * - we return with the sem readlocked
  534. */
  535. struct key_type *key_type_lookup(const char *type)
  536. {
  537. struct key_type *ktype;
  538. down_read(&key_types_sem);
  539. /* look up the key type to see if it's one of the registered kernel
  540. * types */
  541. list_for_each_entry(ktype, &key_types_list, link) {
  542. if (strcmp(ktype->name, type) == 0)
  543. goto found_kernel_type;
  544. }
  545. up_read(&key_types_sem);
  546. ktype = ERR_PTR(-ENOKEY);
  547. found_kernel_type:
  548. return ktype;
  549. } /* end key_type_lookup() */
/*****************************************************************************/
/*
 * Unlock a key type locked by key_type_lookup().
 *
 * The ktype argument itself is unused; dropping key_types_sem is all that's
 * needed to permit the type to be unregistered again.
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
/*****************************************************************************/
/*
 * Attempt to update an existing key.
 *
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 *
 * Requires write permission on the key and an update op on its type
 * (-EEXIST if the type cannot be updated).  A successful update also clears
 * a negative instantiation.  Returns the key ref or an ERR_PTR.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * Search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one.
 *
 * - the named type must be registered and supply both match and instantiate
 *   ops (-ENODEV / -EINVAL otherwise)
 * - the target must really be a keyring (-ENOTDIR) and be writable by the
 *   caller
 * - if perm is KEY_PERM_UNDEF, a default permission mask is derived from
 *   what the key type supports
 *
 * Returns a ref to the new or updated key, or an ERR_PTR on failure.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
  682. /*****************************************************************************/
  683. /*
  684. * update a key
  685. */
  686. int key_update(key_ref_t key_ref, const void *payload, size_t plen)
  687. {
  688. struct key *key = key_ref_to_ptr(key_ref);
  689. int ret;
  690. key_check(key);
  691. /* the key must be writable */
  692. ret = key_permission(key_ref, KEY_WRITE);
  693. if (ret < 0)
  694. goto error;
  695. /* attempt to update it if supported */
  696. ret = -EOPNOTSUPP;
  697. if (key->type->update) {
  698. down_write(&key->sem);
  699. ret = key->type->update(key, payload, plen);
  700. if (ret == 0)
  701. /* updating a negative key instantiates it */
  702. clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
  703. up_write(&key->sem);
  704. }
  705. error:
  706. return ret;
  707. } /* end key_update() */
  708. EXPORT_SYMBOL(key_update);
/*****************************************************************************/
/*
 * Revoke a key.
 *
 * Sets KEY_FLAG_REVOKED and, the first time only, calls the type's revoke
 * op - all under the key's semaphore so no one can be changing or using the
 * key at the time.
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
  728. /*****************************************************************************/
  729. /*
  730. * register a type of key
  731. */
  732. int register_key_type(struct key_type *ktype)
  733. {
  734. struct key_type *p;
  735. int ret;
  736. ret = -EEXIST;
  737. down_write(&key_types_sem);
  738. /* disallow key types with the same name */
  739. list_for_each_entry(p, &key_types_list, link) {
  740. if (strcmp(p->name, ktype->name) == 0)
  741. goto out;
  742. }
  743. /* store the type */
  744. list_add(&ktype->link, &key_types_list);
  745. ret = 0;
  746. out:
  747. up_write(&key_types_sem);
  748. return ret;
  749. } /* end register_key_type() */
  750. EXPORT_SYMBOL(register_key_type);
/*****************************************************************************/
/*
 * Unregister a type of key.
 *
 * The type is withdrawn from the registered list and every key of that type
 * is re-typed to key_type_dead; after an RCU grace period the payloads of
 * all such keys are destroyed and poisoned.  key_types_sem is held for
 * writing throughout to exclude concurrent type lookups.
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload so stale users trip over it */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
/*****************************************************************************/
/*
 * Initialise the key management state.
 *
 * Called once at boot: creates the key slab cache, registers the built-in
 * key types and seeds the user-quota tree with the root user's record.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */