/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t *key_jar;
static key_serial_t key_serial_next = 3;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
        .name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid < user->uid)
                        p = &(*p)->rb_left;
                else if (uid > user->uid)
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        INIT_LIST_HEAD(&candidate->consq);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - we work through all the serial numbers between 2 and 2^31-1 in turn and
 *   then wrap
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        spin_lock(&key_serial_lock);

        /* propose a likely serial number and look for a hole for it in the
         * serial number tree */
        key->serial = key_serial_next;
        if (key->serial < 3)
                key->serial = 3;
        key_serial_next = key->serial + 1;

        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }
        goto insert_here;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial = key_serial_next;
                if (key->serial < 2)
                        key->serial = 2;
                key_serial_next = key->serial + 1;

                if (!rb_parent(parent))
                        p = &key_serial_tree.rb_node;
                else if (rb_parent(parent)->rb_left == parent)
                        p = &(rb_parent(parent)->rb_left);
                else
                        p = &(rb_parent(parent)->rb_right);

                parent = rb_next(parent);
                if (!parent)
                        break;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto insert_here;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      uid_t uid, gid_t gid, struct task_struct *ctx,
                      key_perm_t perm, unsigned long flags)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        desclen = strlen(desc) + 1;
        quotalen = desclen + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
                            user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
                            )
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
        if (!key)
                goto no_memory_2;

        if (desc) {
                key->description = kmalloc(desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;

                memcpy(key->description, desc, desclen);
        }

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        key->type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->flags = 0;
        key->expiry = 0;
        key->payload.data = NULL;
        key->security = NULL;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;

        memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, ctx, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int) datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
                    ) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }

                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
                                      const void *data,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *instkey)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        down_write(&key_construction_sem);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, data, datalen);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring)
                                ret = __key_link(keyring, key);

                        /* disable the authorisation key */
                        if (instkey)
                                key_revoke(instkey);
                }
        }

        up_write(&key_construction_sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_all(&request_key_conswq);

        return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *instkey)
{
        int ret;

        if (keyring)
                down_write(&keyring->sem);

        ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

        if (keyring)
                up_write(&keyring->sem);

        return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
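
/*
 * Editorial usage sketch (not part of the original file): a typical in-kernel
 * caller allocates an uninstantiated key with key_alloc() and then
 * instantiates and links it in one step.  The destination keyring,
 * description and payload below are hypothetical placeholders.
 *
 *        struct key *key;
 *        int ret;
 *
 *        key = key_alloc(&key_type_user, "example:desc",
 *                        current->fsuid, current->fsgid, current,
 *                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *                        KEY_ALLOC_IN_QUOTA);
 *        if (IS_ERR(key))
 *                return PTR_ERR(key);
 *
 *        ret = key_instantiate_and_link(key, "payload", 7, dest_keyring, NULL);
 *        if (ret < 0)
 *                key_put(key);
 */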

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
                        unsigned timeout,
                        struct key *keyring,
                        struct key *instkey)
{
        struct timespec now;
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring)
                down_write(&keyring->sem);

        down_write(&key_construction_sem);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring)
                        ret = __key_link(keyring, key);

                /* disable the authorisation key */
                if (instkey)
                        key_revoke(instkey);
        }

        up_write(&key_construction_sem);

        if (keyring)
                up_write(&keyring->sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_all(&request_key_conswq);

        return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
        struct rb_node *_n;
        struct key *key;

go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);

        /* there may, of course, be more than one key to destroy */
        goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_cleanup_task);
        }

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* pretend it doesn't exist if it's dead */
        if (atomic_read(&key->usage) == 0 ||
            test_bit(KEY_FLAG_DEAD, &key->flags) ||
            key->type == &key_type_dead)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        atomic_inc(&key->usage);

error:
        spin_unlock(&key_serial_lock);
        return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, payload, plen);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               unsigned long flags)
{
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
        key_perm_t perm;
        key_ref_t key_ref;
        int ret;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        ktype = key_type_lookup(type);
        if (IS_ERR(ktype)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!ktype->match || !ktype->instantiate)
                goto error_2;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_2;

        down_write(&keyring->sem);

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (ktype->update) {
                key_ref = __keyring_search_one(keyring_ref, ktype, description,
                                               0);
                if (!IS_ERR(key_ref))
                        goto found_matching_key;
        }

        /* decide on the permissions we want */
        perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
        perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

        if (ktype->read)
                perm |= KEY_POS_READ | KEY_USR_READ;

        if (ktype == &key_type_keyring || ktype->update)
                perm |= KEY_USR_WRITE;

        /* allocate a new key */
        key = key_alloc(ktype, description, current->fsuid, current->fsgid,
                        current, perm, flags);
        if (IS_ERR(key)) {
                key_ref = ERR_PTR(PTR_ERR(key));
                goto error_3;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
        up_write(&keyring->sem);
error_2:
        key_type_put(ktype);
error:
        return key_ref;

found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        up_write(&keyring->sem);
        key_type_put(ktype);

        key_ref = __key_update(key_ref, payload, plen);
        goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
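
/*
 * Editorial usage sketch (not part of the original file): callers that just
 * want "add this key to that keyring, or refresh it if it already exists" use
 * key_create_or_update() rather than driving key_alloc() themselves.  The
 * keyring pointer and payload below are hypothetical placeholders.
 *
 *        key_ref_t kref;
 *
 *        kref = key_create_or_update(make_key_ref(dest_keyring, 1),
 *                                    "user", "example:desc",
 *                                    payload, plen, KEY_ALLOC_IN_QUOTA);
 *        if (IS_ERR(kref))
 *                return PTR_ERR(kref);
 *        key_ref_put(kref);
 */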

/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        /* attempt to update it if supported */
        ret = -EOPNOTSUPP;
        if (key->type->update) {
                down_write(&key->sem);

                ret = key->type->update(key, payload, plen);
                if (ret == 0)
                        /* updating a negative key instantiates it */
                        clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

                up_write(&key->sem);
        }

error:
        return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
        key_check(key);

        /* make sure no one's trying to change or use the key when we mark
         * it */
        down_write(&key->sem);
        set_bit(KEY_FLAG_REVOKED, &key->flags);

        if (key->type->revoke)
                key->type->revoke(key);

        up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
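
/*
 * Editorial usage sketch (not part of the original file): a module that
 * provides its own key type fills in a struct key_type and registers it at
 * module init time.  The "example" type and its callbacks are hypothetical.
 *
 *        static struct key_type key_type_example = {
 *                .name           = "example",
 *                .instantiate    = example_instantiate,
 *                .match          = example_match,
 *                .destroy        = example_destroy,
 *        };
 *
 *        static int __init example_init(void)
 *        {
 *                return register_key_type(&key_type_example);
 *        }
 *
 *        static void __exit example_exit(void)
 *        {
 *                unregister_key_type(&key_type_example);
 *        }
 */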

/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
        struct rb_node *_n;
        struct key *key;

        down_write(&key_types_sem);

        /* withdraw the key type */
        list_del_init(&ktype->link);

        /* mark all the keys of this type dead */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype)
                        key->type = &key_type_dead;
        }

        spin_unlock(&key_serial_lock);

        /* make sure everyone revalidates their keys */
        synchronize_rcu();

        /* we should now be able to destroy the payloads of all the keys of
         * this type with impunity */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        if (ktype->destroy)
                                ktype->destroy(key);
                        memset(&key->payload, 0xbd, sizeof(key->payload));
                }
        }

        spin_unlock(&key_serial_lock);
        up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                                    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);

        /* record root's user standard keyrings */
        key_check(&root_user_keyring);
        key_check(&root_session_keyring);

        __key_insert_serial(&root_user_keyring);
        __key_insert_serial(&root_session_keyring);

        keyring_publish_name(&root_user_keyring);
        keyring_publish_name(&root_session_keyring);

        /* link the two root keyrings together */
        key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */