key.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993
  1. /* Basic authentication token and access key management
  2. *
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/init.h>
  13. #include <linux/poison.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/security.h>
  17. #include <linux/workqueue.h>
  18. #include <linux/random.h>
  19. #include <linux/err.h>
  20. #include "internal.h"
static struct kmem_cache *key_jar;	/* slab cache for struct key (created in key_init()) */
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);
static LIST_HEAD(key_types_list);	/* all registered key types, guarded by key_types_sem */
static DECLARE_RWSEM(key_types_sem);
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);	/* deferred destruction of dead keys */

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};
#ifdef KEY_DEBUGGING
/*
 * report a magic-number mismatch on a key and halt
 * - reached via the key_check() macro when key->magic != KEY_DEBUG_MAGIC,
 *   i.e. the pointer does not refer to a valid live key
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 * - returns NULL on allocation failure, otherwise the record with its usage
 *   count incremented (drop with key_user_put())
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one - must drop the lock first since kmalloc may sleep */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	/* parent/p still describe the insertion point found under the lock */
	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);	/* discard the unused candidate from a prior pass, if any */
out:
	return user;

} /* end key_user_lookup() */
  105. /*****************************************************************************/
  106. /*
  107. * dispose of a user structure
  108. */
  109. void key_user_put(struct key_user *user)
  110. {
  111. if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
  112. rb_erase(&user->node, &key_user_tree);
  113. spin_unlock(&key_user_lock);
  114. kfree(user);
  115. }
  116. } /* end key_user_put() */
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 * - serials below 3 are never assigned: the random proposal is shifted
 *   positive and rejected whilst < 3 (low values presumably reserved for
 *   special key IDs - TODO confirm against the keyctl interface)
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	/* descend the tree looking for where the proposed serial would go */
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* the increment wrapped past the positive range -
			 * restart the probe from the lowest legal serial */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			/* ran off the top of the tree - the candidate serial
			 * is larger than any in use */
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			/* found a gap before the next in-use serial */
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 * - returns the new key or an ERR_PTR: -EINVAL for a NULL/empty description,
 *   -ENOMEM on allocation failure, -EDQUOT when the user's quota would be
 *   exceeded, or whatever error security_key_alloc() reported
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;	/* the NUL is charged to quota too */
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
			    )
				goto no_quota;
		}

		/* charge the quota before dropping the lock */
		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	/* NOTE(review): desc is always non-NULL here (rejected above), so
	 * this test is redundant but harmless */
	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;	/* 0 == no expiry */
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	/* unwind: free the key and return the quota charged above */
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	/* quota was already charged - give it back */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
  279. /*****************************************************************************/
  280. /*
  281. * reserve an amount of quota for the key's payload
  282. */
  283. int key_payload_reserve(struct key *key, size_t datalen)
  284. {
  285. int delta = (int) datalen - key->datalen;
  286. int ret = 0;
  287. key_check(key);
  288. /* contemplate the quota adjustment */
  289. if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
  290. spin_lock(&key->user->lock);
  291. if (delta > 0 &&
  292. key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
  293. ) {
  294. ret = -EDQUOT;
  295. }
  296. else {
  297. key->user->qnbytes += delta;
  298. key->quotalen += delta;
  299. }
  300. spin_unlock(&key->user->lock);
  301. }
  302. /* change the recorded data length if that didn't generate an error */
  303. if (ret == 0)
  304. key->datalen = datalen;
  305. return ret;
  306. } /* end key_payload_reserve() */
  307. EXPORT_SYMBOL(key_payload_reserve);
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 * - returns -EBUSY if the key was already instantiated, otherwise the result
 *   of the type's instantiate op (and then of __key_link() if a keyring was
 *   supplied)
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* serialise against concurrent instantiation/link of the same key */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			/* anyone blocked waiting for construction must be
			 * woken once we drop the mutex */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */
  349. /*****************************************************************************/
  350. /*
  351. * instantiate a key and link it into the target keyring atomically
  352. */
  353. int key_instantiate_and_link(struct key *key,
  354. const void *data,
  355. size_t datalen,
  356. struct key *keyring,
  357. struct key *instkey)
  358. {
  359. int ret;
  360. if (keyring)
  361. down_write(&keyring->sem);
  362. ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);
  363. if (keyring)
  364. up_write(&keyring->sem);
  365. return ret;
  366. } /* end key_instantiate_and_link() */
  367. EXPORT_SYMBOL(key_instantiate_and_link);
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 * - the key is marked negative and given an expiry 'timeout' seconds from now
 * - returns -EBUSY if the key was already instantiated
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	/* serialise against concurrent instantiation of the same key */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

		/* the negative result is only cached until the timeout */
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 * - scheduled by key_put() when a key's usage count hits zero; scans the
 *   serial tree for dead keys (usage == 0) and destroys them one at a time
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	/* poison the magic so a use-after-free is detectable */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */
  461. /*****************************************************************************/
  462. /*
  463. * dispose of a reference to a key
  464. * - when all the references are gone, we schedule the cleanup task to come and
  465. * pull it out of the tree in definite process context
  466. */
  467. void key_put(struct key *key)
  468. {
  469. if (key) {
  470. key_check(key);
  471. if (atomic_dec_and_test(&key->usage))
  472. schedule_work(&key_cleanup_task);
  473. }
  474. } /* end key_put() */
  475. EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * find a key by its serial number
 * - returns the key with its usage count incremented, or ERR_PTR(-ENOKEY) if
 *   the serial is unknown or the key is dead/revoked-by-type-removal
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */
  513. /*****************************************************************************/
  514. /*
  515. * find and lock the specified key type against removal
  516. * - we return with the sem readlocked
  517. */
  518. struct key_type *key_type_lookup(const char *type)
  519. {
  520. struct key_type *ktype;
  521. down_read(&key_types_sem);
  522. /* look up the key type to see if it's one of the registered kernel
  523. * types */
  524. list_for_each_entry(ktype, &key_types_list, link) {
  525. if (strcmp(ktype->name, type) == 0)
  526. goto found_kernel_type;
  527. }
  528. up_read(&key_types_sem);
  529. ktype = ERR_PTR(-ENOKEY);
  530. found_kernel_type:
  531. return ktype;
  532. } /* end key_type_lookup() */
/*****************************************************************************/
/*
 * unlock a key type
 * - releases the read lock on key_types_sem taken by key_type_lookup();
 *   the ktype argument itself is unused
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 * - returns the key ref on success, or an ERR_PTR: the permission error,
 *   -EEXIST if the type has no update op, or the update op's error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	/* drop the reference taken by our caller before reporting */
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 * - returns a ref to the created/updated key, or an ERR_PTR: -ENODEV for an
 *   unknown type, -EINVAL for a non-matchable/non-instantiable type,
 *   -ENOTDIR if keyring_ref isn't a keyring, a permission error, or an
 *   allocation/instantiation error
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types - on success this leaves key_types_sem readlocked, pinning
	 * the type until key_type_put() */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
  664. /*****************************************************************************/
  665. /*
  666. * update a key
  667. */
  668. int key_update(key_ref_t key_ref, const void *payload, size_t plen)
  669. {
  670. struct key *key = key_ref_to_ptr(key_ref);
  671. int ret;
  672. key_check(key);
  673. /* the key must be writable */
  674. ret = key_permission(key_ref, KEY_WRITE);
  675. if (ret < 0)
  676. goto error;
  677. /* attempt to update it if supported */
  678. ret = -EOPNOTSUPP;
  679. if (key->type->update) {
  680. down_write(&key->sem);
  681. ret = key->type->update(key, payload, plen);
  682. if (ret == 0)
  683. /* updating a negative key instantiates it */
  684. clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
  685. up_write(&key->sem);
  686. }
  687. error:
  688. return ret;
  689. } /* end key_update() */
  690. EXPORT_SYMBOL(key_update);
  691. /*****************************************************************************/
  692. /*
  693. * revoke a key
  694. */
  695. void key_revoke(struct key *key)
  696. {
  697. key_check(key);
  698. /* make sure no one's trying to change or use the key when we mark it
  699. * - we tell lockdep that we might nest because we might be revoking an
  700. * authorisation key whilst holding the sem on a key we've just
  701. * instantiated
  702. */
  703. down_write_nested(&key->sem, 1);
  704. if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
  705. key->type->revoke)
  706. key->type->revoke(key);
  707. up_write(&key->sem);
  708. } /* end key_revoke() */
  709. EXPORT_SYMBOL(key_revoke);
  710. /*****************************************************************************/
  711. /*
  712. * register a type of key
  713. */
  714. int register_key_type(struct key_type *ktype)
  715. {
  716. struct key_type *p;
  717. int ret;
  718. ret = -EEXIST;
  719. down_write(&key_types_sem);
  720. /* disallow key types with the same name */
  721. list_for_each_entry(p, &key_types_list, link) {
  722. if (strcmp(p->name, ktype->name) == 0)
  723. goto out;
  724. }
  725. /* store the type */
  726. list_add(&ktype->link, &key_types_list);
  727. ret = 0;
  728. out:
  729. up_write(&key_types_sem);
  730. return ret;
  731. } /* end register_key_type() */
  732. EXPORT_SYMBOL(register_key_type);
/*****************************************************************************/
/*
 * unregister a type of key
 * - all keys of this type are re-typed to key_type_dead and their payloads
 *   destroyed; the type structure itself may then be discarded by the caller
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload so stale use is detectable */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
/*****************************************************************************/
/*
 * initialise the key management stuff
 * - called once at boot; sets up the key slab, registers the built-in key
 *   types and seeds the user quota tree with the root user's record
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types - done directly rather than via
	 * register_key_type() as no one else can be running yet */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking - inserted as the first (and only)
	 * node of the empty key_user_tree */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */