
/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t	*key_jar;
static key_serial_t	key_serial_next = 3;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	if (candidate)
		kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */
/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - we work through all the serial numbers between 2 and 2^31-1 in turn and
 *   then wrap
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	spin_lock(&key_serial_lock);

	/* propose a likely serial number and look for a hole for it in the
	 * serial number tree */
	key->serial = key_serial_next;
	if (key->serial < 3)
		key->serial = 3;
	key_serial_next = key->serial + 1;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial = key_serial_next;
		if (key->serial < 2)
			key->serial = 2;
		key_serial_next = key->serial + 1;

		if (!parent->rb_parent)
			p = &key_serial_tree.rb_node;
		else if (parent->rb_parent->rb_left == parent)
			p = &parent->rb_parent->rb_left;
		else
			p = &parent->rb_parent->rb_right;

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by either
 *   key_create_or_update() or by key_duplicate(); this prevents unregistration
 *   of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, key_perm_t perm,
		      int not_in_quota)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!not_in_quota) {
		spin_lock(&user->lock);
		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS &&
		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
		    )
			goto no_quota;

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmalloc(desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;

		memcpy(key->description, desc, desclen);
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;

	if (!not_in_quota)
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!not_in_quota) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
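
/*
 * Hedged usage sketch (not part of the original file): how a caller might
 * honour the key_alloc() contract above - allocate, then either instantiate
 * the key or discard it with key_put().  The "example_" helper and the use of
 * key_type_user are illustrative assumptions only.
 */
#if 0
static struct key *example_alloc_user_key(const char *desc,
					  const void *data, size_t datalen)
{
	struct key *key;
	int ret;

	/* allocate an as-yet uninstantiated key owned by the caller */
	key = key_alloc(&key_type_user, desc, current->fsuid, current->fsgid,
			KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
			KEY_USR_LINK | KEY_USR_WRITE,
			0);
	if (IS_ERR(key))
		return key;

	/* instantiate it; on failure the key must be discarded */
	ret = key_instantiate_and_link(key, data, datalen, NULL, NULL);
	if (ret < 0) {
		key_put(key);
		key = ERR_PTR(ret);
	}

	return key;
}
#endif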
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}

		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
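
/*
 * Hedged usage sketch (not part of the original file): a hypothetical key
 * type's ->update() op reserving quota for a new payload before committing to
 * it.  The payload layout (a plain kmalloc'd buffer hung off
 * key->payload.data) is an illustrative assumption; real key types may store
 * payloads differently.
 */
#if 0
static int example_update(struct key *key, const void *data, size_t datalen)
{
	void *copy;
	int ret;

	copy = kmalloc(datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, data, datalen);

	/* charge the owner's quota for the new payload size */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0) {
		kfree(copy);
		return ret;
	}

	/* the caller (key_update) holds key->sem, so the swap is safe */
	kfree(key->payload.data);
	key->payload.data = copy;
	return 0;
}
#endif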
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
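
/*
 * Hedged usage sketch (not part of the original file): how an upcall failure
 * path might negatively instantiate a key so that repeated lookups fail
 * quickly for a while rather than re-triggering the upcall.  The destination
 * keyring and the 60-second timeout are illustrative assumptions.
 */
#if 0
static void example_upcall_failed(struct key *key, struct key *dest_keyring,
				  struct key *authkey)
{
	/* mark the key negative for 60 seconds, link it into the destination
	 * keyring and revoke the authorisation key in one operation */
	key_negate_and_link(key, 60, dest_keyring, authkey);
}
#endif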
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */
/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */
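
/*
 * Hedged usage sketch (not part of the original file): looking a key up by
 * serial number, operating on it, and dropping the reference that
 * key_lookup() took.  The "update by serial" wrapper is an illustrative
 * assumption; it only uses functions defined in this file.
 */
#if 0
static int example_update_by_serial(key_serial_t id,
				    const void *payload, size_t plen)
{
	struct key *key;
	int ret;

	key = key_lookup(id);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* key_update() rechecks write permission itself */
	ret = key_update(key, payload, plen);

	/* drop the reference taken by key_lookup() */
	key_put(key);
	return ret;
}
#endif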
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */
/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline struct key *__key_update(struct key *key, const void *payload,
				       size_t plen)
{
	int ret;

	/* need write permission on the key to update it */
	ret = -EACCES;
	if (!key_permission(key, KEY_WRITE))
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);
	ret = key->type->update(key, payload, plen);

	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key;

error:
	key_put(key);
	key = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
struct key *key_create_or_update(struct key *keyring,
				 const char *type,
				 const char *description,
				 const void *payload,
				 size_t plen,
				 int not_in_quota)
{
	struct key_type *ktype;
	struct key *key = NULL;
	key_perm_t perm;
	int ret;

	key_check(keyring);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key = ERR_PTR(-ENODEV);
		goto error;
	}

	ret = -EINVAL;
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	/* search for an existing key of the same type and description in the
	 * destination keyring
	 */
	down_write(&keyring->sem);

	key = __keyring_search_one(keyring, ktype, description, 0);
	if (!IS_ERR(key))
		goto found_matching_key;

	/* if we're going to allocate a new key, we're going to have to modify
	 * the keyring */
	ret = -EACCES;
	if (!key_permission(keyring, KEY_WRITE))
		goto error_3;

	/* decide on the permissions we want */
	perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;

	if (ktype->read)
		perm |= KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			perm, not_in_quota);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key = ERR_PTR(ret);
	}

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key = __key_update(key, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
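
/*
 * Hedged usage sketch (not part of the original file): adding or refreshing a
 * "user" key in a keyring the caller already holds, roughly what the
 * add_key() system call path does with this function.  The keyring parameter
 * and the example description are illustrative assumptions.
 */
#if 0
static int example_store_secret(struct key *dest_keyring,
				const void *secret, size_t len)
{
	struct key *key;

	/* update an existing "example:token" key or create a new one,
	 * charging it to the caller's quota */
	key = key_create_or_update(dest_keyring, "user", "example:token",
				   secret, len, 0);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* key_create_or_update() returned a pinned key; drop our reference */
	key_put(key);
	return 0;
}
#endif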
/*****************************************************************************/
/*
 * update a key
 */
int key_update(struct key *key, const void *payload, size_t plen)
{
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = -EACCES;
	if (!key_permission(key, KEY_WRITE))
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);
		ret = key->type->update(key, payload, plen);

		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);
/*****************************************************************************/
/*
 * duplicate a key, potentially with a revised description
 * - must be supported by the keytype (keyrings for instance can be duplicated)
 */
struct key *key_duplicate(struct key *source, const char *desc)
{
	struct key *key;
	int ret;

	key_check(source);

	if (!desc)
		desc = source->description;

	down_read(&key_types_sem);

	ret = -EINVAL;
	if (!source->type->duplicate)
		goto error;

	/* allocate and instantiate a key */
	key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
			source->perm, 0);
	if (IS_ERR(key))
		goto error_k;

	down_read(&source->sem);
	ret = key->type->duplicate(key, source);
	up_read(&source->sem);
	if (ret < 0)
		goto error2;

	atomic_inc(&key->user->nikeys);
	set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

error_k:
	up_read(&key_types_sem);
out:
	return key;

error2:
	key_put(key);
error:
	up_read(&key_types_sem);
	key = ERR_PTR(ret);
	goto out;

} /* end key_duplicate() */
/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);
	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
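
/*
 * Hedged usage sketch (not part of the original file): a minimal key type a
 * module might register and unregister.  The payload handling (a kmalloc'd
 * copy hung off key->payload.data) and the exact key_type field set are
 * illustrative assumptions based on the ops this file invokes (instantiate,
 * match, destroy).
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	void *copy;
	int ret;

	/* reserve quota for the payload before copying it in */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	copy = kmalloc(datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, data, datalen);
	key->payload.data = copy;
	return 0;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

static struct key_type key_type_example = {
	.name		= "example",
	.def_datalen	= 0,
	.instantiate	= example_instantiate,
	.match		= example_match,
	.destroy	= example_destroy,
};

static int __init example_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_exit(void)
{
	/* dead keys of this type are re-typed to key_type_dead (see below) */
	unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
#endif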
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, 0xbd, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */