key.c

/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
        .name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid < user->uid)
                        p = &(*p)->rb_left;
                else if (uid > user->uid)
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        mutex_init(&candidate->cons_lock);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }

} /* end key_user_put() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

attempt_insertion:
        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);
        return;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 3) {
                        key->serial = 3;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto attempt_insertion;
        }

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      uid_t uid, gid_t gid, struct task_struct *ctx,
                      key_perm_t perm, unsigned long flags)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        desclen = strlen(desc) + 1;
        quotalen = desclen + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                unsigned maxkeys = (uid == 0) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
                unsigned maxbytes = (uid == 0) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= maxkeys ||
                            user->qnbytes + quotalen >= maxbytes ||
                            user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_alloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        if (desc) {
                key->description = kmemdup(desc, desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;
        }

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        key->type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->flags = 0;
        key->expiry = 0;
        key->payload.data = NULL;
        key->security = NULL;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;

        memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, ctx, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

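/*
 * Illustrative sketch (not part of the original file): a key-type module
 * might allocate a key and then instantiate it roughly as follows.  The type
 * "example_key_type", the description string and the payload buffer are
 * assumptions made purely for illustration:
 *
 *      struct key *key;
 *      int ret;
 *
 *      key = key_alloc(&example_key_type, "example:desc",
 *                      current->fsuid, current->fsgid, current,
 *                      KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_IN_QUOTA);
 *      if (IS_ERR(key))
 *              return PTR_ERR(key);
 *
 *      ret = key_instantiate_and_link(key, payload, plen, NULL, NULL);
 *      if (ret < 0)
 *              key_put(key);   /- discard the still-uninstantiated key -/
 */
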
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int) datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                unsigned maxbytes = (key->user->uid == 0) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    (key->user->qnbytes + delta >= maxbytes ||
                     key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }
                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

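/*
 * Illustrative sketch (assumptions marked): a key type's ->update() op would
 * typically reserve quota for the replacement payload before swapping it in.
 * The function name "example_update" and the payload handling shown are
 * hypothetical:
 *
 *      static int example_update(struct key *key, const void *data,
 *                                size_t datalen)
 *      {
 *              int ret;
 *
 *              ret = key_payload_reserve(key, datalen);
 *              if (ret == 0) {
 *                      ... copy data into a new payload, attach it to
 *                          key->payload.data and free the old payload ...
 *              }
 *              return ret;
 *      }
 */
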
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
                                      const void *data,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *instkey)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, data, datalen);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring)
                                ret = __key_link(keyring, key);

                        /* disable the authorisation key */
                        if (instkey)
                                key_revoke(instkey);
                }
        }

        mutex_unlock(&key_construction_mutex);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *instkey)
{
        int ret;

        if (keyring)
                down_write(&keyring->sem);

        ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

        if (keyring)
                up_write(&keyring->sem);

        return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
                        unsigned timeout,
                        struct key *keyring,
                        struct key *instkey)
{
        struct timespec now;
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring)
                down_write(&keyring->sem);

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring)
                        ret = __key_link(keyring, key);

                /* disable the authorisation key */
                if (instkey)
                        key_revoke(instkey);
        }

        mutex_unlock(&key_construction_mutex);

        if (keyring)
                up_write(&keyring->sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
        struct rb_node *_n;
        struct key *key;

go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);

        /* there may, of course, be more than one key to destroy */
        goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_cleanup_task);
        }

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* pretend it doesn't exist if it's dead */
        if (atomic_read(&key->usage) == 0 ||
            test_bit(KEY_FLAG_DEAD, &key->flags) ||
            key->type == &key_type_dead)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        atomic_inc(&key->usage);

error:
        spin_unlock(&key_serial_lock);
        return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, payload, plen);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               key_perm_t perm,
                               unsigned long flags)
{
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
        key_ref_t key_ref;
        int ret;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        ktype = key_type_lookup(type);
        if (IS_ERR(ktype)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!ktype->match || !ktype->instantiate)
                goto error_2;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_2;

        down_write(&keyring->sem);

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (ktype->update) {
                key_ref = __keyring_search_one(keyring_ref, ktype, description,
                                               0);
                if (!IS_ERR(key_ref))
                        goto found_matching_key;
        }

        /* if the client doesn't provide, decide on the permissions we want */
        if (perm == KEY_PERM_UNDEF) {
                perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
                perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

                if (ktype->read)
                        perm |= KEY_POS_READ | KEY_USR_READ;

                if (ktype == &key_type_keyring || ktype->update)
                        perm |= KEY_USR_WRITE;
        }

        /* allocate a new key */
        key = key_alloc(ktype, description, current->fsuid, current->fsgid,
                        current, perm, flags);
        if (IS_ERR(key)) {
                key_ref = ERR_CAST(key);
                goto error_3;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
        up_write(&keyring->sem);
error_2:
        key_type_put(ktype);
error:
        return key_ref;

found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        up_write(&keyring->sem);
        key_type_put(ktype);

        key_ref = __key_update(key_ref, payload, plen);
        goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

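/*
 * Illustrative sketch (not part of the original file): an in-kernel caller
 * that holds a reference to a keyring typically adds or refreshes a key in
 * it like this.  "keyring_ref", "payload" and "plen" are assumed to be
 * supplied by the caller; "user" is the standard user-defined key type:
 *
 *      key_ref_t kref;
 *
 *      kref = key_create_or_update(keyring_ref, "user", "example:desc",
 *                                  payload, plen, KEY_PERM_UNDEF,
 *                                  KEY_ALLOC_IN_QUOTA);
 *      if (IS_ERR(kref))
 *              return PTR_ERR(kref);
 *      key_ref_put(kref);
 */
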
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        /* attempt to update it if supported */
        ret = -EOPNOTSUPP;
        if (key->type->update) {
                down_write(&key->sem);

                ret = key->type->update(key, payload, plen);
                if (ret == 0)
                        /* updating a negative key instantiates it */
                        clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

                up_write(&key->sem);
        }

error:
        return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
        key_check(key);

        /* make sure no one's trying to change or use the key when we mark it
         * - we tell lockdep that we might nest because we might be revoking an
         *   authorisation key whilst holding the sem on a key we've just
         *   instantiated
         */
        down_write_nested(&key->sem, 1);
        if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
            key->type->revoke)
                key->type->revoke(key);

        up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

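/*
 * Illustrative sketch (hypothetical type and operations): a module defines a
 * struct key_type and registers it from its init routine, unregistering it
 * again on exit:
 *
 *      static struct key_type key_type_example = {
 *              .name           = "example",
 *              .instantiate    = example_instantiate,
 *              .match          = example_match,
 *              .destroy        = example_destroy,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return register_key_type(&key_type_example);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              unregister_key_type(&key_type_example);
 *      }
 */
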
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
        struct rb_node *_n;
        struct key *key;

        down_write(&key_types_sem);

        /* withdraw the key type */
        list_del_init(&ktype->link);

        /* mark all the keys of this type dead */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype)
                        key->type = &key_type_dead;
        }

        spin_unlock(&key_serial_lock);

        /* make sure everyone revalidates their keys */
        synchronize_rcu();

        /* we should now be able to destroy the payloads of all the keys of
         * this type with impunity */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        if (ktype->destroy)
                                ktype->destroy(key);
                        memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
                }
        }

        spin_unlock(&key_serial_lock);
        up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);

} /* end key_init() */