key.c

/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		put_user_ns(user->user_ns);
		kfree(user);
	}

} /* end key_user_put() */

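/*
 * Illustrative sketch (not part of the original file): key_user_lookup()
 * returns the per-user quota record with its usage count raised, so every
 * successful lookup must be balanced by key_user_put().  The helper name
 * example_count_user_keys() is an assumption for illustration only.
 *
 *	static unsigned int example_count_user_keys(const struct cred *cred)
 *	{
 *		struct key_user *user;
 *		unsigned int n;
 *
 *		user = key_user_lookup(cred->uid, cred->user->user_ns);
 *		if (!user)
 *			return 0;
 *
 *		spin_lock(&user->lock);
 *		n = user->qnkeys;
 *		spin_unlock(&user->lock);
 *
 *		key_user_put(user);
 *		return n;
 *	}
 */
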
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid, cred->user->user_ns);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

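/*
 * Illustrative sketch (not part of the original file): key_alloc() hands back
 * an uninstantiated key, so the caller must either instantiate it or discard
 * it with key_put().  A minimal sketch using the built-in "user" key type;
 * the helper name example_make_key() is an assumption for illustration only.
 *
 *	static struct key *example_make_key(const char *desc,
 *					    const void *payload, size_t plen)
 *	{
 *		const struct cred *cred = current_cred();
 *		struct key *key;
 *		int ret;
 *
 *		key = key_alloc(&key_type_user, desc,
 *				cred->fsuid, cred->fsgid, cred,
 *				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *				KEY_ALLOC_NOT_IN_QUOTA);
 *		if (IS_ERR(key))
 *			return key;
 *
 *		ret = key_instantiate_and_link(key, payload, plen, NULL, NULL);
 *		if (ret < 0) {
 *			key_put(key);
 *			return ERR_PTR(ret);
 *		}
 *		return key;
 *	}
 */
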
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

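/*
 * Illustrative sketch (not part of the original file): a key type's
 * ->instantiate() op would typically reserve quota for its payload before
 * committing to it, so that an over-quota request fails cleanly.  The name
 * example_instantiate and the direct payload assignment are assumptions for
 * illustration only; the key is not yet published at instantiation time, so
 * no extra locking is shown.
 *
 *	static int example_instantiate(struct key *key, const void *data,
 *				       size_t datalen)
 *	{
 *		void *copy;
 *		int ret;
 *
 *		ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;
 *
 *		copy = kmemdup(data, datalen, GFP_KERNEL);
 *		if (!copy)
 *			return -ENOMEM;
 *
 *		key->payload.data = copy;
 *		return 0;
 *	}
 */
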
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

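/*
 * Illustrative sketch (not part of the original file): key_lookup() returns
 * the key with its usage count raised, so the caller is responsible for
 * dropping that reference with key_put().  The helper name
 * example_describe_serial() is an assumption for illustration only.
 *
 *	static int example_describe_serial(key_serial_t id)
 *	{
 *		struct key *key;
 *
 *		key = key_lookup(id);
 *		if (IS_ERR(key))
 *			return PTR_ERR(key);
 *
 *		printk(KERN_DEBUG "key %d is of type %s\n",
 *		       key->serial, key->type->name);
 *
 *		key_put(key);
 *		return 0;
 *	}
 */
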
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

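/*
 * Illustrative sketch (not part of the original file): key_type_lookup()
 * returns with key_types_sem read-locked, so every successful lookup must be
 * paired with key_type_put() to release it.  The helper name
 * example_type_is_registered() is an assumption for illustration only.
 *
 *	static bool example_type_is_registered(const char *name)
 *	{
 *		struct key_type *ktype;
 *
 *		ktype = key_type_lookup(name);
 *		if (IS_ERR(ktype))
 *			return false;
 *
 *		key_type_put(ktype);
 *		return true;
 *	}
 */
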
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

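/*
 * Illustrative sketch (not part of the original file): adding or updating a
 * "user" key in a given keyring, letting KEY_PERM_UNDEF pick the default
 * permissions computed above.  The helper name example_add_user_key() is an
 * assumption for illustration only.
 *
 *	static int example_add_user_key(struct key *keyring, const char *desc,
 *					const void *payload, size_t plen)
 *	{
 *		key_ref_t kref;
 *
 *		kref = key_create_or_update(make_key_ref(keyring, 1),
 *					    "user", desc, payload, plen,
 *					    KEY_PERM_UNDEF,
 *					    KEY_ALLOC_IN_QUOTA);
 *		if (IS_ERR(kref))
 *			return PTR_ERR(kref);
 *
 *		key_ref_put(kref);
 *		return 0;
 *	}
 */
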
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

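/*
 * Illustrative sketch (not part of the original file): registering a minimal
 * key type from a module and unregistering it on exit.  The "example" type
 * name and helper functions are assumptions for illustration only; a type
 * that is usable from userspace would also supply .match, .read and friends.
 *
 *	static int example_minimal_instantiate(struct key *key,
 *					       const void *data, size_t datalen)
 *	{
 *		return key_payload_reserve(key, datalen);
 *	}
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.instantiate	= example_minimal_instantiate,
 *	};
 *
 *	static int __init example_key_module_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_key_module_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 *
 *	module_init(example_key_module_init);
 *	module_exit(example_key_module_exit);
 */
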
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */