key.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001
  1. /* Basic authentication token and access key management
  2. *
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/init.h>
  13. #include <linux/poison.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/security.h>
  17. #include <linux/workqueue.h>
  18. #include <linux/random.h>
  19. #include <linux/err.h>
  20. #include <linux/user_namespace.h>
  21. #include "internal.h"
  22. static struct kmem_cache *key_jar;
  23. struct rb_root key_serial_tree; /* tree of keys indexed by serial */
  24. DEFINE_SPINLOCK(key_serial_lock);
  25. struct rb_root key_user_tree; /* tree of quota records indexed by UID */
  26. DEFINE_SPINLOCK(key_user_lock);
  27. unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */
  28. unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */
  29. unsigned int key_quota_maxkeys = 200; /* general key count quota */
  30. unsigned int key_quota_maxbytes = 20000; /* general key space quota */
  31. static LIST_HEAD(key_types_list);
  32. static DECLARE_RWSEM(key_types_sem);
  33. static void key_cleanup(struct work_struct *work);
  34. static DECLARE_WORK(key_cleanup_task, key_cleanup);
  35. /* we serialise key instantiation and link */
  36. DEFINE_MUTEX(key_construction_mutex);
/* any key whose type gets unregistered will be re-typed to this */
  38. static struct key_type key_type_dead = {
  39. .name = "dead",
  40. };
  41. #ifdef KEY_DEBUGGING
/*
 * report a key whose magic number does not match KEY_DEBUG_MAGIC and halt
 * - reached via key_check() when a key structure appears corrupt or freed
 * - only built when KEY_DEBUGGING is defined
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
  48. #endif
  49. /*
  50. * get the key quota record for a user, allocating a new record if one doesn't
  51. * already exist
  52. */
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 * - records are keyed on (uid, user_ns); namespaces are ordered by comparing
 *   the pointers themselves
 * - returns a record with its usage count incremented, or NULL if a new
 *   record was needed and could not be allocated
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	/* candidate is NULL on the first pass, or an unused allocation if the
	 * record appeared while we slept in kmalloc() */
	kfree(candidate);
out:
	return user;
}
  114. /*
  115. * dispose of a user structure
  116. */
/*
 * dispose of a user structure
 * - drops the caller's reference; if that was the last one, the record is
 *   unlinked from key_user_tree and freed
 * - atomic_dec_and_lock() only takes key_user_lock when the count reaches
 *   zero, so the final put cannot race with key_user_lookup()
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);
		put_user_ns(user->user_ns);
		kfree(user);
	}
}
  126. /*
  127. * assign a key the next unique serial number
  128. * - these are assigned randomly to avoid security issues through covert
  129. * channel problems
  130. */
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 * - serial numbers below 3 are never assigned (hence the < 3 checks); the
 *   right-shift forces the random proposal positive
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* the serial counter wrapped - restart the search
			 * from the lowest permitted serial number */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}
  177. /*
  178. * allocate a key of the specified type
  179. * - update the user's quota to reflect the existence of the key
  180. * - called from a key-type operation with key_types_sem read-locked by
  181. * key_create_or_update()
  182. * - this prevents unregistration of the key type
  183. * - upon return the key is as yet uninstantiated; the caller needs to either
  184. * instantiate the key or discard it before returning
  185. */
  186. struct key *key_alloc(struct key_type *type, const char *desc,
  187. uid_t uid, gid_t gid, const struct cred *cred,
  188. key_perm_t perm, unsigned long flags)
  189. {
  190. struct key_user *user = NULL;
  191. struct key *key;
  192. size_t desclen, quotalen;
  193. int ret;
  194. key = ERR_PTR(-EINVAL);
  195. if (!desc || !*desc)
  196. goto error;
  197. desclen = strlen(desc) + 1;
  198. quotalen = desclen + type->def_datalen;
  199. /* get hold of the key tracking for this user */
  200. user = key_user_lookup(uid, cred->user->user_ns);
  201. if (!user)
  202. goto no_memory_1;
  203. /* check that the user's quota permits allocation of another key and
  204. * its description */
  205. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  206. unsigned maxkeys = (uid == 0) ?
  207. key_quota_root_maxkeys : key_quota_maxkeys;
  208. unsigned maxbytes = (uid == 0) ?
  209. key_quota_root_maxbytes : key_quota_maxbytes;
  210. spin_lock(&user->lock);
  211. if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
  212. if (user->qnkeys + 1 >= maxkeys ||
  213. user->qnbytes + quotalen >= maxbytes ||
  214. user->qnbytes + quotalen < user->qnbytes)
  215. goto no_quota;
  216. }
  217. user->qnkeys++;
  218. user->qnbytes += quotalen;
  219. spin_unlock(&user->lock);
  220. }
  221. /* allocate and initialise the key and its description */
  222. key = kmem_cache_alloc(key_jar, GFP_KERNEL);
  223. if (!key)
  224. goto no_memory_2;
  225. if (desc) {
  226. key->description = kmemdup(desc, desclen, GFP_KERNEL);
  227. if (!key->description)
  228. goto no_memory_3;
  229. }
  230. atomic_set(&key->usage, 1);
  231. init_rwsem(&key->sem);
  232. key->type = type;
  233. key->user = user;
  234. key->quotalen = quotalen;
  235. key->datalen = type->def_datalen;
  236. key->uid = uid;
  237. key->gid = gid;
  238. key->perm = perm;
  239. key->flags = 0;
  240. key->expiry = 0;
  241. key->payload.data = NULL;
  242. key->security = NULL;
  243. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
  244. key->flags |= 1 << KEY_FLAG_IN_QUOTA;
  245. memset(&key->type_data, 0, sizeof(key->type_data));
  246. #ifdef KEY_DEBUGGING
  247. key->magic = KEY_DEBUG_MAGIC;
  248. #endif
  249. /* let the security module know about the key */
  250. ret = security_key_alloc(key, cred, flags);
  251. if (ret < 0)
  252. goto security_error;
  253. /* publish the key by giving it a serial number */
  254. atomic_inc(&user->nkeys);
  255. key_alloc_serial(key);
  256. error:
  257. return key;
  258. security_error:
  259. kfree(key->description);
  260. kmem_cache_free(key_jar, key);
  261. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  262. spin_lock(&user->lock);
  263. user->qnkeys--;
  264. user->qnbytes -= quotalen;
  265. spin_unlock(&user->lock);
  266. }
  267. key_user_put(user);
  268. key = ERR_PTR(ret);
  269. goto error;
  270. no_memory_3:
  271. kmem_cache_free(key_jar, key);
  272. no_memory_2:
  273. if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
  274. spin_lock(&user->lock);
  275. user->qnkeys--;
  276. user->qnbytes -= quotalen;
  277. spin_unlock(&user->lock);
  278. }
  279. key_user_put(user);
  280. no_memory_1:
  281. key = ERR_PTR(-ENOMEM);
  282. goto error;
  283. no_quota:
  284. spin_unlock(&user->lock);
  285. key_user_put(user);
  286. key = ERR_PTR(-EDQUOT);
  287. goto error;
  288. }
  289. EXPORT_SYMBOL(key_alloc);
  290. /*
  291. * reserve an amount of quota for the key's payload
  292. */
  293. int key_payload_reserve(struct key *key, size_t datalen)
  294. {
  295. int delta = (int)datalen - key->datalen;
  296. int ret = 0;
  297. key_check(key);
  298. /* contemplate the quota adjustment */
  299. if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
  300. unsigned maxbytes = (key->user->uid == 0) ?
  301. key_quota_root_maxbytes : key_quota_maxbytes;
  302. spin_lock(&key->user->lock);
  303. if (delta > 0 &&
  304. (key->user->qnbytes + delta >= maxbytes ||
  305. key->user->qnbytes + delta < key->user->qnbytes)) {
  306. ret = -EDQUOT;
  307. }
  308. else {
  309. key->user->qnbytes += delta;
  310. key->quotalen += delta;
  311. }
  312. spin_unlock(&key->user->lock);
  313. }
  314. /* change the recorded data length if that didn't generate an error */
  315. if (ret == 0)
  316. key->datalen = datalen;
  317. return ret;
  318. }
  319. EXPORT_SYMBOL(key_payload_reserve);
  320. /*
  321. * instantiate a key and link it into the target keyring atomically
  322. * - called with the target keyring's semaphore writelocked
  323. */
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 * - *_prealloc is the link space set up by __key_link_begin() and is consumed
 *   by __key_link() if the link is made
 * - returns the type's instantiate op result, or -EBUSY if the key was
 *   already instantiated
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey,
				      struct keyring_list **_prealloc)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* serialise instantiation against other instantiators and against
	 * key_negate_and_link() */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			/* anyone waiting in request_key() on this bit gets
			 * woken below, outside the mutex */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				__key_link(keyring, key, _prealloc);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}
  361. /*
  362. * instantiate a key and link it into the target keyring atomically
  363. */
  364. int key_instantiate_and_link(struct key *key,
  365. const void *data,
  366. size_t datalen,
  367. struct key *keyring,
  368. struct key *authkey)
  369. {
  370. struct keyring_list *prealloc;
  371. int ret;
  372. if (keyring) {
  373. ret = __key_link_begin(keyring, key->type, key->description,
  374. &prealloc);
  375. if (ret < 0)
  376. return ret;
  377. }
  378. ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
  379. &prealloc);
  380. if (keyring)
  381. __key_link_end(keyring, key->type, prealloc);
  382. return ret;
  383. }
  384. EXPORT_SYMBOL(key_instantiate_and_link);
  385. /*
  386. * negatively instantiate a key and link it into the target keyring atomically
  387. */
/*
 * negatively instantiate a key and link it into the target keyring atomically
 * - the key is marked KEY_FLAG_NEGATIVE and KEY_FLAG_INSTANTIATED and given
 *   an expiry of now + timeout, after which the garbage collector may reap it
 * - returns 0 on success, -EBUSY if the key was already instantiated, or the
 *   __key_link_begin() error if the link could not be prepared
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct keyring_list *prealloc;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* preallocate keyring link space before taking the construction mutex
	 * so the link itself cannot fail under the lock */
	if (keyring)
		link_ret = __key_link_begin(keyring, key->type,
					    key->description, &prealloc);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(keyring, key, &prealloc);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	/* NOTE(review): prealloc is handed to __key_link_end() even when
	 * __key_link_begin() failed - presumably begin leaves it in a state
	 * end can cope with; verify against keyring.c */
	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_negate_and_link);
  432. /*
  433. * do cleaning up in process context so that we don't have to disable
  434. * interrupts all over the place
  435. */
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 * - repeatedly scans key_serial_tree for keys whose usage count has dropped
 *   to zero, then unlinks and destroys them one at a time
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	/* poison the magic so a use-after-free trips key_check() */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;
}
  479. /*
  480. * dispose of a reference to a key
  481. * - when all the references are gone, we schedule the cleanup task to come and
  482. * pull it out of the tree in definite process context
  483. */
  484. void key_put(struct key *key)
  485. {
  486. if (key) {
  487. key_check(key);
  488. if (atomic_dec_and_test(&key->usage))
  489. schedule_work(&key_cleanup_task);
  490. }
  491. }
  492. EXPORT_SYMBOL(key_put);
  493. /*
  494. * find a key by its serial number
  495. */
/*
 * find a key by its serial number
 * - returns the key with its usage count incremented, or ERR_PTR(-ENOKEY) if
 *   no key with that serial exists or it is awaiting deletion
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;
}
  527. /*
  528. * find and lock the specified key type against removal
  529. * - we return with the sem readlocked
  530. */
  531. struct key_type *key_type_lookup(const char *type)
  532. {
  533. struct key_type *ktype;
  534. down_read(&key_types_sem);
  535. /* look up the key type to see if it's one of the registered kernel
  536. * types */
  537. list_for_each_entry(ktype, &key_types_list, link) {
  538. if (strcmp(ktype->name, type) == 0)
  539. goto found_kernel_type;
  540. }
  541. up_read(&key_types_sem);
  542. ktype = ERR_PTR(-ENOKEY);
  543. found_kernel_type:
  544. return ktype;
  545. }
  546. /*
  547. * unlock a key type
  548. */
/*
 * unlock a key type locked by key_type_lookup()
 * - drops the read lock on key_types_sem taken there; ktype itself is not
 *   touched
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}
  553. /*
  554. * attempt to update an existing key
  555. * - the key has an incremented refcount
  556. * - we need to put the key if we get an error
  557. */
  558. static inline key_ref_t __key_update(key_ref_t key_ref,
  559. const void *payload, size_t plen)
  560. {
  561. struct key *key = key_ref_to_ptr(key_ref);
  562. int ret;
  563. /* need write permission on the key to update it */
  564. ret = key_permission(key_ref, KEY_WRITE);
  565. if (ret < 0)
  566. goto error;
  567. ret = -EEXIST;
  568. if (!key->type->update)
  569. goto error;
  570. down_write(&key->sem);
  571. ret = key->type->update(key, payload, plen);
  572. if (ret == 0)
  573. /* updating a negative key instantiates it */
  574. clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
  575. up_write(&key->sem);
  576. if (ret < 0)
  577. goto error;
  578. out:
  579. return key_ref;
  580. error:
  581. key_put(key);
  582. key_ref = ERR_PTR(ret);
  583. goto out;
  584. }
  585. /*
  586. * search the specified keyring for a key of the same description; if one is
  587. * found, update it, otherwise add a new one
  588. */
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 * - returns a ref to the new or updated key, or an ERR_PTR: -ENODEV for an
 *   unknown type, -EINVAL for a type without match/instantiate ops, -ENOTDIR
 *   if keyring_ref isn't a keyring, or a permission/allocation error
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_list *prealloc;
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	/* reserve link space in the keyring; also write-locks it */
	ret = __key_link_begin(keyring, ktype, description, &prealloc);
	if (ret < 0)
		goto error_2;

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
					 &prealloc);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	__key_link_end(keyring, ktype, prealloc);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, ktype, prealloc);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;
}
EXPORT_SYMBOL(key_create_or_update);
  679. /*
  680. * update a key
  681. */
  682. int key_update(key_ref_t key_ref, const void *payload, size_t plen)
  683. {
  684. struct key *key = key_ref_to_ptr(key_ref);
  685. int ret;
  686. key_check(key);
  687. /* the key must be writable */
  688. ret = key_permission(key_ref, KEY_WRITE);
  689. if (ret < 0)
  690. goto error;
  691. /* attempt to update it if supported */
  692. ret = -EOPNOTSUPP;
  693. if (key->type->update) {
  694. down_write(&key->sem);
  695. ret = key->type->update(key, payload, plen);
  696. if (ret == 0)
  697. /* updating a negative key instantiates it */
  698. clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
  699. up_write(&key->sem);
  700. }
  701. error:
  702. return ret;
  703. }
  704. EXPORT_SYMBOL(key_update);
  705. /*
  706. * revoke a key
  707. */
/*
 * revoke a key
 * - the type's revoke op is called only the first time the key is revoked
 * - records the revocation time and schedules the garbage collector for no
 *   later than revoked_at + key_gc_delay
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);
  732. /*
  733. * register a type of key
  734. */
  735. int register_key_type(struct key_type *ktype)
  736. {
  737. struct key_type *p;
  738. int ret;
  739. ret = -EEXIST;
  740. down_write(&key_types_sem);
  741. /* disallow key types with the same name */
  742. list_for_each_entry(p, &key_types_list, link) {
  743. if (strcmp(p->name, ktype->name) == 0)
  744. goto out;
  745. }
  746. /* store the type */
  747. list_add(&ktype->link, &key_types_list);
  748. ret = 0;
  749. out:
  750. up_write(&key_types_sem);
  751. return ret;
  752. }
  753. EXPORT_SYMBOL(register_key_type);
  754. /*
  755. * unregister a type of key
  756. */
/*
 * unregister a type of key
 * - all keys of this type are re-typed to key_type_dead and marked dead, the
 *   type's payloads are destroyed, and the garbage collector is kicked
 * - two passes over the serial tree are made: the first under key_serial_lock
 *   retypes the keys, then after synchronize_rcu() the second destroys the
 *   payloads once no RCU reader can still see the old type
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload so stale users fault loudly */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);
}
EXPORT_SYMBOL(unregister_key_type);
  792. /*
  793. * initialise the key management stuff
  794. */
/*
 * initialise the key management stuff
 * - called once at boot: creates the key slab, registers the built-in key
 *   types and seeds the quota tree with the statically-allocated root user
 *   record
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}