/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree;	/* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree;	/* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);
/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 2)
			key->serial = 2;

		if (!rb_parent(parent))
			p = &key_serial_tree.rb_node;
		else if (rb_parent(parent)->rb_left == parent)
			p = &(rb_parent(parent)->rb_left);
		else
			p = &(rb_parent(parent)->rb_right);

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
			    )
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
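
/*
 * Illustrative sketch (not part of the original file): a key type's
 * ->update() operation would typically call key_payload_reserve() before
 * swapping in a new payload, so that the owner's quota tracks the payload
 * size.  The payload handling below is an assumption made for the sketch
 * only; a real key type would normally use RCU to replace the payload.
 */
#if 0
static int example_update(struct key *key, const void *data, size_t datalen)
{
	char *copy;
	int ret;

	/* charge (or refund) the quota difference first */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	copy = kmemdup(data, datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	/* the caller (key_update) holds key->sem writelocked around this */
	kfree(key->payload.data);
	key->payload.data = copy;
	return 0;
}
#endif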
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
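
/*
 * Illustrative sketch (not part of the original file): allocating a key with
 * key_alloc() and instantiating it into a destination keyring with
 * key_instantiate_and_link().  The use of key_type_user, the description
 * string and the permission mask are assumptions made purely for the
 * example; key_alloc() hands back an uninstantiated key that the caller must
 * instantiate or key_put() on error.
 */
#if 0
static struct key *example_make_key(struct key *dest_keyring,
				    const void *payload, size_t plen)
{
	struct key *key;
	int ret;

	key = key_alloc(&key_type_user, "example:demo",
			current->fsuid, current->fsgid, current,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}

	return key;
}
#endif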
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
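
/*
 * Illustrative sketch (not part of the original file): a construction
 * routine in the style of the request_key() upcall path might negatively
 * instantiate a key it failed to build, so that repeated lookups fail
 * quickly with ENOKEY until the timeout expires.  example_build_payload()
 * and the 60-second timeout are hypothetical.
 */
#if 0
static int example_construct(struct key *key, struct key *dest_keyring,
			     struct key *authkey)
{
	void *payload;
	size_t plen;
	int ret;

	ret = example_build_payload(key, &payload, &plen); /* hypothetical */
	if (ret < 0)
		/* couldn't construct it: mark it negative for a minute */
		return key_negate_and_link(key, 60, dest_keyring, authkey);

	return key_instantiate_and_link(key, payload, plen,
					dest_keyring, authkey);
}
#endif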
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* decide on the permissions we want */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

	if (ktype->read)
		perm |= KEY_POS_READ | KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_PTR(PTR_ERR(key));
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
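
/*
 * Illustrative sketch (not part of the original file): adding or updating a
 * "user"-type key in a keyring the caller already holds a reference to, much
 * as the add_key() system call does.  The description string and the
 * assumption of possession in make_key_ref() are for the example only.
 */
#if 0
static int example_add_user_key(struct key *dest_keyring,
				const void *payload, size_t plen)
{
	key_ref_t keyring_ref, key_ref;

	keyring_ref = make_key_ref(dest_keyring, 1);

	key_ref = key_create_or_update(keyring_ref, "user", "example:demo",
				       payload, plen, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key_ref))
		return PTR_ERR(key_ref);

	key_ref_put(key_ref);
	return 0;
}
#endif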
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);

	if (key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
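
/*
 * Illustrative sketch (not part of the original file): a module defining and
 * registering its own key type.  The "example" type and its operations are
 * hypothetical; they merely show the register_key_type()/unregister_key_type()
 * pairing together with a minimal instantiate/match/destroy set.
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	int ret;

	/* charge the payload to the owner's quota */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	/* stash a copy of the data as the payload */
	key->payload.data = kmemdup(data, datalen, GFP_KERNEL);
	return key->payload.data ? 0 : -ENOMEM;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

static struct key_type key_type_example = {
	.name		= "example",
	.instantiate	= example_instantiate,
	.match		= example_match,
	.destroy	= example_destroy,
};

static int __init example_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
#endif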
/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */