/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t *key_jar;
static key_serial_t key_serial_next = 3;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

 try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
 found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	if (candidate)
		kfree(candidate);
 out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - we work through all the serial numbers between 2 and 2^31-1 in turn and
 *   then wrap
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	spin_lock(&key_serial_lock);

	/* propose a likely serial number and look for a hole for it in the
	 * serial number tree */
	key->serial = key_serial_next;
	if (key->serial < 3)
		key->serial = 3;
	key_serial_next = key->serial + 1;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}
	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
 serial_exists:
	for (;;) {
		key->serial = key_serial_next;
		if (key->serial < 2)
			key->serial = 2;
		key_serial_next = key->serial + 1;

		if (!parent->rb_parent)
			p = &key_serial_tree.rb_node;
		else if (parent->rb_parent->rb_left == parent)
			p = &parent->rb_parent->rb_left;
		else
			p = &parent->rb_parent->rb_right;

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
 insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by either
 *   key_create_or_update() or by key_duplicate(); this prevents unregistration
 *   of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, key_perm_t perm,
		      int not_in_quota)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!not_in_quota) {
		spin_lock(&user->lock);
		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS &&
		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
		    )
			goto no_quota;

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmalloc(desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;

		memcpy(key->description, desc, desclen);
	}

	atomic_set(&key->usage, 1);
	rwlock_init(&key->lock);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;

	if (!not_in_quota)
		key->flags |= KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

 error:
	return key;

 no_memory_3:
	kmem_cache_free(key_jar, key);
 no_memory_2:
	if (!not_in_quota) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
 no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

 no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
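
/* A minimal sketch of how an in-kernel caller might combine key_alloc() with
 * key_instantiate_and_link() and key_put(); the key type, description,
 * payload and destination keyring named below are hypothetical and not part
 * of this file.
 */
#if 0 /* illustrative example only - not compiled */
static struct key *example_make_key(struct key_type *example_type,
				    struct key *dest_keyring,
				    const void *payload, size_t plen)
{
	struct key *key;
	int ret;

	/* allocate an uninstantiated key owned by the current fs IDs */
	key = key_alloc(example_type, "example:desc",
			current->fsuid, current->fsgid,
			KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
			0 /* count it against the user's quota */);
	if (IS_ERR(key))
		return key;

	/* instantiate it and (optionally) link it into a keyring */
	ret = key_instantiate_and_link(key, payload, plen, dest_keyring);
	if (ret < 0) {
		key_put(key);	/* drop our reference; cleanup is deferred */
		return ERR_PTR(ret);
	}

	return key;
}
#endif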
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && key->flags & KEY_FLAG_IN_QUOTA) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}

		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
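
/* A minimal sketch of how a key type's instantiate operation might charge its
 * payload against the owner's quota with key_payload_reserve() before
 * attaching the data; the operation and its payload handling here are
 * hypothetical.
 */
#if 0 /* illustrative example only - not compiled */
static int example_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	void *copy;
	int ret;

	/* reserve quota for the payload we are about to attach */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	copy = kmalloc(datalen, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, data, datalen);

	key->payload.data = copy;
	return 0;
}
#endif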
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			write_lock(&key->lock);

			atomic_inc(&key->user->nikeys);
			key->flags |= KEY_FLAG_INSTANTIATED;

			if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
				key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
				awaken = 1;
			}

			write_unlock(&key->lock);

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
		/* mark the key as being negatively instantiated */
		write_lock(&key->lock);

		atomic_inc(&key->user->nikeys);
		key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE;
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
			key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
			awaken = 1;
		}

		write_unlock(&key->lock);
		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
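
/* A minimal sketch of negative instantiation: when an attempt to construct a
 * key fails, the partially constructed key can be negatively instantiated so
 * that lookups fail quickly until the timeout expires. The surrounding
 * construction-failure path shown here is hypothetical.
 */
#if 0 /* illustrative example only - not compiled */
static void example_construction_failed(struct key *key,
					struct key *dest_keyring)
{
	/* mark the key negative for 60 seconds and still link it into the
	 * destination keyring so that repeated requests hit the negative
	 * entry rather than re-running the construction */
	key_negate_and_link(key, 60, dest_keyring);
	key_put(key);
}
#endif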
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
	struct rb_node *_n;
	struct key *key;

 go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

 found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	/* deal with the user's key tracking and quota */
	if (key->flags & KEY_FLAG_IN_QUOTA) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (key->flags & KEY_FLAG_INSTANTIATED)
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
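
/* A minimal sketch of the reference-counting discipline around key_put():
 * any path that takes a reference (for example key_lookup(), which bumps the
 * usage count on success) must balance it with key_put(), which defers the
 * actual destruction to the key_cleanup work item. The serial number and the
 * work done on the key below are hypothetical.
 */
#if 0 /* illustrative example only - not compiled */
static int example_inspect_key(key_serial_t serial)
{
	struct key *key;

	key = key_lookup(serial);	/* takes a reference on success */
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* ... examine the key under key->sem as appropriate ... */

	key_put(key);	/* balances the reference taken by key_lookup() */
	return 0;
}
#endif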
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

 not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

 found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    (key->flags & KEY_FLAG_DEAD) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

 error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

 found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline struct key *__key_update(struct key *key, const void *payload,
				       size_t plen)
{
	int ret;

	/* need write permission on the key to update it */
	ret = -EACCES;
	if (!key_permission(key, KEY_WRITE))
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);

	if (ret == 0) {
		/* updating a negative key instantiates it */
		write_lock(&key->lock);
		key->flags &= ~KEY_FLAG_NEGATIVE;
		write_unlock(&key->lock);
	}

	up_write(&key->sem);

	if (ret < 0)
		goto error;
 out:
	return key;

 error:
	key_put(key);
	key = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
struct key *key_create_or_update(struct key *keyring,
				 const char *type,
				 const char *description,
				 const void *payload,
				 size_t plen,
				 int not_in_quota)
{
	struct key_type *ktype;
	struct key *key = NULL;
	key_perm_t perm;
	int ret;

	key_check(keyring);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key = ERR_PTR(-ENODEV);
		goto error;
	}

	ret = -EINVAL;
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	/* search for an existing key of the same type and description in the
	 * destination keyring
	 */
	down_write(&keyring->sem);

	key = __keyring_search_one(keyring, ktype, description, 0);
	if (!IS_ERR(key))
		goto found_matching_key;

	/* if we're going to allocate a new key, we're going to have to modify
	 * the keyring */
	ret = -EACCES;
	if (!key_permission(keyring, KEY_WRITE))
		goto error_3;

	/* decide on the permissions we want */
	perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;

	if (ktype->read)
		perm |= KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			perm, not_in_quota);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring);
	if (ret < 0) {
		key_put(key);
		key = ERR_PTR(ret);
	}

 error_3:
	up_write(&keyring->sem);
 error_2:
	key_type_put(ktype);
 error:
	return key;

 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key = __key_update(key, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
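
/* A minimal sketch of a caller using key_create_or_update() to add a key to a
 * keyring, updating any existing key of the same description rather than
 * creating a duplicate; the description and destination keyring are
 * hypothetical.
 */
#if 0 /* illustrative example only - not compiled */
static int example_add_or_update(struct key *dest_keyring,
				 const void *payload, size_t plen)
{
	struct key *key;

	key = key_create_or_update(dest_keyring, "user", "example:desc",
				   payload, plen, 0);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* we only needed the side effect of creating/updating the key, so
	 * drop the reference returned to us */
	key_put(key);
	return 0;
}
#endif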
/*****************************************************************************/
/*
 * update a key
 */
int key_update(struct key *key, const void *payload, size_t plen)
{
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = -EACCES;
	if (!key_permission(key, KEY_WRITE))
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);
		ret = key->type->update(key, payload, plen);

		if (ret == 0) {
			/* updating a negative key instantiates it */
			write_lock(&key->lock);
			key->flags &= ~KEY_FLAG_NEGATIVE;
			write_unlock(&key->lock);
		}

		up_write(&key->sem);
	}

 error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * duplicate a key, potentially with a revised description
 * - must be supported by the keytype (keyrings for instance can be duplicated)
 */
struct key *key_duplicate(struct key *source, const char *desc)
{
	struct key *key;
	int ret;

	key_check(source);

	if (!desc)
		desc = source->description;

	down_read(&key_types_sem);

	ret = -EINVAL;
	if (!source->type->duplicate)
		goto error;

	/* allocate and instantiate a key */
	key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
			source->perm, 0);
	if (IS_ERR(key))
		goto error_k;

	down_read(&source->sem);
	ret = key->type->duplicate(key, source);
	up_read(&source->sem);
	if (ret < 0)
		goto error2;

	atomic_inc(&key->user->nikeys);

	write_lock(&key->lock);
	key->flags |= KEY_FLAG_INSTANTIATED;
	write_unlock(&key->lock);

 error_k:
	up_read(&key_types_sem);
 out:
	return key;

 error2:
	key_put(key);
 error:
	up_read(&key_types_sem);
	key = ERR_PTR(ret);
	goto out;

} /* end key_duplicate() */

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	write_lock(&key->lock);
	key->flags |= KEY_FLAG_REVOKED;
	write_unlock(&key->lock);
	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

 out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
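
/* A minimal sketch of how a module might define and register its own key
 * type; the type name, the operations and their bodies are hypothetical, and
 * the operation signatures are assumed from how this file invokes them. The
 * module init/exit pairing with unregister_key_type() is shown for
 * completeness.
 */
#if 0 /* illustrative example only - not compiled */
static int example_key_instantiate(struct key *key, const void *data,
				   size_t datalen)
{
	/* attach no payload; a real type would copy the data and reserve
	 * quota with key_payload_reserve() here */
	return 0;
}

static int example_key_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void example_key_destroy(struct key *key)
{
	/* nothing to free for this trivial type */
}

static struct key_type key_type_example = {
	.name		= "example",
	.def_datalen	= 0,
	.instantiate	= example_key_instantiate,
	.match		= example_key_match,
	.destroy	= example_key_destroy,
};

static int __init example_keys_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_keys_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_keys_init);
module_exit(example_keys_exit);
#endif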
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* need to withdraw all keys of this type */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type != ktype)
			continue;

		write_lock(&key->lock);
		key->type = &key_type_dead;
		write_unlock(&key->lock);

		/* there shouldn't be anyone looking at the description or
		 * payload now */
		if (ktype->destroy)
			ktype->destroy(key);
		memset(&key->payload, 0xbd, sizeof(key->payload));
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */