ima_iint.c

/*
 * Copyright (C) 2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_iint.c
 *	- implements the IMA hooks: ima_inode_alloc, ima_inode_free
 *	- caches integrity information associated with an inode
 *	  using an rbtree.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include "ima.h"
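
/*
 * Integrity information is kept in an rbtree keyed by the inode pointer
 * itself (lookups and inserts compare raw pointer values).  ima_iint_lock
 * serializes all tree lookups, insertions and removals.  iint_cache is the
 * slab cache iints are allocated from; iint_initialized is set once that
 * cache has been created.
 */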
static struct rb_root ima_iint_tree = RB_ROOT;
static DEFINE_SPINLOCK(ima_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;

int iint_initialized = 0;

/*
 * __ima_iint_find - return the iint associated with an inode
 */
static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
{
	struct ima_iint_cache *iint;
	struct rb_node *n = ima_iint_tree.rb_node;

	assert_spin_locked(&ima_iint_lock);

	while (n) {
		iint = rb_entry(n, struct ima_iint_cache, rb_node);

		if (inode < iint->inode)
			n = n->rb_left;
		else if (inode > iint->inode)
			n = n->rb_right;
		else
			break;
	}
	if (!n)
		return NULL;

	return iint;
}

/*
 * ima_iint_find_get - return the iint associated with an inode
 *
 * ima_iint_find_get gets a reference to the iint. Caller must
 * remember to put the iint reference.
 */
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
	struct ima_iint_cache *iint;

	spin_lock(&ima_iint_lock);
	iint = __ima_iint_find(inode);
	if (iint)
		kref_get(&iint->refcount);
	spin_unlock(&ima_iint_lock);

	return iint;
}
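
/*
 * Note: a holder of the reference returned above is expected to drop it
 * with kref_put(), using iint_free as the release function.  Illustrative
 * caller pattern (sketch only; no such caller lives in this file):
 *
 *	iint = ima_iint_find_get(inode);
 *	if (iint) {
 *		... use iint ...
 *		kref_put(&iint->refcount, iint_free);
 *	}
 */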

/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 */
int ima_inode_alloc(struct inode *inode)
{
	struct rb_node **p;
	struct rb_node *new_node, *parent = NULL;
	struct ima_iint_cache *new_iint, *test_iint;
	int rc;

	new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
	if (!new_iint)
		return -ENOMEM;

	new_iint->inode = inode;
	new_node = &new_iint->rb_node;

	spin_lock(&ima_iint_lock);

	p = &ima_iint_tree.rb_node;
	while (*p) {
		parent = *p;
		test_iint = rb_entry(parent, struct ima_iint_cache, rb_node);

		rc = -EEXIST;
		if (inode < test_iint->inode)
			p = &(*p)->rb_left;
		else if (inode > test_iint->inode)
			p = &(*p)->rb_right;
		else
			/* an iint for this inode is already in the tree */
			goto out_err;
	}

	rb_link_node(new_node, parent, p);
	rb_insert_color(new_node, &ima_iint_tree);

	spin_unlock(&ima_iint_lock);

	return 0;
out_err:
	spin_unlock(&ima_iint_lock);
	/* drop the unused allocation; iint_free returns it to the slab cache */
	kref_put(&new_iint->refcount, iint_free);
	return rc;
}

/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
{
	struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
						   refcount);
	iint->version = 0;
	iint->flags = 0UL;
	if (iint->readcount != 0) {
		printk(KERN_INFO "%s: readcount: %ld\n", __func__,
		       iint->readcount);
		iint->readcount = 0;
	}
	if (iint->writecount != 0) {
		printk(KERN_INFO "%s: writecount: %ld\n", __func__,
		       iint->writecount);
		iint->writecount = 0;
	}
	if (iint->opencount != 0) {
		printk(KERN_INFO "%s: opencount: %ld\n", __func__,
		       iint->opencount);
		iint->opencount = 0;
	}
	/* restore the constructor-clean state before returning to the cache */
	kref_init(&iint->refcount);
	kmem_cache_free(iint_cache, iint);
}

/**
 * ima_inode_free - called on security_inode_free
 * @inode: pointer to the inode
 *
 * Free the integrity information (iint) associated with an inode.
 */
void ima_inode_free(struct inode *inode)
{
	struct ima_iint_cache *iint;

	spin_lock(&ima_iint_lock);
	iint = __ima_iint_find(inode);
	if (iint)
		rb_erase(&iint->rb_node, &ima_iint_tree);
	spin_unlock(&ima_iint_lock);

	if (iint)
		kref_put(&iint->refcount, iint_free);
}
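
/*
 * init_once is the constructor passed to kmem_cache_create() below; the slab
 * allocator runs it when an object is first set up, so every iint handed out
 * by iint_cache starts with zeroed counters and an initialized mutex and
 * refcount.
 */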
static void init_once(void *foo)
{
	struct ima_iint_cache *iint = foo;

	memset(iint, 0, sizeof *iint);
	iint->version = 0;
	iint->flags = 0UL;
	mutex_init(&iint->mutex);
	iint->readcount = 0;
	iint->writecount = 0;
	iint->opencount = 0;
	kref_init(&iint->refcount);
}
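
/*
 * Create the iint slab cache at security initcall time.  SLAB_PANIC makes
 * kmem_cache_create() panic rather than return NULL on failure, so
 * iint_initialized is only ever set once the cache actually exists.
 */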
static int __init ima_iintcache_init(void)
{
	iint_cache =
	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
			      SLAB_PANIC, init_once);
	iint_initialized = 1;
	return 0;
}
security_initcall(ima_iintcache_init);