/* audit_tree.c */

#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */

static struct inotify_handle *rtree_ih;
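
/*
 * Allocate an audit_tree big enough to hold a copy of the pathname;
 * the caller gets the initial reference.
 */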
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}
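
/* Drop the tree references held via the owner slots, then free the chunk. */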
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
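
/*
 * Hash by the address of the watched inode; dividing by L1_CACHE_BYTES
 * discards the low, poorly distributed bits before picking a bucket.
 */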
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */
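
/*
 * Map a node back to its containing chunk: node->index (with the
 * 'will prune' MSB masked off) is the slot number in owners[].
 */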
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
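
/*
 * Remove the tree owning *p from its chunk.  Called with hash_lock held
 * and returns with it held, but drops it in between.  If other trees
 * still reference the inode, the chunk is replaced by a smaller copy;
 * otherwise the watch is evicted.  On allocation failure we fall back
 * to severing the tree from the node and leaving a hole in the chunk.
 */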
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logics take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}
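
/*
 * Attach a fresh single-owner chunk to an inode that is not tagged yet.
 * If the tree died while we were setting up the watch, quietly evict
 * the new watch again and report success.
 */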
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(&old->watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		put_inotify_watch(&old->watch);
		return -ENOMEM;
	}

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
	put_inotify_watch(&old->watch); /* and kill it */
	return 0;
}
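
/*
 * Detach every rule still referring to the dying tree and log a
 * CONFIG_CHANGE record for each of them.  Called with audit_filter_mutex
 * held.
 */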
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}
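
/*
 * Walk all trees, re-resolve each pathname and untag the chunks whose
 * inodes no longer sit at the root of a mount under that path.
 */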
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct inode *inode = find_chunk(node)->watch.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
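
/*
 * Validate that the rule can carry a tree (absolute path, exit list,
 * equality match, no conflicting inode/watch/tree) and hang a freshly
 * allocated, not yet populated tree off it.
 */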
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
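
/*
 * For every tree whose pathname covers 'old', additionally tag all
 * mounts collected at 'new'; on failure the partially applied tags
 * are trimmed back out.
 */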
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}
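
/*
 * inotify callback: the only event we act on is IN_IGNORED, which tells
 * us the watch is being removed from the inode; evict the chunk and
 * drop the reference on the watch.
 */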
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event		= handle_event,
	.destroy_watch		= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);