audit_tree.c

#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

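/*
 * Illustrative sketch (hypothetical helper, not used by anything below):
 * how the node.index trick described above gets from a struct node back
 * to its containing chunk.  find_chunk() further down does exactly this;
 * the version here just spells the idea out.
 */
static inline struct audit_chunk *example_node_to_chunk(struct node *p)
{
	/* strip the "will prune" mark in the MSB to recover the array slot */
	unsigned slot = p->index & ~(1U << 31);

	/* step back to owners[0], then up to the enclosing audit_chunk */
	return container_of(p - slot, struct audit_chunk, owners[0]);
}
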
static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

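/*
 * Illustrative sketch (hypothetical caller, not part of this file's API):
 * audit_tree_lookup() is meant to run under rcu_read_lock() and, on
 * success, returns a chunk with .refs already bumped, which the caller
 * eventually drops with audit_put_chunk() - roughly the collect/drop
 * pattern described in the comment block near the top of this file.
 */
static inline struct audit_chunk *example_grab_chunk(const struct inode *inode)
{
	struct audit_chunk *chunk;

	rcu_read_lock();
	chunk = audit_tree_lookup(inode);	/* takes a reference if found */
	rcu_read_unlock();

	/* caller is responsible for audit_put_chunk(chunk) when done */
	return chunk;
}
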
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logics take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; i < size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		/* drop the reference taken by inotify_find_watch() above */
		put_inotify_watch(&old->watch);
		return -ENOMEM;
	}

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;
			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
{
	if (mnt != path->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == path->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, path->dentry);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}

	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = kern_path(new, 0, &path);
	if (err)
		return err;
	tagged = collect_mounts(&path);
	path_put(&path);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	path_put(&path);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &path)) {
			spin_unlock(&vfsmount_lock);
			path_put(&path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}

	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	dput(dentry);
	mntput(mnt);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event = handle_event,
	.destroy_watch = destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);