/* linux-3.10.git: kernel/audit_tree.c */
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows getting from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

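/*
 * Illustrative example (annotation, not from the original source): a chunk
 * watching one inode for three trees has owners[0..2] with index 0, 1 and 2.
 * While a tagging is still uncommitted the slot carries n | (1U << 31);
 * audit_add_tree_rule() clears the MSB once the whole tree has been tagged,
 * and trim_marked() prunes any slots that still have it set.
 */
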
static struct fsnotify_group *audit_tree_group;

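/*
 * Allocate a tree with the pathname appended in the same allocation;
 * the caller gets the initial reference (count == 1).
 */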
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

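/*
 * Allocate a chunk with room for 'count' owner slots (flexible array).
 * .refs starts at 1; that reference is dropped from the RCU callback
 * once the embedded fsnotify mark is destroyed.
 */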
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

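/*
 * Hash on the inode address; dividing by L1_CACHE_BYTES drops the low
 * bits that are zero for cacheline-aligned objects, so the middle bits
 * of the pointer select the bucket.
 */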
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

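/*
 * node.index (with the MSB masked off) is the slot's position in
 * owners[], so stepping back that many elements lands on owners[0],
 * from which container_of() recovers the enclosing chunk.
 */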
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

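/*
 * Drop one owner from a chunk: allocate a replacement chunk one slot
 * smaller, copy the surviving owners across and swap it into the hash
 * under hash_lock.  If the allocation fails we fall back to clearing the
 * owner slot in place.  Called and returns with hash_lock held; the lock
 * is dropped around the blocking work in between.
 */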
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

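/*
 * First tag on this inode: attach a fresh single-owner chunk and make it
 * the tree's root chunk if the tree does not have one yet.
 */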
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_inode_mark */
	return 0;
}

static void audit_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

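/*
 * Detach every rule that refers to this tree, logging a configuration
 * change for each fully-initialized one.  Called with audit_filter_mutex
 * held.
 */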
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_log_remove_rule(rule);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move marked (MSB set) nodes to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

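/*
 * Walk tree_list with a dummy cursor entry so audit_filter_mutex can be
 * dropped per tree; for each tree, mark every chunk, then unmark the ones
 * still reachable under the tree's current mounts, so trim_marked()
 * removes only the stale ones.
 */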
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

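/*
 * Validate the rule/pathname combination and attach a freshly allocated
 * tree to the rule; the pathname must be absolute and the rule an exit
 * filter using equality.
 */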
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

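/*
 * Tag the mounts collected at 'new' into every tree whose pathname has
 * 'old' underneath it.  Two dummy list entries are used: 'cursor' walks
 * tree_list with audit_filter_mutex dropped per tree, while 'barrier'
 * separates the trees already processed; the second loop commits their
 * marks (or trims them, if tagging failed).
 */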
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

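/*
 * The watched inode is going away: every tree rooted at this chunk is
 * marked as a goner and queued for pruning (on the postponed list from
 * audit_killed_trees() when one is available, otherwise via the prune
 * thread), and the chunk itself is unhashed.
 */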
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
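	/* never reached: audit_tree_send_event() below rejects everything */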
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.should_send_event = audit_tree_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);