#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct inotify_watch watch;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch.
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
        kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

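/*
 * Chunks carry a flexible owners[] array; one allocation, sized with
 * offsetof(), covers the header plus all count owner slots.
 */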
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        inotify_init_watch(&chunk->watch);
        return chunk;
}

static void __free_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

static inline void free_chunk(struct audit_chunk *chunk)
{
        call_rcu(&chunk->head, __free_chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        put_inotify_watch(&chunk->watch);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

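/*
 * Hash on the inode pointer: shift out the bits below the cache line
 * size (they carry little information) and reduce modulo HASH_SIZE -
 * the "middle bits of watch.inode" mentioned in the comment above.
 */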
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list = chunk_hash(chunk->watch.inode);
        list_add_rcu(&chunk->hash, list);
}

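/*
 * Typical caller pattern (a sketch, not a prescription): under
 * rcu_read_lock(), audit_tree_lookup(inode) returns the chunk for that
 * inode with a reference on its embedded watch already taken; the
 * caller keeps the pointer around for AUDIT_TREE rule matching and
 * later drops it with audit_put_chunk().
 */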
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct list_head *pos;

        list_for_each_rcu(pos, list) {
                struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
                if (p->watch.inode == inode) {
                        get_inotify_watch(&p->watch);
                        return p;
                }
        }
        return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */

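/*
 * Remove one owner from a chunk.  Chunks are never resized in place:
 * if other owners remain we clone the watch into a freshly allocated,
 * smaller chunk, copy the surviving owners across, swap it into the
 * hash under hash_lock and evict the old watch; if this was the last
 * owner the chunk is simply torn down.  On allocation/clone failure we
 * fall back to detaching the owner and leaving a NULL slot behind.
 */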
static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
        struct audit_chunk *new;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        mutex_lock(&chunk->watch.inode->inotify_mutex);
        if (chunk->dead) {
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
                return;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return;
        }

        new = alloc_chunk(size);
        if (!new)
                goto Fallback;
        if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
                free_chunk(new);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        /*
         * Walk all size + 1 old slots, including the last one: skip p
         * (dropping it from its owner's list), copy everything else
         * into the new chunk.
         */
        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);
        return;

Fallback:
        /* do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
}

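/*
 * First tag on this inode: attach a fresh single-owner chunk.  Note the
 * re-check of tree->goner under hash_lock - the tree may have been
 * killed while we were adding the watch, in which case the new chunk is
 * immediately evicted again.
 */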
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
                free_chunk(chunk);
                return -ENOSPC;
        }

        mutex_lock(&inode->inotify_mutex);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&inode->inotify_mutex);
        return 0;
}

/* the first tagged inode becomes root of tree */
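/*
 * Add tree as one more owner of the chunk already watching inode (or
 * create one via create_chunk() if there is none).  As in untag_chunk()
 * the existing chunk is not grown in place: its watch is cloned into a
 * chunk with one extra owner slot, and the new slot has the MSB of its
 * index set so the tagging can be reverted by trim_marked() if the rest
 * of the operation fails.
 */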
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct inotify_watch *watch;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
                return create_chunk(inode, tree);

        old = container_of(watch, struct audit_chunk, watch);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        put_inotify_watch(watch);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk)
                return -ENOMEM;

        mutex_lock(&inode->inotify_mutex);
        if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
                mutex_unlock(&inode->inotify_mutex);
                free_chunk(chunk);
                return -ENOSPC;
        }
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        inotify_evict_watch(&old->watch);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(&old->watch);
        return 0;
}

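/*
 * The low bits of node->index record the slot number within owners[],
 * so stepping the node pointer back by that many entries lands on
 * owners[0], from which container_of() recovers the chunk.
 */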
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

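/*
 * Detach every rule hanging off a dying tree and emit a configuration
 * change record for each one; called with audit_filter_mutex held.
 */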
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=remove rule dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        if (rule->filterkey) {
                                audit_log_format(ab, " key=");
                                audit_log_untrustedstring(ab, rule->filterkey);
                        } else
                                audit_log_format(ab, " key=(null)");
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
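/*
 * Untagging takes the inode's inotify_mutex, so hash_lock has to be
 * dropped (and the chunk pinned via its watch refcount) around each
 * untag_chunk() call.
 */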
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;
                struct audit_chunk *chunk;

                p = list_entry(victim->chunks.next, struct node, list);
                chunk = find_chunk(p);
                get_inotify_watch(&chunk->watch);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, p);

                put_inotify_watch(&chunk->watch);
                spin_lock(&hash_lock);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */
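/*
 * Two passes: first move every node whose index has the "will prune"
 * MSB set to the front of tree->chunks, then untag from the front until
 * the first unmarked node is reached.
 */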
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;
                struct audit_chunk *chunk;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                chunk = find_chunk(node);
                get_inotify_watch(&chunk->watch);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, node);

                put_inotify_watch(&chunk->watch);
                spin_lock(&hash_lock);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

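/*
 * Walk every tree and drop the chunks that no longer correspond to a
 * mount under the tree's pathname.  The on-stack cursor keeps our place
 * in tree_list across the points where audit_filter_mutex has to be
 * dropped for the path lookup; collect_mounts() gives us a private copy
 * of the mount subtree to compare chunk inodes against.
 */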
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct nameidata nd;
                struct vfsmount *root_mnt;
                struct node *node;
                struct list_head list;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = path_lookup(tree->pathname, 0, &nd);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
                path_put(&nd.path);
                if (!root_mnt)
                        goto skip_it;

                list_add_tail(&list, &root_mnt->mnt_list);
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        struct inode *inode = chunk->watch.inode;
                        struct vfsmount *mnt;
                        node->index |= 1U<<31;
                        list_for_each_entry(mnt, &list, mnt_list) {
                                if (mnt->mnt_root->d_inode == inode) {
                                        node->index &= ~(1U<<31);
                                        break;
                                }
                        }
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                list_del_init(&list);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

static int is_under(struct vfsmount *mnt, struct dentry *dentry,
                    struct nameidata *nd)
{
        if (mnt != nd->path.mnt) {
                for (;;) {
                        if (mnt->mnt_parent == mnt)
                                return 0;
                        if (mnt->mnt_parent == nd->path.mnt)
                                break;
                        mnt = mnt->mnt_parent;
                }
                dentry = mnt->mnt_mountpoint;
        }
        return is_subdir(dentry, nd->path.dentry);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op & ~AUDIT_EQUAL ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

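/*
 * Attach a new AUDIT_TREE rule.  If a tree with the same pathname
 * already exists the rule simply joins it; otherwise the freshly
 * allocated tree is published on tree_list first, audit_filter_mutex is
 * dropped for the path walk and tagging, and rule->tree is only set
 * once everything has succeeded - which is why the chunks stay marked
 * (MSB set) until the final pass clears them.
 */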
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct nameidata nd;
        struct vfsmount *mnt, *p;
        struct list_head list;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = path_lookup(tree->pathname, 0, &nd);
        if (err)
                goto Err;
        mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
        path_put(&nd.path);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }
        list_add_tail(&list, &mnt->mnt_list);

        get_tree(tree);
        list_for_each_entry(p, &list, mnt_list) {
                err = tag_chunk(p->mnt_root->d_inode, tree);
                if (err)
                        break;
        }

        list_del(&list);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

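/*
 * For every tree whose pathname covers the "old" location, also tag the
 * mounts collected at "new".  The on-stack cursor tracks how far we
 * have walked tree_list so that audit_filter_mutex can be dropped per
 * iteration; successfully processed trees are moved to the front of
 * tree_list, ahead of the on-stack barrier entry, and the second loop
 * revisits exactly those trees to either commit the new tags (clear the
 * MSB marks) or revert them with trim_marked() if any tagging failed.
 */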
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct nameidata nd;
        struct vfsmount *tagged;
        struct list_head list;
        struct vfsmount *mnt;
        struct dentry *dentry;
        int err;

        err = path_lookup(new, 0, &nd);
        if (err)
                return err;
        tagged = collect_mounts(nd.path.mnt, nd.path.dentry);
        path_put(&nd.path);
        if (!tagged)
                return -ENOMEM;

        err = path_lookup(old, 0, &nd);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }
        mnt = mntget(nd.path.mnt);
        dentry = dget(nd.path.dentry);
        path_put(&nd.path);

        if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
                follow_up(&mnt, &dentry);

        list_add_tail(&list, &tagged->mnt_list);

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct vfsmount *p;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = path_lookup(tree->pathname, 0, &nd);
                if (err) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                spin_lock(&vfsmount_lock);
                if (!is_under(mnt, dentry, &nd)) {
                        spin_unlock(&vfsmount_lock);
                        path_put(&nd.path);
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }
                spin_unlock(&vfsmount_lock);
                path_put(&nd.path);

                list_for_each_entry(p, &list, mnt_list) {
                        failed = tag_chunk(p->mnt_root->d_inode, tree);
                        if (failed)
                                break;
                }

                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        list_del(&list);
        mutex_unlock(&audit_filter_mutex);
        dput(dentry);
        mntput(mnt);
        drop_collected_mounts(tagged);
        return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

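/*
 * The watched inode is going away (or the watch was removed): detach
 * the chunk from its hash bucket and from every owner, and hand any
 * tree that had its root here over to prune_list, scheduling the prune
 * thread to finish the job.
 */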
/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                kill_rules(owner);
                list_move(&owner->list, &prune_list);
                audit_schedule_prune();
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
}

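/*
 * Inotify callbacks.  IN_IGNORED is delivered once when a watch goes
 * away (explicit eviction or the inode being deleted), so that is the
 * point at which the chunk is evicted and a reference on the watch is
 * dropped; destroy_watch() then frees the chunk once the last reference
 * is gone.
 */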
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
                         u32 cookie, const char *dname, struct inode *inode)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

        if (mask & IN_IGNORED) {
                evict_chunk(chunk);
                put_inotify_watch(watch);
        }
}

static void destroy_watch(struct inotify_watch *watch)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
        free_chunk(chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
        .handle_event   = handle_event,
        .destroy_watch  = destroy_watch,
};

static int __init audit_tree_init(void)
{
        int i;

        rtree_ih = inotify_init(&rtree_inotify_ops);
        if (IS_ERR(rtree_ih))
                audit_panic("cannot initialize inotify handle for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);