[linux-3.10.git] kernel/audit_tree.c
index f16f909..00c4459 100644
@@ -93,16 +93,10 @@ static inline void get_tree(struct audit_tree *tree)
        atomic_inc(&tree->count);
 }
 
-static void __put_tree(struct rcu_head *rcu)
-{
-       struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
-       kfree(tree);
-}
-
 static inline void put_tree(struct audit_tree *tree)
 {
        if (atomic_dec_and_test(&tree->count))
-               call_rcu(&tree->head, __put_tree);
+               kfree_rcu(tree, head);
 }
 
 /* to avoid bringing the entire thing in audit.h */
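
The first hunk is the standard conversion from an open-coded call_rcu() callback whose only job is kfree() to kfree_rcu(). A minimal sketch of the pattern, using a hypothetical struct foo instead of struct audit_tree:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

/* Before: a single-purpose RCU callback exists only to free the object. */
static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void put_foo_old(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);
}

/* After: kfree_rcu() takes the object and the name of its rcu_head
 * member, so the callback above can be deleted entirely. */
static void put_foo_new(struct foo *f)
{
	kfree_rcu(f, rcu);
}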
@@ -160,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+       chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
 }
 
@@ -223,7 +218,7 @@ static void untag_chunk(struct node *p)
 {
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
-       struct audit_chunk *new;
+       struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;
@@ -232,9 +227,14 @@ static void untag_chunk(struct node *p)
 
        spin_unlock(&hash_lock);
 
+       if (size)
+               new = alloc_chunk(size);
+
        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
+               if (new)
+                       free_chunk(new);
                goto out;
        }
 
@@ -250,17 +250,16 @@ static void untag_chunk(struct node *p)
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
-               fsnotify_destroy_mark_by_entry(entry);
-               fsnotify_put_mark(entry);
+               fsnotify_destroy_mark(entry, audit_tree_group);
                goto out;
        }
 
-       new = alloc_chunk(size);
        if (!new)
                goto Fallback;
+
        fsnotify_duplicate_mark(&new->mark, entry);
-       if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, 1)) {
-               free_chunk(new);
+       if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+               fsnotify_put_mark(&new->mark);
                goto Fallback;
        }
 
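
The two untag_chunk() hunks above move the alloc_chunk() call ahead of spin_lock(&entry->lock): alloc_chunk() allocates with GFP_KERNEL, which may sleep, so it must not run with the spinlock held. The allocation is made optimistically and simply discarded if the chunk turns out to be dead. A rough sketch of that pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct item { int payload; };

struct holder {
	spinlock_t lock;
	bool dead;
	struct item *cur;
};

static void replace_item(struct holder *h, int new_size)
{
	struct item *replacement = NULL;

	/* GFP_KERNEL may sleep: allocate before taking the spinlock. */
	if (new_size)
		replacement = kzalloc(sizeof(*replacement), GFP_KERNEL);

	spin_lock(&h->lock);
	if (h->dead) {
		/* Lost the race; discard the speculative allocation. */
		spin_unlock(&h->lock);
		kfree(replacement);	/* kfree(NULL) is a no-op */
		return;
	}
	if (!replacement) {
		/* Allocation failed: same situation as the Fallback: label. */
		spin_unlock(&h->lock);
		return;
	}

	/* Install the replacement while still holding the lock. */
	h->cur = replacement;
	spin_unlock(&h->lock);
}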
@@ -293,8 +292,8 @@ static void untag_chunk(struct node *p)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
-       fsnotify_destroy_mark_by_entry(entry);
-       fsnotify_put_mark(entry);
+       fsnotify_destroy_mark(entry, audit_tree_group);
+       fsnotify_put_mark(&new->mark);  /* drop initial reference */
        goto out;
 
 Fallback:
@@ -322,8 +321,8 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
                return -ENOMEM;
 
        entry = &chunk->mark;
-       if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
-               free_chunk(chunk);
+       if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
+               fsnotify_put_mark(entry);
                return -ENOSPC;
        }
 
@@ -333,7 +332,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
-               fsnotify_destroy_mark_by_entry(entry);
+               fsnotify_destroy_mark(entry, audit_tree_group);
                fsnotify_put_mark(entry);
                return 0;
        }
@@ -348,6 +347,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
+       fsnotify_put_mark(entry);       /* drop initial reference */
        return 0;
 }
 
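
The reference-counting changes in create_chunk(), untag_chunk() and tag_chunk() follow one discipline: fsnotify_init_mark() leaves the caller holding one reference; once fsnotify_add_mark() has been attempted, the mark may only be released via fsnotify_put_mark(), never freed directly; and after the mark is successfully installed the caller drops that initial reference, since the inode's mark list now holds its own. A rough sketch, assuming a hypothetical struct watched embedding the mark and reusing audit_tree_group from this file:

#include <linux/fsnotify_backend.h>
#include <linux/slab.h>

struct watched {
	struct fsnotify_mark mark;
	/* ... payload ... */
};

static void watched_free_mark(struct fsnotify_mark *mark)
{
	kfree(container_of(mark, struct watched, mark));
}

static int install_watch(struct watched *w, struct inode *inode)
{
	/* fsnotify_init_mark() hands us the initial reference. */
	fsnotify_init_mark(&w->mark, watched_free_mark);

	if (fsnotify_add_mark(&w->mark, audit_tree_group, inode, NULL, 0)) {
		/* Error path: put the mark, do not free it by hand. */
		fsnotify_put_mark(&w->mark);
		return -ENOSPC;
	}

	/* ... publish w ... */

	/* The inode's mark list pins the object now, so drop the
	 * initial reference taken in fsnotify_init_mark(). */
	fsnotify_put_mark(&w->mark);
	return 0;
}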
@@ -360,9 +360,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        struct node *p;
        int n;
 
-       spin_lock(&inode->i_lock);
-       old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
-       spin_unlock(&inode->i_lock);
+       old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);
 
@@ -397,9 +395,9 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        }
 
        fsnotify_duplicate_mark(chunk_entry, old_entry);
-       if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, 1)) {
+       if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
-               free_chunk(chunk);
+               fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }
@@ -415,7 +413,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);
 
-               fsnotify_destroy_mark_by_entry(chunk_entry);
+               fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
@@ -446,17 +444,32 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
-       fsnotify_destroy_mark_by_entry(old_entry);
+       fsnotify_destroy_mark(old_entry, audit_tree_group);
+       fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
-       fsnotify_put_mark(old_entry); /* and kill it */
        return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+       if (unlikely(!ab))
+               return;
+       audit_log_format(ab, "op=");
+       audit_log_string(ab, "remove rule");
+       audit_log_format(ab, " dir=");
+       audit_log_untrustedstring(ab, rule->tree->pathname);
+       audit_log_key(ab, rule->filterkey);
+       audit_log_format(ab, " list=%d res=1", rule->listnr);
+       audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
-       struct audit_buffer *ab;
 
        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);
@@ -464,14 +477,7 @@ static void kill_rules(struct audit_tree *tree)
                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
-                       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-                       audit_log_format(ab, "op=");
-                       audit_log_string(ab, "remove rule");
-                       audit_log_format(ab, " dir=");
-                       audit_log_untrustedstring(ab, rule->tree->pathname);
-                       audit_log_key(ab, rule->filterkey);
-                       audit_log_format(ab, " list=%d res=1", rule->listnr);
-                       audit_log_end(ab);
+                       audit_log_remove_rule(rule);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
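
Besides factoring the logging out of kill_rules(), the new audit_log_remove_rule() helper adds a check that the open-coded version lacked: audit_log_start() can return NULL (for example when auditing is disabled or no buffer can be obtained), and the helper now returns early in that case rather than issuing formatting calls against a missing buffer. A hypothetical caller sketch of the same pattern:

#include <linux/audit.h>

static void log_config_change_example(void)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;		/* auditing disabled or no buffer */
	audit_log_format(ab, "op=%s res=1", "example");
	audit_log_end(ab);
}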
@@ -598,13 +604,13 @@ void audit_trim_trees(void)
 
                root_mnt = collect_mounts(&path);
                path_put(&path);
-               if (!root_mnt)
+               if (IS_ERR(root_mnt))
                        goto skip_it;
 
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
-                       /* this could be NULL if the watch is dieing else where... */
+                       /* this could be NULL if the watch is dying else where... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
@@ -612,9 +618,9 @@ void audit_trim_trees(void)
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
-               put_tree(tree);
                drop_collected_mounts(root_mnt);
 skip_it:
+               put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
@@ -653,6 +659,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
        struct vfsmount *mnt;
        int err;
 
+       rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
@@ -672,8 +679,8 @@ int audit_add_tree_rule(struct audit_krule *rule)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
-       if (!mnt) {
-               err = -ENOMEM;
+       if (IS_ERR(mnt)) {
+               err = PTR_ERR(mnt);
                goto Err;
        }
 
@@ -722,8 +729,8 @@ int audit_tag_tree(char *old, char *new)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
-       if (!tagged)
-               return -ENOMEM;
+       if (IS_ERR(tagged))
+               return PTR_ERR(tagged);
 
        err = kern_path(old, 0, &path1);
        if (err) {
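
The collect_mounts() call sites in audit_trim_trees(), audit_add_tree_rule() and audit_tag_tree() are all adjusted for the same interface change: collect_mounts() now reports failure through an ERR_PTR()-encoded pointer rather than NULL, so callers test with IS_ERR() and propagate PTR_ERR() instead of assuming -ENOMEM. A small sketch of the idiom with a hypothetical constructor:

#include <linux/err.h>		/* ERR_PTR(), IS_ERR(), PTR_ERR() */
#include <linux/slab.h>

struct widget { int id; };

/* Failure is encoded in the returned pointer instead of NULL. */
static struct widget *widget_create(int id)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);
	w->id = id;
	return w;
}

static int widget_use(int id)
{
	struct widget *w = widget_create(id);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* propagate the encoded errno */
	/* ... use w ... */
	kfree(w);
	return 0;
}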
@@ -905,7 +912,10 @@ static void evict_chunk(struct audit_chunk *chunk)
        mutex_unlock(&audit_filter_mutex);
 }
 
-static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+static int audit_tree_handle_event(struct fsnotify_group *group,
+                                  struct fsnotify_mark *inode_mark,
+                                  struct fsnotify_mark *vfsmonut_mark,
+                                  struct fsnotify_event *event)
 {
        BUG();
        return -EOPNOTSUPP;
@@ -916,14 +926,20 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 
        evict_chunk(chunk);
-       fsnotify_put_mark(entry);
+
+       /*
+        * We are guaranteed to have at least one reference to the mark from
+        * either the inode or the caller of fsnotify_destroy_mark().
+        */
+       BUG_ON(atomic_read(&entry->refcnt) < 1);
 }
 
 static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
-                                 struct vfsmount *mnt, __u32 mask, void *data,
-                                 int data_type)
+                                 struct fsnotify_mark *inode_mark,
+                                 struct fsnotify_mark *vfsmount_mark,
+                                 __u32 mask, void *data, int data_type)
 {
-       return 0;
+       return false;
 }
 
 static const struct fsnotify_ops audit_tree_ops = {