vfs: dcache: fix deadlock in tree traversal
diff --git a/fs/dcache.c b/fs/dcache.c
index a78e145..693f95b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -23,7 +23,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/cache.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mount.h>
 #include <linux/file.h>
 #include <asm/uaccess.h>
@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
 static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
-                                       unsigned long hash)
+                                       unsigned int hash)
 {
-       hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
-       hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+       hash += (unsigned long) parent / L1_CACHE_BYTES;
+       hash = hash + (hash >> D_HASHBITS);
        return dentry_hashtable + (hash & D_HASHMASK);
 }
 
@@ -141,12 +141,43 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
  * The strings are both count bytes long, and count is non-zero.
  */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-                               const unsigned char *ct, size_t tcount)
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#include <asm/word-at-a-time.h>
+/*
+ * NOTE! 'cs' and 'scount' come from a dentry, so it has an
+ * aligned allocation for this particular component. We don't
+ * strictly need the load_unaligned_zeropad() safety, but it
+ * doesn't hurt either.
+ *
+ * In contrast, 'ct' and 'tcount' can be from a pathname, and do
+ * need the careful unaligned handling.
+ */
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 {
-       if (scount != tcount)
-               return 1;
+       unsigned long a,b,mask;
+
+       for (;;) {
+               a = *(unsigned long *)cs;
+               b = load_unaligned_zeropad(ct);
+               if (tcount < sizeof(unsigned long))
+                       break;
+               if (unlikely(a != b))
+                       return 1;
+               cs += sizeof(unsigned long);
+               ct += sizeof(unsigned long);
+               tcount -= sizeof(unsigned long);
+               if (!tcount)
+                       return 0;
+       }
+       mask = ~(~0ul << tcount*8);
+       return unlikely(!!((a ^ b) & mask));
+}
+
+#else
 
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
+{
        do {
                if (*cs != *ct)
                        return 1;
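
The masked tail compare added in the hunk above is easiest to see with
concrete bytes. The following is a stand-alone user-space sketch, not the
kernel code, assuming little-endian byte order and 64-bit words; at the
point the mask is applied the loop guarantees 1 <= tcount <= 7, so
~(~0ul << tcount*8) keeps exactly the low tcount bytes, i.e. the first
tcount characters of each word.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* keep only the low 'tcount' bytes of the XOR, as in dentry_string_cmp() */
static int tail_differs(uint64_t a, uint64_t b, unsigned tcount)
{
	uint64_t mask = ~(~(uint64_t)0 << tcount * 8);

	return !!((a ^ b) & mask);
}

int main(void)
{
	uint64_t a, b;

	memcpy(&a, "foobar\0\0", sizeof(a));
	memcpy(&b, "foobaz\0\0", sizeof(b));

	assert(!tail_differs(a, b, 5));		/* "fooba" == "fooba" */
	assert(tail_differs(a, b, 6));		/* "foobar" != "foobaz" */
	return 0;
}
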
@@ -157,11 +188,37 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
        return 0;
 }
 
+#endif
+
+static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
+{
+       const unsigned char *cs;
+       /*
+        * Be careful about RCU walk racing with rename:
+        * use ACCESS_ONCE to fetch the name pointer.
+        *
+        * NOTE! Even if a rename will mean that the length
+        * was not loaded atomically, we don't care. The
+        * RCU walk will check the sequence count eventually,
+        * and catch it. And we won't overrun the buffer,
+        * because we're reading the name pointer atomically,
+        * and a dentry name is guaranteed to be properly
+        * terminated with a NUL byte.
+        *
+        * End result: even if 'len' is wrong, we'll exit
+        * early because the data cannot match (there can
+        * be no NUL in the ct/tcount data)
+        */
+       cs = ACCESS_ONCE(dentry->d_name.name);
+       smp_read_barrier_depends();
+       return dentry_string_cmp(cs, ct, tcount);
+}
+
 static void __d_free(struct rcu_head *head)
 {
        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 
-       WARN_ON(!list_empty(&dentry->d_alias));
+       WARN_ON(!hlist_unhashed(&dentry->d_alias));
        if (dname_external(dentry))
                kfree(dentry->d_name.name);
        kmem_cache_free(dentry_cache, dentry); 
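
The comment in the new dentry_cmp() above argues that a stale length cannot
make the compare walk off the end of the dentry name: the name is always
NUL-terminated, and a pathname component contains no NUL, so a mismatch is
found no later than the terminator. A tiny user-space illustration of that
argument using the byte-at-a-time fallback logic (a sketch, not kernel code):

#include <assert.h>

/* same shape as the non-word-at-a-time dentry_string_cmp() loop */
static int bytewise_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

int main(void)
{
	const unsigned char name[] = "foo";	/* dentry name, NUL-terminated */
	const unsigned char path[] = "foobar";	/* path component, no NUL inside */

	/* stale length 6: the compare stops at name[3] ('\0' != 'b') and
	 * never reads past the dentry name buffer */
	assert(bytewise_cmp(name, path, 6) == 1);
	return 0;
}
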
@@ -210,7 +267,7 @@ static void dentry_iput(struct dentry * dentry)
        struct inode *inode = dentry->d_inode;
        if (inode) {
                dentry->d_inode = NULL;
-               list_del_init(&dentry->d_alias);
+               hlist_del_init(&dentry->d_alias);
                spin_unlock(&dentry->d_lock);
                spin_unlock(&inode->i_lock);
                if (!inode->i_nlink)
@@ -234,7 +291,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
 {
        struct inode *inode = dentry->d_inode;
        dentry->d_inode = NULL;
-       list_del_init(&dentry->d_alias);
+       hlist_del_init(&dentry->d_alias);
        dentry_rcuwalk_barrier(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
@@ -332,7 +389,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
         * Inform try_to_ascend() that we are no longer attached to the
         * dentry tree
         */
-       dentry->d_flags |= DCACHE_DISCONNECTED;
+       dentry->d_flags |= DCACHE_DENTRY_KILLED;
        if (parent)
                spin_unlock(&parent->d_lock);
        dentry_iput(dentry);
@@ -642,10 +699,11 @@ EXPORT_SYMBOL(dget_parent);
 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
        struct dentry *alias, *discon_alias;
+       struct hlist_node *p;
 
 again:
        discon_alias = NULL;
-       list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
                spin_lock(&alias->d_lock);
                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
                        if (IS_ROOT(alias) &&
@@ -680,7 +738,7 @@ struct dentry *d_find_alias(struct inode *inode)
 {
        struct dentry *de = NULL;
 
-       if (!list_empty(&inode->i_dentry)) {
+       if (!hlist_empty(&inode->i_dentry)) {
                spin_lock(&inode->i_lock);
                de = __d_find_alias(inode, 0);
                spin_unlock(&inode->i_lock);
@@ -696,9 +754,10 @@ EXPORT_SYMBOL(d_find_alias);
 void d_prune_aliases(struct inode *inode)
 {
        struct dentry *dentry;
+       struct hlist_node *p;
 restart:
        spin_lock(&inode->i_lock);
-       list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
                spin_lock(&dentry->d_lock);
                if (!dentry->d_count) {
                        __dget_dlock(dentry);
@@ -920,7 +979,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                        inode = dentry->d_inode;
                        if (inode) {
                                dentry->d_inode = NULL;
-                               list_del_init(&dentry->d_alias);
+                               hlist_del_init(&dentry->d_alias);
                                if (dentry->d_op && dentry->d_op->d_iput)
                                        dentry->d_op->d_iput(dentry, inode);
                                else
@@ -989,7 +1048,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
         * or deletion
         */
        if (new != old->d_parent ||
-                (old->d_flags & DCACHE_DISCONNECTED) ||
+                (old->d_flags & DCACHE_DENTRY_KILLED) ||
                 (!locked && read_seqretry(&rename_lock, seq))) {
                spin_unlock(&new->d_lock);
                new = NULL;
@@ -1075,6 +1134,8 @@ positive:
        return 1;
 
 rename_retry:
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
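
This "if (locked) goto again" guard (repeated below in select_parent() and
d_genocide()) is the deadlock fix named in the subject line: the write side
of rename_lock is not recursive, so a traversal that already holds it must
restart directly instead of calling write_seqlock() a second time. A
user-space analogy of the pattern, with a plain mutex standing in for the
seqlock write side (a sketch, not the kernel locking code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t rename_lock_analogue = PTHREAD_MUTEX_INITIALIZER;

static void traverse(bool (*saw_inconsistency)(void))
{
	bool locked = false;

again:
	/* ... walk the tree ... */
	if (saw_inconsistency())
		goto rename_retry;

	/* finished a consistent pass */
	if (locked)
		pthread_mutex_unlock(&rename_lock_analogue);
	return;

rename_retry:
	if (locked)		/* the fix: we already hold the lock, just restart */
		goto again;
	locked = true;
	/* taking this non-recursive lock a second time would deadlock */
	pthread_mutex_lock(&rename_lock_analogue);
	goto again;
}
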
@@ -1082,7 +1143,7 @@ rename_retry:
 EXPORT_SYMBOL(have_submounts);
 
 /*
- * Search the dentry child list for the specified parent,
+ * Search the dentry child list of the specified parent,
  * and move any unused dentries to the end of the unused
  * list for prune_dcache(). We descend to the next level
  * whenever the d_subdirs list is non-empty and continue
@@ -1177,6 +1238,8 @@ out:
 rename_retry:
        if (found)
                return found;
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
@@ -1217,6 +1280,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        if (!dentry)
                return NULL;
 
+       /*
+        * We guarantee that the inline name is always NUL-terminated.
+        * This way the memcpy() done by the name switching in rename
+        * will still always have a NUL at the end, even if we might
+        * be overwriting an internal NUL character
+        */
+       dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
        if (name->len > DNAME_INLINE_LEN-1) {
                dname = kmalloc(name->len + 1, GFP_KERNEL);
                if (!dname) {
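
The invariant established by the new initialisation above (the last byte of
d_iname is always NUL, even while rename's memcpy() is rewriting the earlier
bytes) is what keeps a concurrent reader inside the inline buffer. A trivial
user-space illustration, with an arbitrary stand-in for DNAME_INLINE_LEN:

#include <assert.h>
#include <string.h>

#define DNAME_INLINE_LEN_SKETCH 40	/* illustrative size, not the real constant */

int main(void)
{
	char iname[DNAME_INLINE_LEN_SKETCH];

	memset(iname, 'x', sizeof(iname));	/* worst case: no internal NUL at all */
	iname[DNAME_INLINE_LEN_SKETCH - 1] = 0;	/* the invariant from __d_alloc() */

	/* a NUL-terminated scan can never leave the buffer */
	assert(strlen(iname) <= DNAME_INLINE_LEN_SKETCH - 1);
	return 0;
}
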
@@ -1226,13 +1296,16 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        } else  {
                dname = dentry->d_iname;
        }       
-       dentry->d_name.name = dname;
 
        dentry->d_name.len = name->len;
        dentry->d_name.hash = name->hash;
        memcpy(dname, name->name, name->len);
        dname[name->len] = 0;
 
+       /* Make sure we always see the terminating NUL character */
+       smp_wmb();
+       dentry->d_name.name = dname;
+
        dentry->d_count = 1;
        dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
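
The smp_wmb() added above is the publish side of the pattern whose consume
side is the ACCESS_ONCE()/smp_read_barrier_depends() pair in dentry_cmp()
earlier in this patch: the name bytes, including the terminating NUL, must be
visible before the pointer that leads readers to them. A user-space analogue
using C11 release/acquire atomics in place of the kernel primitives (a
sketch, and deliberately stronger than strictly required):

#include <stdatomic.h>
#include <string.h>

struct name { char buf[32]; };

static _Atomic(struct name *) published_name;

/* writer: fully initialise the buffer, then make the pointer visible */
static void publish(struct name *n, const char *s)
{
	strncpy(n->buf, s, sizeof(n->buf) - 1);
	n->buf[sizeof(n->buf) - 1] = '\0';
	/* release ordering plays the role of smp_wmb() before the store */
	atomic_store_explicit(&published_name, n, memory_order_release);
}

/* reader: fetch the pointer once, then dereference; acquire ordering plays
 * the role of smp_read_barrier_depends() after ACCESS_ONCE() */
static const char *current_name(void)
{
	struct name *n = atomic_load_explicit(&published_name, memory_order_acquire);

	return n ? n->buf : "";
}
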
@@ -1245,7 +1318,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        INIT_HLIST_BL_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
-       INIT_LIST_HEAD(&dentry->d_alias);
+       INIT_HLIST_NODE(&dentry->d_alias);
        INIT_LIST_HEAD(&dentry->d_u.d_child);
        d_set_d_op(dentry, dentry->d_sb->s_d_op);
 
@@ -1333,7 +1406,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        if (inode) {
                if (unlikely(IS_AUTOMOUNT(inode)))
                        dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
-               list_add(&dentry->d_alias, &inode->i_dentry);
+               hlist_add_head(&dentry->d_alias, &inode->i_dentry);
        }
        dentry->d_inode = inode;
        dentry_rcuwalk_barrier(dentry);
@@ -1358,7 +1431,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
  
 void d_instantiate(struct dentry *entry, struct inode * inode)
 {
-       BUG_ON(!list_empty(&entry->d_alias));
+       BUG_ON(!hlist_unhashed(&entry->d_alias));
        if (inode)
                spin_lock(&inode->i_lock);
        __d_instantiate(entry, inode);
@@ -1391,25 +1464,26 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
        int len = entry->d_name.len;
        const char *name = entry->d_name.name;
        unsigned int hash = entry->d_name.hash;
+       struct hlist_node *p;
 
        if (!inode) {
                __d_instantiate(entry, NULL);
                return NULL;
        }
 
-       list_for_each_entry(alias, &inode->i_dentry, d_alias) {
-               struct qstr *qstr = &alias->d_name;
-
+       hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
                /*
                 * Don't need alias->d_lock here, because aliases with
                 * d_parent == entry->d_parent are not subject to name or
                 * parent changes, because the parent inode i_mutex is held.
                 */
-               if (qstr->hash != hash)
+               if (alias->d_name.hash != hash)
                        continue;
                if (alias->d_parent != entry->d_parent)
                        continue;
-               if (dentry_cmp(qstr->name, qstr->len, name, len))
+               if (alias->d_name.len != len)
+                       continue;
+               if (dentry_cmp(alias, name, len))
                        continue;
                __dget(alias);
                return alias;
@@ -1423,7 +1497,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
 {
        struct dentry *result;
 
-       BUG_ON(!list_empty(&entry->d_alias));
+       BUG_ON(!hlist_unhashed(&entry->d_alias));
 
        if (inode)
                spin_lock(&inode->i_lock);
@@ -1448,7 +1522,7 @@ struct dentry *d_make_root(struct inode *root_inode)
        struct dentry *res = NULL;
 
        if (root_inode) {
-               static const struct qstr name = { .name = "/", .len = 1 };
+               static const struct qstr name = QSTR_INIT("/", 1);
 
                res = __d_alloc(root_inode->i_sb, &name);
                if (res)
@@ -1464,9 +1538,9 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
 {
        struct dentry *alias;
 
-       if (list_empty(&inode->i_dentry))
+       if (hlist_empty(&inode->i_dentry))
                return NULL;
-       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+       alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
        __dget(alias);
        return alias;
 }
@@ -1540,7 +1614,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
        spin_lock(&tmp->d_lock);
        tmp->d_inode = inode;
        tmp->d_flags |= DCACHE_DISCONNECTED;
-       list_add(&tmp->d_alias, &inode->i_dentry);
+       hlist_add_head(&tmp->d_alias, &inode->i_dentry);
        hlist_bl_lock(&tmp->d_sb->s_anon);
        hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
        hlist_bl_unlock(&tmp->d_sb->s_anon);
@@ -1686,11 +1760,53 @@ err_out:
 }
 EXPORT_SYMBOL(d_add_ci);
 
+/*
+ * Do the slow-case of the dentry name compare.
+ *
+ * Unlike the dentry_cmp() function, we need to atomically
+ * load the name, length and inode information, so that the
+ * filesystem can rely on them, and can use the 'name' and
+ * 'len' information without worrying about walking off the
+ * end of memory etc.
+ *
+ * Thus the read_seqcount_retry() and the "duplicate" info
+ * in arguments (the low-level filesystem should not look
+ * at the dentry inode or name contents directly, since
+ * rename can change them while we're in RCU mode).
+ */
+enum slow_d_compare {
+       D_COMP_OK,
+       D_COMP_NOMATCH,
+       D_COMP_SEQRETRY,
+};
+
+static noinline enum slow_d_compare slow_dentry_cmp(
+               const struct dentry *parent,
+               struct inode *inode,
+               struct dentry *dentry,
+               unsigned int seq,
+               const struct qstr *name)
+{
+       int tlen = dentry->d_name.len;
+       const char *tname = dentry->d_name.name;
+       struct inode *i = dentry->d_inode;
+
+       if (read_seqcount_retry(&dentry->d_seq, seq)) {
+               cpu_relax();
+               return D_COMP_SEQRETRY;
+       }
+       if (parent->d_op->d_compare(parent, inode,
+                               dentry, i,
+                               tlen, tname, name))
+               return D_COMP_NOMATCH;
+       return D_COMP_OK;
+}
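
slow_dentry_cmp() leans on the standard seqcount reader pattern described in
the comment above: snapshot the fields, then check the sequence count and
retry if a writer (rename) intervened. The same pattern in a self-contained
user-space form, with C11 atomics standing in for read_seqcount_begin() and
read_seqcount_retry() (a sketch, not the kernel helpers):

#include <stdatomic.h>
#include <string.h>

struct obj {
	atomic_uint seq;	/* even = stable, odd = writer in progress */
	char name[32];
	unsigned len;
};

/* copy out a consistent (name, len) snapshot, retrying across writers */
static unsigned snapshot(struct obj *o, char *buf, size_t bufsz)
{
	unsigned seq, len;

	do {
		do {	/* wait for an even (stable) sequence value */
			seq = atomic_load_explicit(&o->seq, memory_order_acquire);
		} while (seq & 1);

		len = o->len;
		if (len >= bufsz)
			len = bufsz - 1;
		memcpy(buf, o->name, len);

		atomic_thread_fence(memory_order_acquire);
		/* a writer bumps seq to odd before and to a new even value
		 * after updating, so any overlap forces another pass */
	} while (atomic_load_explicit(&o->seq, memory_order_relaxed) != seq);

	buf[len] = '\0';
	return len;
}
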
+
 /**
  * __d_lookup_rcu - search for a dentry (racy, store-free)
  * @parent: parent dentry
  * @name: qstr of name we wish to find
- * @seq: returns d_seq value at the point where the dentry was found
+ * @seqp: returns d_seq value at the point where the dentry was found
  * @inode: returns dentry->d_inode when the inode was found valid.
  * Returns: dentry, or NULL
  *
@@ -1712,15 +1828,17 @@ EXPORT_SYMBOL(d_add_ci);
  * the returned dentry, so long as its parent's seqlock is checked after the
  * child is looked up. Thus, an interlocking stepping of sequence lock checks
  * is formed, giving integrity down the path walk.
+ *
+ * NOTE! The caller *has* to check the resulting dentry against the sequence
+ * number we've returned before using any of the resulting dentry state!
  */
 struct dentry *__d_lookup_rcu(const struct dentry *parent,
                                const struct qstr *name,
-                               unsigned *seqp, struct inode **inode)
+                               unsigned *seqp, struct inode *inode)
 {
-       unsigned int len = name->len;
-       unsigned int hash = name->hash;
+       u64 hashlen = name->hash_len;
        const unsigned char *str = name->name;
-       struct hlist_bl_head *b = d_hash(parent, hash);
+       struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
        struct hlist_bl_node *node;
        struct dentry *dentry;
 
@@ -1746,49 +1864,47 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
         */
        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
                unsigned seq;
-               struct inode *i;
-               const char *tname;
-               int tlen;
-
-               if (dentry->d_name.hash != hash)
-                       continue;
 
 seqretry:
-               seq = read_seqcount_begin(&dentry->d_seq);
+               /*
+                * The dentry sequence count protects us from concurrent
+                * renames, and thus protects inode, parent and name fields.
+                *
+                * The caller must perform a seqcount check in order
+                * to do anything useful with the returned dentry,
+                * including using the 'd_inode' pointer.
+                *
+                * NOTE! We do a "raw" seqcount_begin here. That means that
+                * we don't wait for the sequence count to stabilize if it
+                * is in the middle of a sequence change. If we do the slow
+                * dentry compare, we will do seqretries until it is stable,
+                * and if we end up with a successful lookup, we actually
+                * want to exit RCU lookup anyway.
+                */
+               seq = raw_seqcount_begin(&dentry->d_seq);
                if (dentry->d_parent != parent)
                        continue;
                if (d_unhashed(dentry))
                        continue;
-               tlen = dentry->d_name.len;
-               tname = dentry->d_name.name;
-               i = dentry->d_inode;
-               prefetch(tname);
-               /*
-                * This seqcount check is required to ensure name and
-                * len are loaded atomically, so as not to walk off the
-                * edge of memory when walking. If we could load this
-                * atomically some other way, we could drop this check.
-                */
-               if (read_seqcount_retry(&dentry->d_seq, seq))
-                       goto seqretry;
+               *seqp = seq;
+
                if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
-                       if (parent->d_op->d_compare(parent, *inode,
-                                               dentry, i,
-                                               tlen, tname, name))
+                       if (dentry->d_name.hash != hashlen_hash(hashlen))
                                continue;
-               } else {
-                       if (dentry_cmp(tname, tlen, str, len))
+                       switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) {
+                       case D_COMP_OK:
+                               return dentry;
+                       case D_COMP_NOMATCH:
                                continue;
+                       default:
+                               goto seqretry;
+                       }
                }
-               /*
-                * No extra seqcount check is required after the name
-                * compare. The caller must perform a seqcount check in
-                * order to do anything useful with the returned dentry
-                * anyway.
-                */
-               *seqp = seq;
-               *inode = i;
-               return dentry;
+
+               if (dentry->d_name.hash_len != hashlen)
+                       continue;
+               if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
+                       return dentry;
        }
        return NULL;
 }
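
The NOTE about raw_seqcount_begin() above is the key subtlety of this loop:
unlike read_seqcount_begin(), it does not spin waiting for an in-flight
writer, it just rounds the value down to even, so the caller's later retry
check is guaranteed to fail if a rename was in progress. A simplified sketch
of the two behaviours, modeled on the seqlock helpers this code uses (memory
barriers omitted):

/* spin until the writer is done, then return the stable (even) value */
static inline unsigned sketch_read_seqcount_begin(const unsigned *seq)
{
	unsigned ret;

	while ((ret = *(volatile const unsigned *)seq) & 1)
		;	/* writer in progress: wait */
	return ret;
}

/* do not wait: clear the low bit, so an in-progress write simply shows up
 * as "sequence changed" at the later retry check */
static inline unsigned sketch_raw_seqcount_begin(const unsigned *seq)
{
	return *(volatile const unsigned *)seq & ~1u;
}
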
@@ -1867,8 +1983,6 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
        rcu_read_lock();
        
        hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
-               const char *tname;
-               int tlen;
 
                if (dentry->d_name.hash != hash)
                        continue;
@@ -1883,15 +1997,17 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
                 * It is safe to compare names since d_move() cannot
                 * change the qstr (protected by d_lock).
                 */
-               tlen = dentry->d_name.len;
-               tname = dentry->d_name.name;
                if (parent->d_flags & DCACHE_OP_COMPARE) {
+                       int tlen = dentry->d_name.len;
+                       const char *tname = dentry->d_name.name;
                        if (parent->d_op->d_compare(parent, parent->d_inode,
                                                dentry, dentry->d_inode,
                                                tlen, tname, name))
                                goto next;
                } else {
-                       if (dentry_cmp(tname, tlen, str, len))
+                       if (dentry->d_name.len != len)
+                               goto next;
+                       if (dentry_cmp(dentry, str, len))
                                goto next;
                }
 
@@ -2275,14 +2391,13 @@ static struct dentry *__d_unalias(struct inode *inode,
                struct dentry *dentry, struct dentry *alias)
 {
        struct mutex *m1 = NULL, *m2 = NULL;
-       struct dentry *ret;
+       struct dentry *ret = ERR_PTR(-EBUSY);
 
        /* If alias and dentry share a parent, then no extra locks required */
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;
 
        /* See lock_rename() */
-       ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
                goto out_err;
        m1 = &dentry->d_sb->s_vfs_rename_mutex;
@@ -2290,8 +2405,10 @@ static struct dentry *__d_unalias(struct inode *inode,
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-       __d_move(alias, dentry);
-       ret = alias;
+       if (likely(!d_mountpoint(alias))) {
+               __d_move(alias, dentry);
+               ret = alias;
+       }
 out_err:
        spin_unlock(&inode->i_lock);
        if (m2)
@@ -2381,6 +2498,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                        if (d_ancestor(alias, dentry)) {
                                /* Check for loops */
                                actual = ERR_PTR(-ELOOP);
+                               spin_unlock(&inode->i_lock);
                        } else if (IS_ROOT(alias)) {
                                /* Is this an anonymous mountpoint that we
                                 * could splice into our tree? */
@@ -2390,7 +2508,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
-                                * aliasing */
+                                * aliasing. This drops inode->i_lock */
                                actual = __d_unalias(inode, dentry, alias);
                        }
                        write_sequnlock(&rename_lock);
@@ -2465,7 +2583,7 @@ static int prepend_path(const struct path *path,
        bool slash = false;
        int error = 0;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -2496,7 +2614,7 @@ static int prepend_path(const struct path *path,
                error = prepend(buffer, buflen, "/", 1);
 
 out:
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return error;
 
 global_root:
@@ -2512,7 +2630,7 @@ global_root:
        if (!slash)
                error = prepend(buffer, buflen, "/", 1);
        if (!error)
-               error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
+               error = is_mounted(vfsmnt) ? 1 : 2;
        goto out;
 }
 
@@ -2921,6 +3039,8 @@ resume:
        return;
 
 rename_retry:
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
@@ -2983,6 +3103,7 @@ static void __init dcache_init_early(void)
                                        HASH_EARLY,
                                        &d_hash_shift,
                                        &d_hash_mask,
+                                       0,
                                        0);
 
        for (loop = 0; loop < (1U << d_hash_shift); loop++)
@@ -3013,6 +3134,7 @@ static void __init dcache_init(void)
                                        0,
                                        &d_hash_shift,
                                        &d_hash_mask,
+                                       0,
                                        0);
 
        for (loop = 0; loop < (1U << d_hash_shift); loop++)