NFS: Add functionality to allow waiting on all outstanding reads to complete
Trond Myklebust [Tue, 9 Apr 2013 01:38:12 +0000 (21:38 -0400)]
This will later allow the NFS locking code to wait for all outstanding
readahead I/O to complete before releasing byte-range locks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/pagelist.c
include/linux/nfs_fs.h

index 55b840f..c1c7a9d 100644 (file)
@@ -561,6 +561,7 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
        l_ctx->lockowner.l_owner = current->files;
        l_ctx->lockowner.l_pid = current->tgid;
        INIT_LIST_HEAD(&l_ctx->list);
+       nfs_iocounter_init(&l_ctx->io_count);
 }
 
 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
index 541c9eb..91e59a3 100644 (file)
@@ -229,6 +229,13 @@ extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                              struct nfs_pgio_header *hdr,
                              void (*release)(struct nfs_pgio_header *hdr));
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
+int nfs_iocounter_wait(struct nfs_io_counter *c);
+
+static inline void nfs_iocounter_init(struct nfs_io_counter *c)
+{
+       c->flags = 0;
+       atomic_set(&c->io_count, 0);
+}
 
 /* nfs2xdr.c */
 extern struct rpc_procinfo nfs_procedures[];
index 7f09330..29cfb7a 100644 (file)
@@ -84,6 +84,55 @@ nfs_page_free(struct nfs_page *p)
        kmem_cache_free(nfs_page_cachep, p);
 }
 
+static void
+nfs_iocounter_inc(struct nfs_io_counter *c)
+{
+       atomic_inc(&c->io_count);
+}
+
+static void
+nfs_iocounter_dec(struct nfs_io_counter *c)
+{
+       if (atomic_dec_and_test(&c->io_count)) {
+               clear_bit(NFS_IO_INPROGRESS, &c->flags);
+               smp_mb__after_clear_bit();
+               wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
+       }
+}
+
+static int
+__nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
+       DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
+       int ret = 0;
+
+       do {
+               prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
+               set_bit(NFS_IO_INPROGRESS, &c->flags);
+               if (atomic_read(&c->io_count) == 0)
+                       break;
+               ret = nfs_wait_bit_killable(&c->flags);
+       } while (atomic_read(&c->io_count) != 0);
+       finish_wait(wq, &q.wait);
+       return ret;
+}
+
+/**
+ * nfs_iocounter_wait - wait for i/o to complete
+ * @c: nfs_io_counter to use
+ *
+ * returns -ERESTARTSYS if interrupted by a fatal signal.
+ * Otherwise returns 0 once the io_count hits 0.
+ */
+int
+nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+       if (atomic_read(&c->io_count) == 0)
+               return 0;
+       return __nfs_iocounter_wait(c);
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
@@ -118,6 +167,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                return ERR_CAST(l_ctx);
        }
        req->wb_lock_context = l_ctx;
+       nfs_iocounter_inc(&l_ctx->io_count);
 
        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
@@ -177,6 +227,7 @@ static void nfs_clear_request(struct nfs_page *req)
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
+               nfs_iocounter_dec(&l_ctx->io_count);
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
index f6b1956..fc01d5c 100644 (file)
@@ -59,11 +59,18 @@ struct nfs_lockowner {
        pid_t l_pid;
 };
 
+#define NFS_IO_INPROGRESS 0
+struct nfs_io_counter {
+       unsigned long flags;
+       atomic_t io_count;
+};
+
 struct nfs_lock_context {
        atomic_t count;
        struct list_head list;
        struct nfs_open_context *open_context;
        struct nfs_lockowner lockowner;
+       struct nfs_io_counter io_count;
 };
 
 struct nfs4_state;