fuse: postpone end_page_writeback() in fuse_writepage_locked()
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5f2c682..3be5718 100644
@@ -22,7 +22,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
@@ -52,11 +51,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -68,6 +62,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 {
        return test_bit(BDI_writeback_running, &bdi->state);
 }
+EXPORT_SYMBOL(writeback_in_progress);
 
 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
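
The newly added export makes writeback_in_progress() available to modular
filesystems. A minimal sketch of what that enables (the helper below is
hypothetical, not part of this patch):

    #include <linux/fs.h>
    #include <linux/writeback.h>
    #include <linux/backing-dev.h>

    /*
     * Hypothetical modular-fs helper: avoid kicking a flusher that is
     * already running. BDI_writeback_running is set for the duration
     * of a flusher pass.
     */
    static bool myfs_should_kick_flusher(struct super_block *sb)
    {
            return !writeback_in_progress(sb->s_bdi);
    }
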
@@ -92,20 +87,6 @@ static inline struct inode *wb_inode(struct list_head *head)
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
-static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
-{
-       if (bdi->wb.task) {
-               wake_up_process(bdi->wb.task);
-       } else {
-               /*
-                * The bdi thread isn't there, wake up the forker thread which
-                * will create and run it.
-                */
-               wake_up_process(default_backing_dev_info.wb.task);
-       }
-}
-
 static void bdi_queue_work(struct backing_dev_info *bdi,
                           struct wb_writeback_work *work)
 {
@@ -113,10 +94,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
        spin_lock_bh(&bdi->wb_lock);
        list_add_tail(&work->list, &bdi->work_list);
-       if (!bdi->wb.task)
-               trace_writeback_nothread(bdi, work);
-       bdi_wakeup_flusher(bdi);
        spin_unlock_bh(&bdi->wb_lock);
+
+       mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 static void
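
This hunk is the heart of the conversion: instead of waking a dedicated
flusher kthread (or the forker thread in default_backing_dev_info when none
exists), queuing work now kicks the bdi's delayed work item on bdi_wq with
zero delay. mod_delayed_work() also reprograms the timer of an
already-pending item, so an immediate kick composes safely with a pending
periodic requeue. A self-contained sketch of the pattern, with toy names
standing in for bdi_wq and wb.dwork:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *toy_wq;     /* stands in for bdi_wq */

    struct toy_flusher {
            spinlock_t              lock;
            struct list_head        work_list;
            struct delayed_work     dwork;      /* stands in for wb.dwork */
    };

    static void toy_queue(struct toy_flusher *f, struct list_head *req)
    {
            spin_lock_bh(&f->lock);
            list_add_tail(req, &f->work_list);
            spin_unlock_bh(&f->lock);

            /* Fire the worker now, even if it was parked on a long delay. */
            mod_delayed_work(toy_wq, &f->dwork, 0);
    }
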
@@ -131,10 +111,8 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
         */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
-               if (bdi->wb.task) {
-                       trace_writeback_nowork(bdi);
-                       wake_up_process(bdi->wb.task);
-               }
+               trace_writeback_nowork(bdi);
+               mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
                return;
        }
 
@@ -181,9 +159,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
         * writeback as soon as there is no other work to do.
         */
        trace_writeback_wake_background(bdi);
-       spin_lock_bh(&bdi->wb_lock);
-       bdi_wakeup_flusher(bdi);
-       spin_unlock_bh(&bdi->wb_lock);
+       mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 /*
@@ -232,6 +208,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 static void inode_sync_complete(struct inode *inode)
 {
        inode->i_state &= ~I_SYNC;
+       /* If inode is clean and unused, put it into LRU now... */
+       inode_add_lru(inode);
        /* Waiters must see I_SYNC cleared before being woken up */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
@@ -253,7 +231,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
 }
 
 /*
- * Move expired (dirtied after work->older_than_this) dirty inodes from
+ * Move expired (dirtied before work->older_than_this) dirty inodes from
  * @delaying_queue to @dispatch_queue.
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
@@ -320,15 +298,24 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
-               return inode->i_sb->s_op->write_inode(inode, wbc);
+       int ret;
+
+       if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
+               trace_writeback_write_inode_start(inode, wbc);
+               ret = inode->i_sb->s_op->write_inode(inode, wbc);
+               trace_writeback_write_inode(inode, wbc);
+               return ret;
+       }
        return 0;
 }
 
 /*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
  */
-static void inode_wait_for_writeback(struct inode *inode)
+static void __inode_wait_for_writeback(struct inode *inode)
+       __releases(inode->i_lock)
+       __acquires(inode->i_lock)
 {
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;
@@ -342,6 +329,36 @@ static void inode_wait_for_writeback(struct inode *inode)
 }
 
 /*
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
+ */
+void inode_wait_for_writeback(struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       __inode_wait_for_writeback(inode);
+       spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is intended for callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+       __releases(inode->i_lock)
+{
+       DEFINE_WAIT(wait);
+       wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+       int sleep;
+
+       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+       sleep = inode->i_state & I_SYNC;
+       spin_unlock(&inode->i_lock);
+       if (sleep)
+               schedule();
+       finish_wait(wqh, &wait);
+}
+
+/*
  * Find proper writeback list for the inode depending on its current state and
  * possibly also change of its state while we were doing writeback.  Here we
  * handle things such as livelock prevention or fairness of writeback among
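
The new inode_sleep_on_writeback() trades safety for not needing a
reference: unlike __inode_wait_for_writeback(), which loops until I_SYNC
clears and requires the caller to pin the inode, it performs a single
wait-bit sleep and drops i_lock, so the inode may already be freed when it
returns. The call sites below restate that contract; condensed into a
sketch as it would sit inside fs/fs-writeback.c (toy_wait_for_flusher is a
hypothetical name):

    static void toy_wait_for_flusher(struct bdi_writeback *wb,
                                     struct inode *inode)
            __releases(wb->list_lock)
            __acquires(wb->list_lock)
    {
            spin_lock(&inode->i_lock);
            spin_unlock(&wb->list_lock);
            /* Drops i_lock; "inode" must not be touched afterwards. */
            inode_sleep_on_writeback(inode);
            /* The inode may be gone by now; callers restart the walk. */
            spin_lock(&wb->list_lock);
    }
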
@@ -410,8 +427,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
  * setting I_SYNC flag and calling inode_sync_complete() to clear it.
  */
 static int
-__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
-                        struct writeback_control *wbc)
+__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 {
        struct address_space *mapping = inode->i_mapping;
        long nr_to_write = wbc->nr_to_write;
@@ -420,6 +436,8 @@ __writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 
        WARN_ON(!(inode->i_state & I_SYNC));
 
+       trace_writeback_single_inode_start(inode, wbc, nr_to_write);
+
        ret = do_writepages(mapping, wbc);
 
        /*
@@ -479,9 +497,11 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out;
                /*
-                * It's a data-integrity sync.  We must wait.
+                * It's a data-integrity sync. We must wait. Since callers hold
+                * inode reference or inode has I_WILL_FREE set, it cannot go
+                * away under us.
                 */
-               inode_wait_for_writeback(inode);
+               __inode_wait_for_writeback(inode);
        }
        WARN_ON(inode->i_state & I_SYNC);
        /*
@@ -496,7 +516,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
        inode->i_state |= I_SYNC;
        spin_unlock(&inode->i_lock);
 
-       ret = __writeback_single_inode(inode, wb, wbc);
+       ret = __writeback_single_inode(inode, wbc);
 
        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
@@ -547,10 +567,6 @@ static long writeback_chunk_size(struct backing_dev_info *bdi,
 /*
  * Write a portion of b_io inodes which belong to @sb.
  *
- * If @only_this_sb is true, then find and write all such
- * inodes. Otherwise write only ones which go sequentially
- * in reverse order.
- *
  * Return the number of pages and/or inodes written.
  */
 static long writeback_sb_inodes(struct super_block *sb,
@@ -593,8 +609,8 @@ static long writeback_sb_inodes(struct super_block *sb,
                }
 
                /*
-                * Don't bother with new inodes or inodes beeing freed, first
-                * kind does not need peridic writeout yet, and for the latter
+                * Don't bother with new inodes or inodes being freed, first
+                * kind does not need periodic writeout yet, and for the latter
                 * kind writeout is handled by the freer.
                 */
                spin_lock(&inode->i_lock);
@@ -620,21 +636,30 @@ static long writeback_sb_inodes(struct super_block *sb,
                }
                spin_unlock(&wb->list_lock);
 
-               __iget(inode);
                /*
                 * We already requeued the inode if it had I_SYNC set and we
                 * are doing WB_SYNC_NONE writeback. So this catches only the
                 * WB_SYNC_ALL case.
                 */
-               if (inode->i_state & I_SYNC)
-                       inode_wait_for_writeback(inode);
+               if (inode->i_state & I_SYNC) {
+                       /* Wait for I_SYNC. This function drops i_lock... */
+                       inode_sleep_on_writeback(inode);
+                       /* Inode may be gone, start again */
+                       spin_lock(&wb->list_lock);
+                       continue;
+               }
                inode->i_state |= I_SYNC;
                spin_unlock(&inode->i_lock);
+
                write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;
 
-               __writeback_single_inode(inode, wb, &wbc);
+               /*
+                * We use I_SYNC to pin the inode in memory. While it is set
+                * evict_inode() will wait so the inode cannot be freed.
+                */
+               __writeback_single_inode(inode, &wbc);
 
                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
@@ -645,10 +670,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                requeue_inode(inode, wb, &wbc);
                inode_sync_complete(inode);
                spin_unlock(&inode->i_lock);
-               spin_unlock(&wb->list_lock);
-               iput(inode);
-               cond_resched();
-               spin_lock(&wb->list_lock);
+               cond_resched_lock(&wb->list_lock);
                /*
                 * bail out to wb_writeback() often enough to check
                 * background threshold and other termination conditions.
@@ -843,8 +865,8 @@ static long wb_writeback(struct bdi_writeback *wb,
                        inode = wb_inode(wb->b_more_io.prev);
                        spin_lock(&inode->i_lock);
                        spin_unlock(&wb->list_lock);
-                       inode_wait_for_writeback(inode);
-                       spin_unlock(&inode->i_lock);
+                       /* This function drops i_lock... */
+                       inode_sleep_on_writeback(inode);
                        spin_lock(&wb->list_lock);
                }
        }
@@ -978,66 +1000,49 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 
 /*
  * Handle writeback of dirty data for the device backed by this bdi. Also
- * wakes up periodically and does kupdated style flushing.
+ * reschedules periodically and does kupdated style flushing.
  */
-int bdi_writeback_thread(void *data)
+void bdi_writeback_workfn(struct work_struct *work)
 {
-       struct bdi_writeback *wb = data;
+       struct bdi_writeback *wb = container_of(to_delayed_work(work),
+                                               struct bdi_writeback, dwork);
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;
 
+       set_worker_desc("flush-%s", dev_name(bdi->dev));
        current->flags |= PF_SWAPWRITE;
-       set_freezable();
-       wb->last_active = jiffies;
-
-       /*
-        * Our parent may run at a different priority, just set us to normal
-        */
-       set_user_nice(current, 0);
 
-       trace_writeback_thread_start(bdi);
-
-       while (!kthread_freezable_should_stop(NULL)) {
+       if (likely(!current_is_workqueue_rescuer() ||
+                  list_empty(&bdi->bdi_list))) {
                /*
-                * Remove own delayed wake-up timer, since we are already awake
-                * and we'll take care of the preriodic write-back.
+                * The normal path.  Keep writing back @bdi until its
+                * work_list is empty.  Note that this path is also taken
+                * if @bdi is shutting down even when we're running off the
+                * rescuer as work_list needs to be drained.
                 */
-               del_timer(&wb->wakeup_timer);
-
-               pages_written = wb_do_writeback(wb, 0);
-
+               do {
+                       pages_written = wb_do_writeback(wb, 0);
+                       trace_writeback_pages_written(pages_written);
+               } while (!list_empty(&bdi->work_list));
+       } else {
+               /*
+                * bdi_wq can't get enough workers and we're running off
+                * the emergency worker.  Don't hog it.  Hopefully, 1024 is
+                * enough for efficient IO.
+                */
+               pages_written = writeback_inodes_wb(&bdi->wb, 1024,
+                                                   WB_REASON_FORKER_THREAD);
                trace_writeback_pages_written(pages_written);
-
-               if (pages_written)
-                       wb->last_active = jiffies;
-
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
-                       __set_current_state(TASK_RUNNING);
-                       continue;
-               }
-
-               if (wb_has_dirty_io(wb) && dirty_writeback_interval)
-                       schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-               else {
-                       /*
-                        * We have nothing to do, so can go sleep without any
-                        * timeout and save power. When a work is queued or
-                        * something is made dirty - we will be woken up.
-                        */
-                       schedule();
-               }
        }
 
-       /* Flush any work that raced with us exiting */
-       if (!list_empty(&bdi->work_list))
-               wb_do_writeback(wb, 1);
+       if (!list_empty(&bdi->work_list) ||
+           (wb_has_dirty_io(wb) && dirty_writeback_interval))
+               queue_delayed_work(bdi_wq, &wb->dwork,
+                       msecs_to_jiffies(dirty_writeback_interval * 10));
 
-       trace_writeback_thread_stop(bdi);
-       return 0;
+       current->flags &= ~PF_SWAPWRITE;
 }
 
-
 /*
  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
  * the whole world.
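
With this hunk the per-bdi flusher kthread is gone. The periodic kupdate
behaviour now falls out of the work item re-arming itself, and the
facilities the kthread managed by hand come from the workqueue instead:
WQ_FREEZABLE replaces set_freezable(), and WQ_MEM_RECLAIM supplies the
rescuer that the current_is_workqueue_rescuer() branch guards against
(bdi_wq itself is created elsewhere in this series, in mm/backing-dev.c).
A toy model of the self-rearming pattern, all names hypothetical:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *toy_wq;     /* stands in for bdi_wq */
    static struct delayed_work toy_dwork;       /* stands in for wb.dwork */

    static void toy_workfn(struct work_struct *work)
    {
            /* ... drain pending work, as bdi_writeback_workfn() does ... */

            /* Re-arm for the next periodic pass instead of sleeping. */
            queue_delayed_work(toy_wq, &toy_dwork, msecs_to_jiffies(5000));
    }

    static int __init toy_init(void)
    {
            toy_wq = alloc_workqueue("toy_flush",
                                     WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
            if (!toy_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&toy_dwork, toy_workfn);
            mod_delayed_work(toy_wq, &toy_dwork, 0);    /* first kick */
            return 0;
    }
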
@@ -1116,8 +1121,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+               trace_writeback_dirty_inode_start(inode, flags);
+
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode, flags);
+
+               trace_writeback_dirty_inode(inode, flags);
        }
 
        /*
@@ -1274,6 +1283,8 @@ void writeback_inodes_sb_nr(struct super_block *sb,
                .reason                 = reason,
        };
 
+       if (sb->s_bdi == &noop_backing_dev_info)
+               return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
@@ -1296,47 +1307,43 @@ void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 EXPORT_SYMBOL(writeback_inodes_sb);
 
 /**
- * writeback_inodes_sb_if_idle -       start writeback if none underway
+ * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
  * @sb: the superblock
- * @reason: reason why some writeback work was initiated
+ * @nr: the number of pages to write
+ * @reason: the reason for writeback
  *
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
+int try_to_writeback_inodes_sb_nr(struct super_block *sb,
+                                 unsigned long nr,
+                                 enum wb_reason reason)
 {
-       if (!writeback_in_progress(sb->s_bdi)) {
-               down_read(&sb->s_umount);
-               writeback_inodes_sb(sb, reason);
-               up_read(&sb->s_umount);
+       if (writeback_in_progress(sb->s_bdi))
                return 1;
-       } else
+
+       if (!down_read_trylock(&sb->s_umount))
                return 0;
+
+       writeback_inodes_sb_nr(sb, nr, reason);
+       up_read(&sb->s_umount);
+       return 1;
 }
-EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
 
 /**
- * writeback_inodes_sb_nr_if_idle      -       start writeback if none underway
+ * try_to_writeback_inodes_sb - try to start writeback if none underway
  * @sb: the superblock
- * @nr: the number of pages to write
  * @reason: reason why some writeback work was initiated
  *
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Implemented by try_to_writeback_inodes_sb_nr().
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-                                  unsigned long nr,
-                                  enum wb_reason reason)
+int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-       if (!writeback_in_progress(sb->s_bdi)) {
-               down_read(&sb->s_umount);
-               writeback_inodes_sb_nr(sb, nr, reason);
-               up_read(&sb->s_umount);
-               return 1;
-       } else
-               return 0;
+       return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
-EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb);
 
 /**
  * sync_inodes_sb      -       sync sb inode pages
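
Besides the rename, behaviour changes: the old *_if_idle variants took
s_umount with a blocking down_read(), which could deadlock against an
in-flight umount; the replacements use down_read_trylock() and report 0
when the lock cannot be taken. A hedged sketch of a typical caller (the
helper name and its error policy are hypothetical):

    #include <linux/fs.h>
    #include <linux/writeback.h>

    /* Hypothetical ENOSPC-path helper, not part of this patch. */
    static int myfs_try_reclaim_space(struct super_block *sb)
    {
            /*
             * Returns 1 when writeback is already running or was
             * started; returns 0 only when s_umount could not be
             * taken, i.e. an umount or remount is in flight.
             */
            if (!try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE))
                    return -EBUSY;
            return 0;
    }
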
@@ -1357,6 +1364,9 @@ void sync_inodes_sb(struct super_block *sb)
                .reason         = WB_REASON_SYNC,
        };
 
+       /* Nothing to do? */
+       if (sb->s_bdi == &noop_backing_dev_info)
+               return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
        bdi_queue_work(sb->s_bdi, &work);