nv-tegra.nvidia Code Review - linux-2.6.git/blobdiff - fs/fs-writeback.c
writeback: cleanup writeback_single_inode()
index c5e91225501dcbe749be8450ad848a6c85a799a1..916e83489caa8fef98684f4a7fce532c7cb454f5 100644
@@ -41,24 +41,24 @@ struct wb_writeback_args {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
-       int for_kupdate;
-       int range_cyclic;
+       int for_kupdate:1;
+       int range_cyclic:1;
+       int for_background:1;
 };
 
 /*
  * Work items for the bdi_writeback threads
  */
 struct bdi_work {
-       struct list_head list;
-       struct list_head wait_list;
-       struct rcu_head rcu_head;
+       struct list_head list;          /* pending work list */
+       struct rcu_head rcu_head;       /* for RCU free/clear of work */
 
-       unsigned long seen;
-       atomic_t pending;
+       unsigned long seen;             /* threads that have seen this work */
+       atomic_t pending;               /* number of threads still to do work */
 
-       struct wb_writeback_args args;
+       struct wb_writeback_args args;  /* writeback arguments */
 
-       unsigned long state;
+       unsigned long state;            /* flag bits, see WS_* */
 };
 
 enum {
@@ -75,14 +75,10 @@ static inline bool bdi_work_on_stack(struct bdi_work *work)
 }
 
 static inline void bdi_work_init(struct bdi_work *work,
-                                struct writeback_control *wbc)
+                                struct wb_writeback_args *args)
 {
        INIT_RCU_HEAD(&work->rcu_head);
-       work->args.sb = wbc->sb;
-       work->args.nr_pages = wbc->nr_to_write;
-       work->args.sync_mode = wbc->sync_mode;
-       work->args.range_cyclic = wbc->range_cyclic;
-       work->args.for_kupdate = 0;
+       work->args = *args;
        work->state = WS_USED;
 }
 
@@ -102,6 +98,11 @@ static void bdi_work_clear(struct bdi_work *work)
 {
        clear_bit(WS_USED_B, &work->state);
        smp_mb__after_clear_bit();
+       /*
+        * work can have disappeared at this point. bit waitq functions
+        * should be able to tolerate this, provided bdi_sched_wait does
+        * not dereference its pointer argument.
+        */
        wake_up_bit(&work->state, WS_USED_B);
 }
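
For reference, the wait side that pairs with this wakeup is bdi_wait_on_work_clear() further down. Its wait_on_bit() action is assumed to be a trivial helper along the lines of the sketch below (not part of this hunk), which is why the comment above only requires that it never dereference its pointer argument:

static int bdi_sched_wait(void *word)
{
        /* deliberately never touches *word: the bdi_work may already be gone */
        schedule();
        return 0;
}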
 
@@ -118,6 +119,7 @@ static void bdi_work_free(struct rcu_head *head)
 static void wb_work_complete(struct bdi_work *work)
 {
        const enum writeback_sync_modes sync_mode = work->args.sync_mode;
+       int onstack = bdi_work_on_stack(work);
 
        /*
         * For allocated work, we can clear the done/seen bit right here.
@@ -125,9 +127,9 @@ static void wb_work_complete(struct bdi_work *work)
         * to after the RCU grace period, since the stack could be invalidated
         * as soon as bdi_work_clear() has done the wakeup.
         */
-       if (!bdi_work_on_stack(work))
+       if (!onstack)
                bdi_work_clear(work);
-       if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
+       if (sync_mode == WB_SYNC_NONE || onstack)
                call_rcu(&work->rcu_head, bdi_work_free);
 }
 
@@ -150,21 +152,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-       if (work) {
-               work->seen = bdi->wb_mask;
-               BUG_ON(!work->seen);
-               atomic_set(&work->pending, bdi->wb_cnt);
-               BUG_ON(!bdi->wb_cnt);
-
-               /*
-                * Make sure stores are seen before it appears on the list
-                */
-               smp_mb();
+       work->seen = bdi->wb_mask;
+       BUG_ON(!work->seen);
+       atomic_set(&work->pending, bdi->wb_cnt);
+       BUG_ON(!bdi->wb_cnt);
 
-               spin_lock(&bdi->wb_lock);
-               list_add_tail_rcu(&work->list, &bdi->work_list);
-               spin_unlock(&bdi->wb_lock);
-       }
+       /*
+        * list_add_tail_rcu() contains the necessary barriers to
+        * make sure the above stores are seen before the item is
+        * noticed on the list
+        */
+       spin_lock(&bdi->wb_lock);
+       list_add_tail_rcu(&work->list, &bdi->work_list);
+       spin_unlock(&bdi->wb_lock);
 
        /*
         * If the default thread isn't there, make sure we add it. When
@@ -175,15 +175,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
        else {
                struct bdi_writeback *wb = &bdi->wb;
 
-               /*
-                * If we failed allocating the bdi work item, wake up the wb
-                * thread always. As a safety precaution, it'll flush out
-                * everything
-                */
-               if (!wb_has_dirty_io(wb)) {
-                       if (work)
-                               wb_clear_pending(wb, work);
-               } else if (wb->task)
+               if (wb->task)
                        wake_up_process(wb->task);
        }
 }
@@ -198,37 +190,84 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
                    TASK_UNINTERRUPTIBLE);
 }
 
-static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
+static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
+                                struct wb_writeback_args *args)
 {
        struct bdi_work *work;
 
+       /*
+        * This is WB_SYNC_NONE writeback, so if allocation fails just
+        * wake up the thread for old dirty data writeback
+        */
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
-       if (work)
-               bdi_work_init(work, wbc);
+       if (work) {
+               bdi_work_init(work, args);
+               bdi_queue_work(bdi, work);
+       } else {
+               struct bdi_writeback *wb = &bdi->wb;
 
-       return work;
+               if (wb->task)
+                       wake_up_process(wb->task);
+       }
 }
 
-void bdi_start_writeback(struct writeback_control *wbc)
+/**
+ * bdi_sync_writeback - start and wait for writeback
+ * @bdi: the backing device to write from
+ * @sb: write inodes from this super_block
+ *
+ * Description:
+ *   This does WB_SYNC_ALL data integrity writeback and waits for the
+ *   IO to complete. Callers must hold the sb s_umount semaphore for
+ *   reading, to avoid having the super disappear before we are done.
+ */
+static void bdi_sync_writeback(struct backing_dev_info *bdi,
+                              struct super_block *sb)
 {
-       /*
-        * WB_SYNC_NONE is opportunistic writeback. If this allocation fails,
-        * bdi_queue_work() will wake up the thread and flush old data. This
-        * should ensure some amount of progress in freeing memory.
-        */
-       if (wbc->sync_mode != WB_SYNC_ALL) {
-               struct bdi_work *w = bdi_alloc_work(wbc);
+       struct wb_writeback_args args = {
+               .sb             = sb,
+               .sync_mode      = WB_SYNC_ALL,
+               .nr_pages       = LONG_MAX,
+               .range_cyclic   = 0,
+       };
+       struct bdi_work work;
 
-               bdi_queue_work(wbc->bdi, w);
-       } else {
-               struct bdi_work work;
+       bdi_work_init(&work, &args);
+       work.state |= WS_ONSTACK;
 
-               bdi_work_init(&work, wbc);
-               work.state |= WS_ONSTACK;
+       bdi_queue_work(bdi, &work);
+       bdi_wait_on_work_clear(&work);
+}
 
-               bdi_queue_work(wbc->bdi, &work);
-               bdi_wait_on_work_clear(&work);
+/**
+ * bdi_start_writeback - start writeback
+ * @bdi: the backing device to write from
+ * @nr_pages: the number of pages to write
+ *
+ * Description:
+ *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ *   started when this function returns; we make no guarantees on
+ *   completion. The caller need not hold the sb s_umount semaphore.
+ *
+ */
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+{
+       struct wb_writeback_args args = {
+               .sync_mode      = WB_SYNC_NONE,
+               .nr_pages       = nr_pages,
+               .range_cyclic   = 1,
+       };
+
+       /*
+        * We treat @nr_pages=0 as the special case to do background writeback,
+        * i.e. to sync pages until the background dirty threshold is reached.
+        */
+       if (!nr_pages) {
+               args.nr_pages = LONG_MAX;
+               args.for_background = 1;
        }
+
+       bdi_alloc_queue_work(bdi, &args);
 }
 
 /*
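
Usage note for the new entry point (illustrative calls; the bdi pointer and the page count here are placeholders):

        /* flush up to 1024 pages from this bdi; returns once the work is queued */
        bdi_start_writeback(bdi, 1024);

        /* nr_pages == 0: background writeback until below the dirty threshold */
        bdi_start_writeback(bdi, 0);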
@@ -410,8 +449,13 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-               if (!(inode->i_state & I_DIRTY) &&
-                   mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+               if (inode->i_state & I_DIRTY) {
+                       /*
+                        * Someone redirtied the inode while we were writing back
+                        * the pages.
+                        */
+                       redirty_tail(inode);
+               } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
                         * sometimes bales out without doing anything. Redirty
@@ -455,12 +499,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
                                inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
-               } else if (inode->i_state & I_DIRTY) {
-                       /*
-                        * Someone redirtied the inode while were writing back
-                        * the pages.
-                        */
-                       redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, inuse
@@ -677,6 +715,7 @@ static long wb_writeback(struct bdi_writeback *wb,
        };
        unsigned long oldest_jif;
        long wrote = 0;
+       struct inode *inode;
 
        if (wbc.for_kupdate) {
                wbc.older_than_this = &oldest_jif;
@@ -690,20 +729,16 @@ static long wb_writeback(struct bdi_writeback *wb,
 
        for (;;) {
                /*
-                * Don't flush anything for non-integrity writeback where
-                * no nr_pages was given
+                * Stop writeback when nr_pages has been consumed
                 */
-               if (!args->for_kupdate && args->nr_pages <= 0 &&
-                    args->sync_mode == WB_SYNC_NONE)
+               if (args->nr_pages <= 0)
                        break;
 
                /*
-                * If no specific pages were given and this is just a
-                * periodic background writeout and we are below the
-                * background dirty threshold, don't do anything
+                * For background writeout, stop when we are below the
+                * background dirty threshold
                 */
-               if (args->for_kupdate && args->nr_pages <= 0 &&
-                   !over_bground_thresh())
+               if (args->for_background && !over_bground_thresh())
                        break;
 
                wbc.more_io = 0;
@@ -717,9 +752,25 @@ static long wb_writeback(struct bdi_writeback *wb,
                /*
                 * If we ran out of stuff to write, bail unless more_io got set
                 */
-               if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
-                       if (wbc.more_io && !wbc.for_kupdate)
+               if (wbc.nr_to_write > 0) {
+                       if (wbc.more_io) {
+                               if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+                                       continue;
+                               /*
+                                * Nothing written. Wait for some inode to
+                                * become available for writeback. Otherwise
+                                * we'll just busyloop.
+                                */
+                               spin_lock(&inode_lock);
+                               if (!list_empty(&wb->b_more_io)) {
+                                       inode = list_entry(
+                                                       wb->b_more_io.prev,
+                                                       struct inode, i_list);
+                                       inode_wait_for_writeback(inode);
+                               }
+                               spin_unlock(&inode_lock);
                                continue;
+                       }
                        break;
                }
        }
@@ -729,7 +780,11 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 /*
  * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet
+ * wb thread yet. ->seen is initially set for each thread that exists
+ * for this device; when a thread first notices a piece of work it
+ * clears its bit. Depending on the writeback type, the thread will notify
+ * completion either on receiving the work (WB_SYNC_NONE) or after
+ * it is done (WB_SYNC_ALL).
  */
 static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
                                           struct bdi_writeback *wb)
@@ -739,8 +794,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
        rcu_read_lock();
 
        list_for_each_entry_rcu(work, &bdi->work_list, list) {
-               if (!test_and_clear_bit(wb->nr, &work->seen))
+               if (!test_bit(wb->nr, &work->seen))
                        continue;
+               clear_bit(wb->nr, &work->seen);
 
                ret = work;
                break;
@@ -851,8 +907,7 @@ int bdi_writeback_task(struct bdi_writeback *wb)
                }
 
                wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(wait_jiffies);
+               schedule_timeout_interruptible(wait_jiffies);
                try_to_freeze();
        }
 
@@ -860,67 +915,28 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. Expensive! If this is a data
- * integrity operation, writeback will be complete when this returns. If
- * we are simply called for WB_SYNC_NONE, then writeback will merely be
- * scheduled to run.
+ * Schedule writeback for all backing devices. This does WB_SYNC_NONE
+ * writeback, for integrity writeback see bdi_sync_writeback().
  */
-static void bdi_writeback_all(struct writeback_control *wbc)
+static void bdi_writeback_all(struct super_block *sb, long nr_pages)
 {
-       const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
+       struct wb_writeback_args args = {
+               .sb             = sb,
+               .nr_pages       = nr_pages,
+               .sync_mode      = WB_SYNC_NONE,
+       };
        struct backing_dev_info *bdi;
-       struct bdi_work *work;
-       LIST_HEAD(list);
-
-restart:
-       spin_lock(&bdi_lock);
 
-       list_for_each_entry(bdi, &bdi_list, bdi_list) {
-               struct bdi_work *work;
+       rcu_read_lock();
 
+       list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
 
-               /*
-                * If work allocation fails, do the writes inline. We drop
-                * the lock and restart the list writeout. This should be OK,
-                * since this happens rarely and because the writeout should
-                * eventually make more free memory available.
-                */
-               work = bdi_alloc_work(wbc);
-               if (!work) {
-                       struct writeback_control __wbc;
-
-                       /*
-                        * Not a data integrity writeout, just continue
-                        */
-                       if (!must_wait)
-                               continue;
-
-                       spin_unlock(&bdi_lock);
-                       __wbc = *wbc;
-                       __wbc.bdi = bdi;
-                       writeback_inodes_wbc(&__wbc);
-                       goto restart;
-               }
-               if (must_wait)
-                       list_add_tail(&work->wait_list, &list);
-
-               bdi_queue_work(bdi, work);
+               bdi_alloc_queue_work(bdi, &args);
        }
 
-       spin_unlock(&bdi_lock);
-
-       /*
-        * If this is for WB_SYNC_ALL, wait for pending work to complete
-        * before returning.
-        */
-       while (!list_empty(&list)) {
-               work = list_entry(list.next, struct bdi_work, wait_list);
-               list_del(&work->wait_list);
-               bdi_wait_on_work_clear(work);
-               call_rcu(&work->rcu_head, bdi_work_free);
-       }
+       rcu_read_unlock();
 }
 
 /*
@@ -929,17 +945,10 @@ restart:
  */
 void wakeup_flusher_threads(long nr_pages)
 {
-       struct writeback_control wbc = {
-               .sync_mode      = WB_SYNC_NONE,
-               .older_than_this = NULL,
-               .range_cyclic   = 1,
-       };
-
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
-       wbc.nr_to_write = nr_pages;
-       bdi_writeback_all(&wbc);
+       bdi_writeback_all(NULL, nr_pages);
 }
 
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
@@ -1086,7 +1095,7 @@ EXPORT_SYMBOL(__mark_inode_dirty);
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-static void wait_sb_inodes(struct writeback_control *wbc)
+static void wait_sb_inodes(struct super_block *sb)
 {
        struct inode *inode, *old_inode = NULL;
 
@@ -1094,7 +1103,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
-       WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));
+       WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
        spin_lock(&inode_lock);
 
@@ -1105,7 +1114,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
-       list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping;
 
                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
@@ -1145,14 +1154,8 @@ static void wait_sb_inodes(struct writeback_control *wbc)
  * for IO completion of submitted IO. The number of pages submitted is
  * returned.
  */
-long writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb)
 {
-       struct writeback_control wbc = {
-               .sb             = sb,
-               .sync_mode      = WB_SYNC_NONE,
-               .range_start    = 0,
-               .range_end      = LLONG_MAX,
-       };
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
        long nr_to_write;
@@ -1160,9 +1163,7 @@ long writeback_inodes_sb(struct super_block *sb)
        nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-       wbc.nr_to_write = nr_to_write;
-       bdi_writeback_all(&wbc);
-       return nr_to_write - wbc.nr_to_write;
+       bdi_writeback_all(sb, nr_to_write);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1173,20 +1174,10 @@ EXPORT_SYMBOL(writeback_inodes_sb);
  * This function writes and waits on any dirty inode belonging to this
  * super_block. The number of pages synced is returned.
  */
-long sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb)
 {
-       struct writeback_control wbc = {
-               .sb             = sb,
-               .sync_mode      = WB_SYNC_ALL,
-               .range_start    = 0,
-               .range_end      = LLONG_MAX,
-       };
-       long nr_to_write = LONG_MAX; /* doesn't actually matter */
-
-       wbc.nr_to_write = nr_to_write;
-       bdi_writeback_all(&wbc);
-       wait_sb_inodes(&wbc);
-       return nr_to_write - wbc.nr_to_write;
+       bdi_sync_writeback(sb->s_bdi, sb);
+       wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
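
With these changes the per-sb entry points no longer return a page count: writeback_inodes_sb() is fire-and-forget WB_SYNC_NONE writeback, while sync_inodes_sb() is blocking WB_SYNC_ALL writeback. A minimal sketch of a hypothetical caller (example_sync_fs is illustrative; it assumes sb->s_bdi is set and, for the wait case, that s_umount is held for reading as documented above):

static int example_sync_fs(struct super_block *sb, int wait)
{
        if (wait)
                /* WB_SYNC_ALL: queue on-stack work, wait for it, then wait_sb_inodes() */
                sync_inodes_sb(sb);
        else
                /* WB_SYNC_NONE: queue work against the dirty bdis and return immediately */
                writeback_inodes_sb(sb);
        return 0;
}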