diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 25e7770..d196074 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
 #include <linux/syscalls.h>
 #include <linux/buffer_head.h>
 #include <linux/pagevec.h>
+#include <trace/events/writeback.h>
+
+/*
+ * Sleep at most 200ms at a time in balance_dirty_pages().
+ */
+#define MAX_PAUSE              max(HZ/5, 1)
+
+/*
+ * Estimate write bandwidth at 200ms intervals.
+ */
+#define BANDWIDTH_INTERVAL     max(HZ/5, 1)
 
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
@@ -44,18 +55,21 @@ static long ratelimit_pages = 32;
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-       return ratelimit_pages + ratelimit_pages / 2;
+       if (dirtied < ratelimit_pages)
+               dirtied = ratelimit_pages;
+
+       return dirtied + dirtied / 2;
 }
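
For a concrete feel of the new arithmetic, here is a minimal userspace sketch (illustrative only, not part of the patch; it assumes the default ratelimit_pages of 32): a light dirtier is still asked to write 48 pages, while a task that has just dirtied 1024 pages is asked to write 1536.

    #include <stdio.h>

    static long ratelimit_pages = 32;           /* assumed default */

    static long sync_writeback_pages(unsigned long dirtied)
    {
            if (dirtied < ratelimit_pages)
                    dirtied = ratelimit_pages;
            return dirtied + dirtied / 2;
    }

    int main(void)
    {
            /* prints "48 1536" */
            printf("%ld %ld\n", sync_writeback_pages(8),
                                sync_writeback_pages(1024));
            return 0;
    }
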
 
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
@@ -107,6 +121,7 @@ EXPORT_SYMBOL(laptop_mode);
 
 /* End of sysctl-exported parameters */
 
+unsigned long global_dirty_limit;
 
 /*
  * Scale the writeback cache size proportional to the relative writeout speeds.
@@ -155,37 +170,37 @@ static void update_completion_period(void)
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
-               struct file *filp, void __user *buffer, size_t *lenp,
+               void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
        int ret;
 
-       ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_bytes = 0;
        return ret;
 }
 
 int dirty_background_bytes_handler(struct ctl_table *table, int write,
-               struct file *filp, void __user *buffer, size_t *lenp,
+               void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
        int ret;
 
-       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_ratio = 0;
        return ret;
 }
 
 int dirty_ratio_handler(struct ctl_table *table, int write,
-               struct file *filp, void __user *buffer, size_t *lenp,
+               void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
        int old_ratio = vm_dirty_ratio;
        int ret;
 
-       ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                update_completion_period();
                vm_dirty_bytes = 0;
@@ -195,13 +210,13 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 
 
 int dirty_bytes_handler(struct ctl_table *table, int write,
-               struct file *filp, void __user *buffer, size_t *lenp,
+               void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
        unsigned long old_bytes = vm_dirty_bytes;
        int ret;
 
-       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
                update_completion_period();
                vm_dirty_ratio = 0;
@@ -215,6 +230,7 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
  */
 static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 {
+       __inc_bdi_stat(bdi, BDI_WRITTEN);
        __prop_inc_percpu_max(&vm_completions, &bdi->completions,
                              bdi->max_prop_frac);
 }
@@ -240,39 +256,8 @@ void task_dirty_inc(struct task_struct *tsk)
 static void bdi_writeout_fraction(struct backing_dev_info *bdi,
                long *numerator, long *denominator)
 {
-       if (bdi_cap_writeback_dirty(bdi)) {
-               prop_fraction_percpu(&vm_completions, &bdi->completions,
+       prop_fraction_percpu(&vm_completions, &bdi->completions,
                                numerator, denominator);
-       } else {
-               *numerator = 0;
-               *denominator = 1;
-       }
-}
-
-/*
- * Clip the earned share of dirty pages to that which is actually available.
- * This avoids exceeding the total dirty_limit when the floating averages
- * fluctuate too quickly.
- */
-static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
-               unsigned long dirty, unsigned long *pbdi_dirty)
-{
-       unsigned long avail_dirty;
-
-       avail_dirty = global_page_state(NR_FILE_DIRTY) +
-                global_page_state(NR_WRITEBACK) +
-                global_page_state(NR_UNSTABLE_NFS) +
-                global_page_state(NR_WRITEBACK_TEMP);
-
-       if (avail_dirty < dirty)
-               avail_dirty = dirty - avail_dirty;
-       else
-               avail_dirty = 0;
-
-       avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
-               bdi_stat(bdi, BDI_WRITEBACK);
-
-       *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
 static inline void task_dirties_fraction(struct task_struct *tsk,
@@ -283,27 +268,40 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
 }
 
 /*
- * scale the dirty limit
+ * task_dirty_limit - scale down dirty throttling threshold for one task
  *
  * task specific dirty limit:
  *
  *   dirty -= (dirty/8) * p_{t}
+ *
+ * To protect light/slow dirtying tasks from heavier/faster ones, we start
+ * throttling individual tasks before reaching the bdi dirty limit.
+ * Relatively low thresholds will be allocated to heavy dirtiers. So when
+ * dirty pages grow large, heavy dirtiers will be throttled first, which will
+ * effectively curb the growth of dirty pages. Light dirtiers with high enough
+ * dirty threshold may never get throttled.
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+#define TASK_LIMIT_FRACTION 8
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+                                      unsigned long bdi_dirty)
 {
        long numerator, denominator;
-       unsigned long dirty = *pdirty;
-       u64 inv = dirty >> 3;
+       unsigned long dirty = bdi_dirty;
+       u64 inv = dirty / TASK_LIMIT_FRACTION;
 
        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);
 
        dirty -= inv;
-       if (dirty < *pdirty/2)
-               dirty = *pdirty/2;
 
-       *pdirty = dirty;
+       return max(dirty, bdi_dirty/2);
+}
+
+/* Minimum limit for any task */
+static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
+{
+       return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
 }
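
To make the per-task scaling concrete, here is a minimal userspace sketch with assumed numbers (the max(dirty, bdi_dirty/2) clamp is omitted, since it cannot trigger with TASK_LIMIT_FRACTION == 8): with a bdi limit of 8000 pages, a task responsible for all recent dirtying is throttled at 7000 pages, while a task responsible for 1% of it keeps nearly the full limit.

    #include <stdio.h>

    #define TASK_LIMIT_FRACTION 8

    /* numerator/denominator is the task's share of recent dirtying */
    static unsigned long task_dirty_limit(unsigned long bdi_dirty,
                                          long numerator, long denominator)
    {
            unsigned long inv = bdi_dirty / TASK_LIMIT_FRACTION;

            inv = inv * numerator / denominator;
            return bdi_dirty - inv;
    }

    int main(void)
    {
            /* prints "7000 7990" */
            printf("%lu %lu\n", task_dirty_limit(8000, 1, 1),
                                task_dirty_limit(8000, 1, 100));
            return 0;
    }
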
 
 /*
@@ -315,7 +313,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
        int ret = 0;
 
-       spin_lock(&bdi_lock);
+       spin_lock_bh(&bdi_lock);
        if (min_ratio > bdi->max_ratio) {
                ret = -EINVAL;
        } else {
@@ -327,7 +325,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
                        ret = -EINVAL;
                }
        }
-       spin_unlock(&bdi_lock);
+       spin_unlock_bh(&bdi_lock);
 
        return ret;
 }
@@ -339,14 +337,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
        if (max_ratio > 100)
                return -EINVAL;
 
-       spin_lock(&bdi_lock);
+       spin_lock_bh(&bdi_lock);
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
                bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
        }
-       spin_unlock(&bdi_lock);
+       spin_unlock_bh(&bdi_lock);
 
        return ret;
 }
@@ -380,7 +378,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
                struct zone *z =
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-               x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+               x += zone_page_state(z, NR_FREE_PAGES) +
+                    zone_reclaimable_pages(z);
        }
        /*
         * Make sure that the number of highmem pages is never larger
@@ -404,7 +403,7 @@ unsigned long determine_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
@@ -412,25 +411,34 @@ unsigned long determine_dirtyable_memory(void)
        return x + 1;   /* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-                unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+static unsigned long hard_dirty_limit(unsigned long thresh)
+{
+       return max(thresh, global_dirty_limit);
+}
+
+/*
+ * global_dirty_limits - background-writeback and dirty-throttling thresholds
+ *
+ * Calculate the dirty thresholds based on sysctl parameters
+ * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
+ * - vm.dirty_ratio             or  vm.dirty_bytes
+ * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
+ * real-time tasks.
+ */
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
        unsigned long background;
        unsigned long dirty;
-       unsigned long available_memory = determine_dirtyable_memory();
+       unsigned long uninitialized_var(available_memory);
        struct task_struct *tsk;
 
+       if (!vm_dirty_bytes || !dirty_background_bytes)
+               available_memory = determine_dirtyable_memory();
+
        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-       else {
-               int dirty_ratio;
-
-               dirty_ratio = vm_dirty_ratio;
-               if (dirty_ratio < 5)
-                       dirty_ratio = 5;
-               dirty = (dirty_ratio * available_memory) / 100;
-       }
+       else
+               dirty = (vm_dirty_ratio * available_memory) / 100;
 
        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
@@ -446,82 +454,278 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
        }
        *pbackground = background;
        *pdirty = dirty;
+       trace_global_dirty_state(background, dirty);
+}
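
A worked example of the threshold calculation, with assumed numbers (4GB of dirtyable memory in 4KB pages, the default vm.dirty_ratio of 20 and vm.dirty_background_ratio of 10):

    #include <stdio.h>

    int main(void)
    {
            unsigned long available_memory = 1048576;      /* pages, ~4GB */
            int vm_dirty_ratio = 20;
            int dirty_background_ratio = 10;
            unsigned long dirty = vm_dirty_ratio * available_memory / 100;
            unsigned long background = dirty_background_ratio * available_memory / 100;

            /* PF_LESS_THROTTLE (nfsd) and rt tasks get the limits lifted by 1/4 */
            printf("background=%lu dirty=%lu boosted=%lu\n",
                   background, dirty, dirty + dirty / 4);
            /* prints "background=104857 dirty=209715 boosted=262143" */
            return 0;
    }
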
 
-       if (bdi) {
-               u64 bdi_dirty;
-               long numerator, denominator;
+/**
+ * bdi_dirty_limit - @bdi's share of dirty throttling threshold
+ * @bdi: the backing_dev_info to query
+ * @dirty: global dirty limit in pages
+ *
+ * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
+ * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
+ * The "limit" in the name is not taken as a hard limit in
+ * balance_dirty_pages().
+ *
+ * It allocates high/low dirty limits to fast/slow devices, in order to prevent
+ * - starving fast devices
+ * - piling up dirty pages (that will take long time to sync) on slow devices
+ *
+ * The bdi's share of dirty limit will be adapting to its throughput and
+ * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
+ */
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
+{
+       u64 bdi_dirty;
+       long numerator, denominator;
 
-               /*
-                * Calculate this BDI's share of the dirty ratio.
-                */
-               bdi_writeout_fraction(bdi, &numerator, &denominator);
-
-               bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-               bdi_dirty *= numerator;
-               do_div(bdi_dirty, denominator);
-               bdi_dirty += (dirty * bdi->min_ratio) / 100;
-               if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-                       bdi_dirty = dirty * bdi->max_ratio / 100;
-
-               *pbdi_dirty = bdi_dirty;
-               clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
-               task_dirty_limit(current, pbdi_dirty);
+       /*
+        * Calculate this BDI's share of the dirty ratio.
+        */
+       bdi_writeout_fraction(bdi, &numerator, &denominator);
+
+       bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+       bdi_dirty *= numerator;
+       do_div(bdi_dirty, denominator);
+
+       bdi_dirty += (dirty * bdi->min_ratio) / 100;
+       if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+               bdi_dirty = dirty * bdi->max_ratio / 100;
+
+       return bdi_dirty;
+}
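
For instance, with assumed numbers (not taken from the patch): a global limit of 200000 pages and a bdi that has completed 3/5 of the recent writeout, with no min/max ratio configured, earns a 120000-page share.

    #include <stdio.h>

    int main(void)
    {
            unsigned long dirty = 200000;           /* global dirty limit, pages */
            unsigned long bdi_min_ratio = 0, min_ratio = 0, max_ratio = 100;
            long numerator = 3, denominator = 5;    /* this bdi's writeout fraction */
            unsigned long long bdi_dirty;

            bdi_dirty = dirty * (100 - bdi_min_ratio) / 100;
            bdi_dirty = bdi_dirty * numerator / denominator;
            bdi_dirty += dirty * min_ratio / 100;
            if (bdi_dirty > dirty * max_ratio / 100)
                    bdi_dirty = dirty * max_ratio / 100;

            /* prints "120000": faster devices earn a larger share */
            printf("%llu\n", bdi_dirty);
            return 0;
    }
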
+
+static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
+                                      unsigned long elapsed,
+                                      unsigned long written)
+{
+       const unsigned long period = roundup_pow_of_two(3 * HZ);
+       unsigned long avg = bdi->avg_write_bandwidth;
+       unsigned long old = bdi->write_bandwidth;
+       u64 bw;
+
+       /*
+        * bw = written * HZ / elapsed
+        *
+        *                   bw * elapsed + write_bandwidth * (period - elapsed)
+        * write_bandwidth = ---------------------------------------------------
+        *                                          period
+        */
+       bw = written - bdi->written_stamp;
+       bw *= HZ;
+       if (unlikely(elapsed > period)) {
+               do_div(bw, elapsed);
+               avg = bw;
+               goto out;
        }
+       bw += (u64)bdi->write_bandwidth * (period - elapsed);
+       bw >>= ilog2(period);
+
+       /*
+        * one more level of smoothing, for filtering out sudden spikes
+        */
+       if (avg > old && old >= (unsigned long)bw)
+               avg -= (avg - old) >> 3;
+
+       if (avg < old && old <= (unsigned long)bw)
+               avg += (old - avg) >> 3;
+
+out:
+       bdi->write_bandwidth = bw;
+       bdi->avg_write_bandwidth = avg;
+}
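
Plugging assumed numbers into the formula above (HZ=1000, so period = roundup_pow_of_two(3 * HZ) = 4096 jiffies): a 200ms sample of 5000 written pages, i.e. 25000 pages/s instantaneous, nudges a previous estimate of 20000 pages/s up only slightly, because the sample is weighted by elapsed/period.

    #include <stdio.h>

    #define HZ      1000
    #define PERIOD  4096                            /* roundup_pow_of_two(3 * HZ) */

    int main(void)
    {
            unsigned long old_bw = 20000;           /* pages/s, previous estimate */
            unsigned long written = 5000;           /* pages completed this interval */
            unsigned long elapsed = 200;            /* jiffies since the last update */
            unsigned long long bw;

            bw = (unsigned long long)written * HZ;  /* == instantaneous bw * elapsed */
            bw += (unsigned long long)old_bw * (PERIOD - elapsed);
            bw >>= 12;                              /* ilog2(PERIOD) */

            printf("%llu\n", bw);                   /* prints "20244" */
            return 0;
    }
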
+
+/*
+ * The global dirtyable memory and dirty threshold could be suddenly knocked
+ * down by a large amount (eg. on the startup of KVM in a swapless system).
+ * This may throw the system into a deep dirty-exceeded state and throttle
+ * heavy and light dirtiers alike. To retain good responsiveness, maintain
+ * global_dirty_limit, which tracks slowly down to the knocked-down dirty
+ * threshold.
+ */
+static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
+{
+       unsigned long limit = global_dirty_limit;
+
+       /*
+        * Follow up in one step.
+        */
+       if (limit < thresh) {
+               limit = thresh;
+               goto update;
+       }
+
+       /*
+        * Follow down slowly. Use the higher one as the target, because thresh
+        * may drop below dirty. This is exactly the reason to introduce
+        * global_dirty_limit which is guaranteed to lie above the dirty pages.
+        */
+       thresh = max(thresh, dirty);
+       if (limit > thresh) {
+               limit -= (limit - thresh) >> 5;
+               goto update;
+       }
+       return;
+update:
+       global_dirty_limit = limit;
+}
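
As a rough feel for the tracking speed (illustrative arithmetic only): each update removes 1/32 of the gap between global_dirty_limit and the new target, so about half of a sudden drop is absorbed after roughly 25 updates, i.e. a few seconds at one update per BANDWIDTH_INTERVAL.

    #include <stdio.h>

    int main(void)
    {
            unsigned long limit = 100000;   /* old global_dirty_limit, pages */
            unsigned long thresh = 50000;   /* suddenly-lowered dirty threshold */
            int i;

            for (i = 0; i < 25; i++)        /* ~5 seconds of 200ms updates */
                    limit -= (limit - thresh) >> 5;

            /* roughly half of the 50000-page gap has been closed */
            printf("%lu\n", limit);
            return 0;
    }
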
+
+static void global_update_bandwidth(unsigned long thresh,
+                                   unsigned long dirty,
+                                   unsigned long now)
+{
+       static DEFINE_SPINLOCK(dirty_lock);
+       static unsigned long update_time;
+
+       /*
+        * check locklessly first to optimize away locking most of the time
+        */
+       if (time_before(now, update_time + BANDWIDTH_INTERVAL))
+               return;
+
+       spin_lock(&dirty_lock);
+       if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
+               update_dirty_limit(thresh, dirty);
+               update_time = now;
+       }
+       spin_unlock(&dirty_lock);
+}
+
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+                           unsigned long thresh,
+                           unsigned long dirty,
+                           unsigned long bdi_thresh,
+                           unsigned long bdi_dirty,
+                           unsigned long start_time)
+{
+       unsigned long now = jiffies;
+       unsigned long elapsed = now - bdi->bw_time_stamp;
+       unsigned long written;
+
+       /*
+        * rate-limit, only update once every 200ms.
+        */
+       if (elapsed < BANDWIDTH_INTERVAL)
+               return;
+
+       written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
+
+       /*
+        * Skip quiet periods when disk bandwidth is under-utilized.
+        * (at least 1s idle time between two flusher runs)
+        */
+       if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
+               goto snapshot;
+
+       if (thresh)
+               global_update_bandwidth(thresh, dirty, now);
+
+       bdi_update_write_bandwidth(bdi, elapsed, written);
+
+snapshot:
+       bdi->written_stamp = written;
+       bdi->bw_time_stamp = now;
+}
+
+static void bdi_update_bandwidth(struct backing_dev_info *bdi,
+                                unsigned long thresh,
+                                unsigned long dirty,
+                                unsigned long bdi_thresh,
+                                unsigned long bdi_dirty,
+                                unsigned long start_time)
+{
+       if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
+               return;
+       spin_lock(&bdi->wb.list_lock);
+       __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
+                              start_time);
+       spin_unlock(&bdi->wb.list_lock);
 }
 
 /*
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+                               unsigned long write_chunk)
 {
-       long nr_reclaimable, bdi_nr_reclaimable;
-       long nr_writeback, bdi_nr_writeback;
+       unsigned long nr_reclaimable, bdi_nr_reclaimable;
+       unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
+       unsigned long bdi_dirty;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
+       unsigned long task_bdi_thresh;
+       unsigned long min_task_bdi_thresh;
        unsigned long pages_written = 0;
-       unsigned long write_chunk = sync_writeback_pages();
-
+       unsigned long pause = 1;
+       bool dirty_exceeded = false;
+       bool clear_dirty_exceeded = true;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
+       unsigned long start_time = jiffies;
 
        for (;;) {
-               struct writeback_control wbc = {
-                       .bdi            = bdi,
-                       .sync_mode      = WB_SYNC_NONE,
-                       .older_than_this = NULL,
-                       .nr_to_write    = write_chunk,
-                       .range_cyclic   = 1,
-               };
-
-               get_dirty_limits(&background_thresh, &dirty_thresh,
-                               &bdi_thresh, bdi);
-
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
-               nr_writeback = global_page_state(NR_WRITEBACK);
+               nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 
-               bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-               bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
-
-               if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
-                       break;
+               global_dirty_limits(&background_thresh, &dirty_thresh);
 
                /*
                 * Throttle it only when the background writeback cannot
                 * catch up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
-               if (nr_reclaimable + nr_writeback <
-                               (background_thresh + dirty_thresh) / 2)
+               if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
+                       break;
+
+               bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+               min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
+               task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
+               /*
+                * In order to avoid the stacked BDI deadlock we need
+                * to ensure we accurately count the 'dirty' pages when
+                * the threshold is low.
+                *
+                * Otherwise it would be possible to get thresh+n pages
+                * reported dirty, even though there are thresh-m pages
+                * actually dirty; with m+n sitting in the percpu
+                * deltas.
+                */
+               if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
+                       bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+                       bdi_dirty = bdi_nr_reclaimable +
+                                   bdi_stat_sum(bdi, BDI_WRITEBACK);
+               } else {
+                       bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+                       bdi_dirty = bdi_nr_reclaimable +
+                                   bdi_stat(bdi, BDI_WRITEBACK);
+               }
+
+               /*
+                * The bdi thresh is a somewhat "soft" limit derived from the
+                * global "hard" limit. The former helps to keep a heavy-IO
+                * bdi or process from holding back light ones; the latter is
+                * the last-resort safeguard.
+                */
+               dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
+                                 (nr_dirty > dirty_thresh);
+               clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
+                                       (nr_dirty <= dirty_thresh);
+
+               if (!dirty_exceeded)
                        break;
 
                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
 
+               bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
+                                    bdi_thresh, bdi_dirty, start_time);
+
                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
@@ -531,45 +735,56 @@ static void balance_dirty_pages(struct address_space *mapping)
                 * threshold otherwise wait until the disk writes catch
                 * up.
                 */
-               if (bdi_nr_reclaimable > bdi_thresh) {
-                       writeback_inodes_wbc(&wbc);
-                       pages_written += write_chunk - wbc.nr_to_write;
-                       get_dirty_limits(&background_thresh, &dirty_thresh,
-                                      &bdi_thresh, bdi);
+               trace_balance_dirty_start(bdi);
+               if (bdi_nr_reclaimable > task_bdi_thresh) {
+                       pages_written += writeback_inodes_wb(&bdi->wb,
+                                                            write_chunk);
+                       trace_balance_dirty_written(bdi, pages_written);
+                       if (pages_written >= write_chunk)
+                               break;          /* We've done our duty */
                }
+               __set_current_state(TASK_UNINTERRUPTIBLE);
+               io_schedule_timeout(pause);
+               trace_balance_dirty_wait(bdi);
 
+               dirty_thresh = hard_dirty_limit(dirty_thresh);
                /*
-                * In order to avoid the stacked BDI deadlock we need
-                * to ensure we accurately count the 'dirty' pages when
-                * the threshold is low.
-                *
-                * Otherwise it would be possible to get thresh+n pages
-                * reported dirty, even though there are thresh-m pages
-                * actually dirty; with m+n sitting in the percpu
-                * deltas.
+                * max-pause area. If dirty exceeded but still within this
+                * area, no need to sleep for more than 200ms: (a) 8 pages per
+                * 200ms is typically more than enough to curb heavy dirtiers;
+                * (b) the pause time limit makes the dirtiers more responsive.
                 */
-               if (bdi_thresh < 2*bdi_stat_error(bdi)) {
-                       bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-                       bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
-               } else if (bdi_nr_reclaimable) {
-                       bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-                       bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
-               }
-
-               if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
+               if (nr_dirty < dirty_thresh +
+                              dirty_thresh / DIRTY_MAXPAUSE_AREA &&
+                   time_after(jiffies, start_time + MAX_PAUSE))
+                       break;
+               /*
+                * pass-good area. When some bdi gets blocked (eg. NFS server
+                * not responding), or write bandwidth dropped dramatically due
+                * to concurrent reads, or dirty threshold suddenly dropped and
+                * the dirty pages cannot be brought down anytime soon (eg. on
+                * slow USB stick), at least let go of the good bdi's.
+                */
+               if (nr_dirty < dirty_thresh +
+                              dirty_thresh / DIRTY_PASSGOOD_AREA &&
+                   bdi_dirty < bdi_thresh)
                        break;
-               if (pages_written >= write_chunk)
-                       break;          /* We've done our duty */
 
-               schedule_timeout(1);
+               /*
+                * Increase the delay for each loop, up to our previous
+                * default of taking a 100ms nap.
+                */
+               pause <<= 1;
+               if (pause > HZ / 10)
+                       pause = HZ / 10;
        }
 
-       if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
-                       bdi->dirty_exceeded)
+       /* Clear dirty_exceeded flag only when no task can exceed the limit */
+       if (clear_dirty_exceeded && bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;
 
        if (writeback_in_progress(bdi))
-               return;         /* pdflush is already working this queue */
+               return;
 
        /*
         * In laptop mode, we wait until hitting the higher threshold before
@@ -580,18 +795,8 @@ static void balance_dirty_pages(struct address_space *mapping)
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
-           (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
-                                         + global_page_state(NR_UNSTABLE_NFS))
-                                         > background_thresh))) {
-               struct writeback_control wbc = {
-                       .bdi            = bdi,
-                       .sync_mode      = WB_SYNC_NONE,
-                       .nr_to_write    = nr_writeback,
-               };
-
-
-               bdi_start_writeback(&wbc);
-       }
+           (!laptop_mode && (nr_reclaimable > background_thresh)))
+               bdi_start_background_writeback(bdi);
 }
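
The per-iteration sleep in the loop above grows exponentially and is capped at HZ/10. Assuming HZ=1000, the successive pauses in a long throttling episode are 1, 2, 4, ... 64, then 100 jiffies, as this small sketch shows:

    #include <stdio.h>

    #define HZ 1000                         /* assumed */

    int main(void)
    {
            unsigned long pause = 1;
            int loop;

            for (loop = 0; loop < 10; loop++) {
                    printf("%lu ", pause);  /* jiffies slept this iteration */
                    pause <<= 1;
                    if (pause > HZ / 10)
                            pause = HZ / 10;
            }
            printf("\n");                   /* prints "1 2 4 8 16 32 64 100 100 100" */
            return 0;
    }
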
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -604,6 +809,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
        }
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -621,10 +828,13 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
 {
-       static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+       struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long ratelimit;
        unsigned long *p;
 
+       if (!bdi_cap_account_dirty(bdi))
+               return;
+
        ratelimit = ratelimit_pages;
        if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;
@@ -634,12 +844,13 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
-       p =  &__get_cpu_var(ratelimits);
+       p =  &__get_cpu_var(bdp_ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
+               ratelimit = sync_writeback_pages(*p);
                *p = 0;
                preempt_enable();
-               balance_dirty_pages(mapping);
+               balance_dirty_pages(mapping, ratelimit);
                return;
        }
        preempt_enable();
@@ -652,7 +863,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
        unsigned long dirty_thresh;
 
         for ( ; ; ) {
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+               global_dirty_limits(&background_thresh, &dirty_thresh);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -675,35 +886,30 @@ void throttle_vm_writeout(gfp_t gfp_mask)
         }
 }
 
-static void laptop_timer_fn(unsigned long unused);
-
-static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
-
 /*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length, ppos);
+       proc_dointvec(table, write, buffer, length, ppos);
+       bdi_arm_supers_timer();
        return 0;
 }
 
-static void do_laptop_sync(struct work_struct *work)
-{
-       wakeup_flusher_threads(0);
-       kfree(work);
-}
-
-static void laptop_timer_fn(unsigned long unused)
+#ifdef CONFIG_BLOCK
+void laptop_mode_timer_fn(unsigned long data)
 {
-       struct work_struct *work;
+       struct request_queue *q = (struct request_queue *)data;
+       int nr_pages = global_page_state(NR_FILE_DIRTY) +
+               global_page_state(NR_UNSTABLE_NFS);
 
-       work = kmalloc(sizeof(*work), GFP_ATOMIC);
-       if (work) {
-               INIT_WORK(work, do_laptop_sync);
-               schedule_work(work);
-       }
+       /*
+        * We want to write everything out, not just down to the dirty
+        * threshold
+        */
+       if (bdi_has_dirty_io(&q->backing_dev_info))
+               bdi_start_writeback(&q->backing_dev_info, nr_pages);
 }
 
 /*
@@ -711,9 +917,9 @@ static void laptop_timer_fn(unsigned long unused)
  * of all dirty data a few seconds from now.  If the flush is already scheduled
  * then push it back - the user is still using the disk.
  */
-void laptop_io_completion(void)
+void laptop_io_completion(struct backing_dev_info *info)
 {
-       mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
+       mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
 }
 
 /*
@@ -723,8 +929,16 @@ void laptop_io_completion(void)
  */
 void laptop_sync_completion(void)
 {
-       del_timer(&laptop_mode_wb_timer);
+       struct backing_dev_info *bdi;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
+               del_timer(&bdi->laptop_mode_wb_timer);
+
+       rcu_read_unlock();
 }
+#endif
 
 /*
  * If ratelimit_pages is too high then we can get into dirty-data overload
@@ -795,6 +1009,42 @@ void __init page_writeback_init(void)
 }
 
 /**
+ * tag_pages_for_writeback - tag pages to be written by write_cache_pages
+ * @mapping: address space structure to write
+ * @start: starting page index
+ * @end: ending page index (inclusive)
+ *
+ * This function scans the page range from @start to @end (inclusive) and tags
+ * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
+ * that write_cache_pages (or whoever calls this function) will then use
+ * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
+ * used to avoid livelocking of writeback by a process steadily creating new
+ * dirty pages in the file (thus it is important for this function to be quick
+ * so that it can tag pages faster than a dirtying process can create them).
+ */
+/*
+ * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
+ */
+void tag_pages_for_writeback(struct address_space *mapping,
+                            pgoff_t start, pgoff_t end)
+{
+#define WRITEBACK_TAG_BATCH 4096
+       unsigned long tagged;
+
+       do {
+               spin_lock_irq(&mapping->tree_lock);
+               tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
+                               &start, end, WRITEBACK_TAG_BATCH,
+                               PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
+               spin_unlock_irq(&mapping->tree_lock);
+               WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
+               cond_resched();
+               /* We check 'start' to handle wrapping when end == ~0UL */
+       } while (tagged >= WRITEBACK_TAG_BATCH && start);
+}
+EXPORT_SYMBOL(tag_pages_for_writeback);
+
+/**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
@@ -808,12 +1058,18 @@ void __init page_writeback_init(void)
  * the call was made get new I/O started against them.  If wbc->sync_mode is
  * WB_SYNC_ALL then we were called for data integrity and we must wait for
  * existing IO to complete.
+ *
+ * To avoid livelocks (when another process dirties new pages), we first tag
+ * pages which should be written back with TOWRITE tag and only then start
+ * writing them. For data-integrity sync we have to be careful so that we do
+ * not miss some pages (e.g., because some other process has cleared TOWRITE
+ * tag we set). The rule we follow is that TOWRITE tag can be cleared only
+ * by the process clearing the DIRTY tag (and submitting the page for IO).
  */
 int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data)
 {
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
@@ -824,12 +1080,7 @@ int write_cache_pages(struct address_space *mapping,
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
-       long nr_to_write = wbc->nr_to_write;
-
-       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-               wbc->encountered_congestion = 1;
-               return 0;
-       }
+       int tag;
 
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
@@ -847,13 +1098,18 @@ int write_cache_pages(struct address_space *mapping,
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+               tag = PAGECACHE_TAG_TOWRITE;
+       else
+               tag = PAGECACHE_TAG_DIRTY;
 retry:
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+               tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                int i;
 
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                             PAGECACHE_TAG_DIRTY,
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;
@@ -877,7 +1133,7 @@ retry:
                                break;
                        }
 
-                       done_index = page->index + 1;
+                       done_index = page->index;
 
                        lock_page(page);
 
@@ -911,6 +1167,7 @@ continue_unlock:
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
+                       trace_wbc_writepage(wbc, mapping->backing_dev_info);
                        ret = (*writepage)(page, wbc, data);
                        if (unlikely(ret)) {
                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
@@ -926,32 +1183,20 @@ continue_unlock:
                                         * not be suitable for data integrity
                                         * writeout).
                                         */
-                                       done = 1;
-                                       break;
-                               }
-                       }
-
-                       if (nr_to_write > 0) {
-                               nr_to_write--;
-                               if (nr_to_write == 0 &&
-                                   wbc->sync_mode == WB_SYNC_NONE) {
-                                       /*
-                                        * We stop writing back only if we are
-                                        * not doing integrity sync. In case of
-                                        * integrity sync we have to keep going
-                                        * because someone may be concurrently
-                                        * dirtying pages, and we might have
-                                        * synced a lot of newly appeared dirty
-                                        * pages, but have not synced all of the
-                                        * old dirty pages.
-                                        */
+                                       done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
                        }
 
-                       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-                               wbc->encountered_congestion = 1;
+                       /*
+                        * We stop writing back only if we are not doing
+                        * integrity sync. In case of integrity sync we have to
+                        * keep going until we have written all the pages
+                        * we tagged for writeback prior to entering this loop.
+                        */
+                       if (--wbc->nr_to_write <= 0 &&
+                           wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
                        }
@@ -970,11 +1215,8 @@ continue_unlock:
                end = writeback_index - 1;
                goto retry;
        }
-       if (!wbc->no_nrwrite_index_update) {
-               if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
-                       mapping->writeback_index = done_index;
-               wbc->nr_to_write = nr_to_write;
-       }
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+               mapping->writeback_index = done_index;
 
        return ret;
 }
@@ -1004,11 +1246,17 @@ static int __writepage(struct page *page, struct writeback_control *wbc,
 int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
 {
+       struct blk_plug plug;
+       int ret;
+
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;
 
-       return write_cache_pages(mapping, wbc, __writepage, mapping);
+       blk_start_plug(&plug);
+       ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+       blk_finish_plug(&plug);
+       return ret;
 }
 
 EXPORT_SYMBOL(generic_writepages);
@@ -1019,12 +1267,10 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
        if (wbc->nr_to_write <= 0)
                return 0;
-       wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
-       wbc->for_writepages = 0;
        return ret;
 }
 
@@ -1073,7 +1319,7 @@ EXPORT_SYMBOL(write_one_page);
 int __set_page_dirty_no_writeback(struct page *page)
 {
        if (!PageDirty(page))
-               SetPageDirty(page);
+               return !TestSetPageDirty(page);
        return 0;
 }
 
@@ -1085,11 +1331,24 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
 }
+EXPORT_SYMBOL(account_page_dirtied);
+
+/*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+       inc_zone_page_state(page, NR_WRITEBACK);
+}
+EXPORT_SYMBOL(account_page_writeback);
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
@@ -1148,6 +1407,13 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
 /*
+ * Dirty a page.
+ *
+ * For pages with a mapping this should be done under the page lock
+ * for the benefit of asynchronous memory errors, which prefer a consistent
+ * dirty state. This rule can be broken in some special cases,
+ * but it is better not to.
+ *
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
@@ -1157,6 +1423,17 @@ int set_page_dirty(struct page *page)
 
        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+               /*
+                * readahead/lru_deactivate_page could leave
+                * PG_readahead/PG_reclaim set due to a race with end_page_writeback.
+                * For readahead, if the page is written, the flags would be
+                * reset. So no problem.
+                * For lru_deactivate_page, if the page is redirtied, the flag
+                * will be reset. So no problem. But if the page is used by readahead,
+                * it will confuse readahead and make it restart the size ramp-up
+                * process. That is only a trivial problem.
+                */
+               ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
@@ -1185,7 +1462,7 @@ int set_page_dirty_lock(struct page *page)
 {
        int ret;
 
-       lock_page_nosync(page);
+       lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
@@ -1212,7 +1489,6 @@ int clear_page_dirty_for_io(struct page *page)
 
        BUG_ON(!PageLocked(page));
 
-       ClearPageReclaim(page);
        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
@@ -1287,8 +1563,10 @@ int test_clear_page_writeback(struct page *page)
        } else {
                ret = TestClearPageWriteback(page);
        }
-       if (ret)
+       if (ret) {
                dec_zone_page_state(page, NR_WRITEBACK);
+               inc_zone_page_state(page, NR_WRITTEN);
+       }
        return ret;
 }
 
@@ -1314,12 +1592,15 @@ int test_set_page_writeback(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
+               radix_tree_tag_clear(&mapping->page_tree,
+                                    page_index(page),
+                                    PAGECACHE_TAG_TOWRITE);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
-               inc_zone_page_state(page, NR_WRITEBACK);
+               account_page_writeback(page);
        return ret;
 
 }
@@ -1331,10 +1612,6 @@ EXPORT_SYMBOL(test_set_page_writeback);
  */
 int mapping_tagged(struct address_space *mapping, int tag)
 {
-       int ret;
-       rcu_read_lock();
-       ret = radix_tree_tagged(&mapping->page_tree, tag);
-       rcu_read_unlock();
-       return ret;
+       return radix_tree_tagged(&mapping->page_tree, tag);
 }
 EXPORT_SYMBOL(mapping_tagged);