writeback: consolidate variable names in balance_dirty_pages()
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    Andrew Morton
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL      max(HZ/5, 1)

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than dirtied pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(unsigned long dirtied)
{
        if (dirtied < ratelimit_pages)
                dirtied = ratelimit_pages;

        return dirtied + dirtied / 2;
}
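
/*
 * Worked example (assuming the default ratelimit_pages = 32): a caller
 * that has dirtied 1024 pages will be asked to write back
 * 1024 + 1024/2 = 1536 pages, while one that dirtied only 8 pages is
 * first rounded up to 32 and asked to write back 48.
 */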

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
        unsigned long dirty_total;

        if (vm_dirty_bytes)
                dirty_total = vm_dirty_bytes / PAGE_SIZE;
        else
                dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
                                100;
        return 2 + ilog2(dirty_total - 1);
}
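
/*
 * Worked example (assuming 4KiB pages): with vm_dirty_bytes = 64MB the
 * dirty limit is 16384 pages, so calc_period_shift() returns
 * 2 + ilog2(16383) = 15.  If the proportion period is 2^shift events,
 * as the rule above suggests, then period/2 = 2^14 =
 * roundup_pow_of_two(16384), matching the coupling rule.
 */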

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
        int shift = calc_period_shift();
        prop_change_shift(&vm_completions, shift);
        prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_bytes = 0;
        return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_ratio = 0;
        return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int old_ratio = vm_dirty_ratio;
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                update_completion_period();
                vm_dirty_bytes = 0;
        }
        return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        unsigned long old_bytes = vm_dirty_bytes;
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
                update_completion_period();
                vm_dirty_ratio = 0;
        }
        return ret;
}
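
/*
 * The ratio and bytes interfaces are mutually exclusive: for example,
 * `sysctl -w vm.dirty_bytes=268435456` zeroes vm.dirty_ratio, and a later
 * `sysctl -w vm.dirty_ratio=20` zeroes vm.dirty_bytes again.  The same
 * applies to the dirty_background_* pair.
 */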

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
        __inc_bdi_stat(bdi, BDI_WRITTEN);
        __prop_inc_percpu_max(&vm_completions, &bdi->completions,
                              bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
        unsigned long flags;

        local_irq_save(flags);
        __bdi_writeout_inc(bdi);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
        prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
                long *numerator, long *denominator)
{
        prop_fraction_percpu(&vm_completions, &bdi->completions,
                                numerator, denominator);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
                long *numerator, long *denominator)
{
        prop_fraction_single(&vm_dirties, &tsk->dirties,
                                numerator, denominator);
}

/*
 * task_dirty_limit - scale down dirty throttling threshold for one task
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 * throttling individual tasks before reaching the bdi dirty limit.
 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 * dirty threshold may never get throttled.
 */
static unsigned long task_dirty_limit(struct task_struct *tsk,
                                       unsigned long bdi_dirty)
{
        long numerator, denominator;
        unsigned long dirty = bdi_dirty;
        u64 inv = dirty >> 3;

        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);

        dirty -= inv;

        return max(dirty, bdi_dirty/2);
}
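
/*
 * Worked example: if bdi_dirty = 800 pages and the task accounts for half
 * of the recently dirtied pages (p_t = 1/2), then inv = (800/8) * 1/2 = 50
 * and the task is throttled at 750 pages; a task doing nearly all of the
 * dirtying (p_t ~= 1) is throttled at 700, i.e. 1/8 below the bdi limit.
 */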

/*
 * bdi_min_ratio holds the sum of the minimum dirty shares claimed by all
 * registered BDIs via bdi_set_min_ratio(); the total may not reach 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
        int ret = 0;

        spin_lock_bh(&bdi_lock);
        if (min_ratio > bdi->max_ratio) {
                ret = -EINVAL;
        } else {
                min_ratio -= bdi->min_ratio;
                if (bdi_min_ratio + min_ratio < 100) {
                        bdi_min_ratio += min_ratio;
                        bdi->min_ratio += min_ratio;
                } else {
                        ret = -EINVAL;
                }
        }
        spin_unlock_bh(&bdi_lock);

        return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
        int ret = 0;

        if (max_ratio > 100)
                return -EINVAL;

        spin_lock_bh(&bdi_lock);
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
                bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
        }
        spin_unlock_bh(&bdi_lock);

        return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
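
/*
 * These knobs are normally driven from sysfs: writes to
 * /sys/class/bdi/<bdi>/min_ratio and /sys/class/bdi/<bdi>/max_ratio end
 * up here.  For example, `echo 50 > /sys/class/bdi/8:16/max_ratio` caps
 * that device at half of the global dirty threshold.
 */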

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
        int node;
        unsigned long x = 0;

        for_each_node_state(node, N_HIGH_MEMORY) {
                struct zone *z =
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

                x += zone_page_state(z, NR_FREE_PAGES) +
                     zone_reclaimable_pages(z);
        }
        /*
         * Make sure that the number of highmem pages is never larger
         * than the total amount of dirtyable memory. This can only
         * occur in very strange VM situations but we want to make sure
         * that this does not occur.
         */
        return min(x, total);
#else
        return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
        unsigned long x;

        x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);

        return x + 1;   /* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
        unsigned long background;
        unsigned long dirty;
        unsigned long uninitialized_var(available_memory);
        struct task_struct *tsk;

        if (!vm_dirty_bytes || !dirty_background_bytes)
                available_memory = determine_dirtyable_memory();

        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
        else
                dirty = (vm_dirty_ratio * available_memory) / 100;

        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
        else
                background = (dirty_background_ratio * available_memory) / 100;

        if (background >= dirty)
                background = dirty / 2;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
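
/*
 * Worked example: with 1,000,000 dirtyable pages and the default
 * vm.dirty_ratio = 20 / vm.dirty_background_ratio = 10, this yields
 * *pdirty = 200,000 and *pbackground = 100,000 pages; for an nfsd thread
 * (PF_LESS_THROTTLE) the pair is lifted to 250,000 / 125,000.
 */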

/**
 * bdi_dirty_limit - @bdi's share of the dirty throttling threshold
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
 *
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 * The "limit" in the name is not taken as a hard limit in
 * balance_dirty_pages().
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take a long time to sync) on slow devices
 *
 * The bdi's share of the dirty limit adapts to its throughput and is bounded
 * by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
        u64 bdi_dirty;
        long numerator, denominator;

        /*
         * Calculate this BDI's share of the dirty ratio.
         */
        bdi_writeout_fraction(bdi, &numerator, &denominator);

        bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
        bdi_dirty *= numerator;
        do_div(bdi_dirty, denominator);

        bdi_dirty += (dirty * bdi->min_ratio) / 100;
        if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
                bdi_dirty = dirty * bdi->max_ratio / 100;

        return bdi_dirty;
}
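
/*
 * Worked example: with a global limit of dirty = 1000 pages, a bdi that
 * completed 30% of recent writeouts and has bdi->min_ratio = 10 (so
 * bdi_min_ratio = 10 as well, assuming no other BDI claims a minimum),
 * the share is (1000 * 90/100) * 0.3 = 270 pages plus the reserved 100,
 * i.e. 370 pages.  The max_ratio cap defaults to 100 and does not bite.
 */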

static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
                                       unsigned long elapsed,
                                       unsigned long written)
{
        const unsigned long period = roundup_pow_of_two(3 * HZ);
        unsigned long avg = bdi->avg_write_bandwidth;
        unsigned long old = bdi->write_bandwidth;
        u64 bw;

        /*
         * bw = written * HZ / elapsed
         *
         *                   bw * elapsed + write_bandwidth * (period - elapsed)
         * write_bandwidth = ---------------------------------------------------
         *                                          period
         */
        bw = written - bdi->written_stamp;
        bw *= HZ;
        if (unlikely(elapsed > period)) {
                do_div(bw, elapsed);
                avg = bw;
                goto out;
        }
        bw += (u64)bdi->write_bandwidth * (period - elapsed);
        bw >>= ilog2(period);

        /*
         * one more level of smoothing, for filtering out sudden spikes
         */
        if (avg > old && old >= (unsigned long)bw)
                avg -= (avg - old) >> 3;

        if (avg < old && old <= (unsigned long)bw)
                avg += (old - avg) >> 3;

out:
        bdi->write_bandwidth = bw;
        bdi->avg_write_bandwidth = avg;
}
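
/*
 * Worked example (assuming HZ = 100, so period = roundup_pow_of_two(300)
 * = 512 jiffies): if 100 pages were written during elapsed = 20 jiffies
 * (an instantaneous rate of 500 pages/s) and the old write_bandwidth was
 * 200 pages/s, the update gives
 *   (100 * 100 + 200 * (512 - 20)) >> 9 = 108400 / 512 ~= 211 pages/s,
 * so each 200ms sample moves the estimate only a small step toward the
 * instantaneous rate.
 */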

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
                            unsigned long start_time)
{
        unsigned long now = jiffies;
        unsigned long elapsed = now - bdi->bw_time_stamp;
        unsigned long written;

        /*
         * rate-limit, only update once every 200ms.
         */
        if (elapsed < BANDWIDTH_INTERVAL)
                return;

        written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

        /*
         * Skip quiet periods when disk bandwidth is under-utilized.
         * (at least 1s idle time between two flusher runs)
         */
        if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
                goto snapshot;

        bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
        bdi->written_stamp = written;
        bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
                                 unsigned long start_time)
{
        if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
                return;
        spin_lock(&bdi->wb.list_lock);
        __bdi_update_bandwidth(bdi, start_time);
        spin_unlock(&bdi->wb.list_lock);
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
                                unsigned long write_chunk)
{
        unsigned long nr_reclaimable, bdi_nr_reclaimable;
        unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
        unsigned long bdi_dirty;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long pages_written = 0;
        unsigned long pause = 1;
        bool dirty_exceeded = false;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long start_time = jiffies;

        for (;;) {
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

                global_dirty_limits(&background_thresh, &dirty_thresh);

                /*
                 * Throttle it only when the background writeback cannot
                 * catch up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
                if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
                        break;

                bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
                bdi_thresh = task_dirty_limit(current, bdi_thresh);

                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * to ensure we accurately count the 'dirty' pages when
                 * the threshold is low.
                 *
                 * Otherwise it would be possible to get thresh+n pages
                 * reported dirty, even though there are thresh-m pages
                 * actually dirty; with m+n sitting in the percpu
                 * deltas.
                 */
                if (bdi_thresh < 2*bdi_stat_error(bdi)) {
                        bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
                        bdi_dirty = bdi_nr_reclaimable +
                                    bdi_stat_sum(bdi, BDI_WRITEBACK);
                } else {
                        bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                        bdi_dirty = bdi_nr_reclaimable +
                                    bdi_stat(bdi, BDI_WRITEBACK);
                }

                /*
                 * The bdi thresh is a somewhat "soft" limit derived from the
                 * global "hard" limit. The former helps to prevent a heavy-IO
                 * bdi or process from holding back light ones; the latter is
                 * the last-resort safeguard.
                 */
                dirty_exceeded = (bdi_dirty > bdi_thresh) ||
                                  (nr_dirty > dirty_thresh);

                if (!dirty_exceeded)
                        break;

                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;

                bdi_update_bandwidth(bdi, start_time);

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 * Only move pages to writeback if this bdi is over its
                 * threshold, otherwise wait until the disk writes catch
                 * up.
                 */
                trace_balance_dirty_start(bdi);
                if (bdi_nr_reclaimable > bdi_thresh) {
                        pages_written += writeback_inodes_wb(&bdi->wb,
                                                             write_chunk);
                        trace_balance_dirty_written(bdi, pages_written);
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                __set_current_state(TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(pause);
                trace_balance_dirty_wait(bdi);

                /*
                 * Increase the delay for each loop, up to our previous
                 * default of taking a 100ms nap.
                 */
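                /*
                 * For example, with HZ = 1000 the successive naps are
                 * 1, 2, 4, ... jiffies, capped at HZ/10 = 100 jiffies.
                 */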
                pause <<= 1;
                if (pause > HZ / 10)
                        pause = HZ / 10;
        }

        if (!dirty_exceeded && bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
            (!laptop_mode && (nr_reclaimable > background_thresh)))
                bdi_start_background_writeback(bdi);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
        if (set_page_dirty(page) || page_mkwrite) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long ratelimit;
        unsigned long *p;

        if (!bdi_cap_account_dirty(bdi))
                return;

        ratelimit = ratelimit_pages;
        if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(bdp_ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                ratelimit = sync_writeback_pages(*p);
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping, ratelimit);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
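
/*
 * Example of the ratelimiting: with the default ratelimit_pages = 32, a
 * task dirtying one page at a time enters balance_dirty_pages() roughly
 * once per 32 pages dirtied on its CPU; once the bdi reports
 * dirty_exceeded, the check happens about every 8 pages instead.
 */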

void throttle_vm_writeout(gfp_t gfp_mask)
{
        unsigned long background_thresh;
        unsigned long dirty_thresh;

        for ( ; ; ) {
                global_dirty_limits(&background_thresh, &dirty_thresh);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                /*
                 * The caller might hold locks which can prevent IO completion
                 * or progress in the filesystem.  So we cannot just sit here
                 * waiting for IO to complete.
                 */
                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
                        break;
        }
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);
        bdi_arm_supers_timer();
        return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;
        int nr_pages = global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS);

        /*
         * We want to write everything out, not just down to the dirty
         * threshold
         */
        if (bdi_has_dirty_io(&q->backing_dev_info))
                bdi_start_writeback(&q->backing_dev_info, nr_pages);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
        mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        struct backing_dev_info *bdi;

        rcu_read_lock();

        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
                del_timer(&bdi->laptop_mode_wb_timer);

        rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
        ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
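
/*
 * Worked example (4KiB pages): a 4GiB machine has vm_total_pages =
 * 1,048,576, so with 4 online CPUs the raw value is 1,048,576/(4*32) =
 * 8192 pages = 32MiB; the 4MiB cap then brings ratelimit_pages down to
 * 1024, and the 1.5x factor in sync_writeback_pages() accounts for the
 * "six megabyte chunks, max" mentioned above.
 */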

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        writeback_set_ratelimit();
        return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
        int shift;

        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);

        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
        prop_descriptor_init(&vm_dirties, shift);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have the DIRTY tag set with a special TOWRITE tag. The idea
 * is that write_cache_pages (or whoever calls this function) will then use the
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
        unsigned long tagged;

        do {
                spin_lock_irq(&mapping->tree_lock);
                tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
                                &start, end, WRITEBACK_TAG_BATCH,
                                PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
                spin_unlock_irq(&mapping->tree_lock);
                WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
                cond_resched();
                /* We check 'start' to handle wrapping when end == ~0UL */
        } while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
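
/*
 * For scale: with 4KiB pages and WRITEBACK_TAG_BATCH = 4096, tagging
 * every page of a fully dirty 1GiB file (262,144 pages) takes at most
 * 64 tree_lock acquisitions, with a cond_resched() between each.
 */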

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with the TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared the
 * TOWRITE tag we set). The rule we follow is that the TOWRITE tag can be
 * cleared only by the process clearing the DIRTY tag (and submitting the page
 * for IO).
 */
int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                int i;

                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point, the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or
                         * even swizzled back from swapper_space to tmpfs file
                         * mapping. However, page->index will not change
                         * because we have a reference on the page.
                         */
                        if (page->index > end) {
                                /*
                                 * can't be range_cyclic (1st pass) because
                                 * end == -1 in that case.
                                 */
                                done = 1;
                                break;
                        }

                        done_index = page->index;

                        lock_page(page);

                        /*
                         * Page truncated or invalidated. We can freely skip it
                         * then, even for data integrity operations: the page
                         * has disappeared concurrently, so there could be no
                         * real expectation of this data integrity operation
                         * even if there is now a new, dirty page at the same
                         * pagecache address.
                         */
                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }

                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
                                        wait_on_page_writeback(page);
                                else
                                        goto continue_unlock;
                        }

                        BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        trace_wbc_writepage(wbc, mapping->backing_dev_info);
                        ret = (*writepage)(page, wbc, data);
                        if (unlikely(ret)) {
                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                        unlock_page(page);
                                        ret = 0;
                                } else {
                                        /*
                                         * done_index is set past this page,
                                         * so media errors will not choke
                                         * background writeout for the entire
                                         * file. This has consequences for
                                         * range_cyclic semantics (ie. it may
                                         * not be suitable for data integrity
                                         * writeout).
                                         */
                                        done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
                        }

                        /*
                         * We stop writing back only if we are not doing
                         * integrity sync. In case of integrity sync we have to
                         * keep going until we have written all the pages
                         * we tagged for writeback prior to entering this loop.
                         */
                        if (--wbc->nr_to_write <= 0 &&
                            wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
                       void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct blk_plug plug;
        int ret;

        /* deal with chardevs and other special files */
        if (!mapping->a_ops->writepage)
                return 0;

        blk_start_plug(&plug);
        ret = write_cache_pages(mapping, wbc, __writepage, mapping);
        blk_finish_plug(&plug);
        return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
        if (!PageDirty(page))
                return !TestSetPageDirty(page);
        return 0;
}

/*
 * Helper function for the set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for the set_page_writeback family.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */
void account_page_writeback(struct page *page)
{
        inc_zone_page_state(page, NR_WRITEBACK);
        inc_zone_page_state(page, NR_WRITTEN);
}
EXPORT_SYMBOL(account_page_writeback);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (!mapping)
                        return 1;

                spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
                        WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                        account_page_dirtied(page, mapping);
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
                spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors, which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                /*
                 * readahead/lru_deactivate_page could leave
                 * PG_readahead/PG_reclaim set due to a race with
                 * end_page_writeback.  For readahead, if the page is
                 * written, the flag will be reset, so that is no problem.
                 * For lru_deactivate_page, if the page is redirtied, the
                 * flag will likewise be reset; but if the page is then used
                 * for readahead it will confuse readahead and make it
                 * restart the size rampup process.  That is a trivial
                 * problem, though.
                 */
                ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
#endif
                return (*spd)(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        BUG_ON(!PageLocked(page));

        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
                 *
                 * We use this sequence to make sure that
                 *  (a) we account for dirty stats properly
                 *  (b) we tell the low-level filesystem to
                 *      mark the whole page dirty if it was
                 *      dirty in a pagetable. Only to then
                 *  (c) clean the page again and return 1 to
                 *      cause the writeback.
                 *
                 * This way we avoid all nasty races with the
                 * dirty bit in multiple places and clearing
                 * them concurrently from different threads.
                 *
                 * Note! Normally the "set_page_dirty(page)"
                 * has no effect on the actual dirty bit - since
                 * that will already usually be set. But we
                 * need the side effects, and it can help us
                 * avoid races.
                 *
                 * We basically use the page "master dirty bit"
                 * as a serialization point for all the different
                 * threads doing their things.
                 */
                if (page_mkclean(page))
                        set_page_dirty(page);
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
                 * at this point. We do this by having them hold the
                 * page lock at some point after installing their
                 * pte, but before marking the page dirty.
                 * Pages are always locked coming in here, so we get
                 * the desired exclusion. See mm/memory.c:do_wp_page()
                 * for more comments.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        dec_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        if (ret)
                dec_zone_page_state(page, NR_WRITEBACK);
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi))
                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
                }
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                radix_tree_tag_clear(&mapping->page_tree,
                                     page_index(page),
                                     PAGECACHE_TAG_TOWRITE);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
                account_page_writeback(page);
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        int ret;

        rcu_read_lock();
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);