*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
+
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
+ unsigned long *older_than_this;
enum writeback_sync_modes sync_mode;
+ unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
+ enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
struct completion *done; /* set if the caller waits */
};
-/*
- * Include the creation of the trace points after defining the
- * wb_writeback_work structure so that the definition remains local to this
- * file.
- */
-#define CREATE_TRACE_POINTS
-#include <trace/events/writeback.h>
-
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
return test_bit(BDI_writeback_running, &bdi->state);
}
+EXPORT_SYMBOL(writeback_in_progress);
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
struct super_block *sb = inode->i_sb;

return sb->s_bdi;
}
-static void bdi_queue_work(struct backing_dev_info *bdi,
- struct wb_writeback_work *work)
+static inline struct inode *wb_inode(struct list_head *head)
{
- trace_writeback_queue(bdi, work);
+ return list_entry(head, struct inode, i_wb_list);
+}
- spin_lock_bh(&bdi->wb_lock);
- list_add_tail(&work->list, &bdi->work_list);
+/*
+ * Include the creation of the trace points after defining the
+ * wb_writeback_work structure and inline functions so that the definition
+ * remains local to this file.
+ */
+#define CREATE_TRACE_POINTS
+#include <trace/events/writeback.h>
+
+/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
+{
if (bdi->wb.task) {
wake_up_process(bdi->wb.task);
} else {
/*
* The bdi thread isn't there, wake up the forker thread which
* will create and run it.
*/
- trace_writeback_nothread(bdi, work);
wake_up_process(default_backing_dev_info.wb.task);
}
+}
+
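+/* Queue a work item on the bdi's work list and kick the flusher to run it. */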
+static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
+{
+ trace_writeback_queue(bdi, work);
+
+ spin_lock_bh(&bdi->wb_lock);
+ list_add_tail(&work->list, &bdi->work_list);
+ if (!bdi->wb.task)
+ trace_writeback_nothread(bdi, work);
+ bdi_wakeup_flusher(bdi);
spin_unlock_bh(&bdi->wb_lock);
}
static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
- bool range_cyclic, bool for_background)
+ bool range_cyclic, enum wb_reason reason)
{
struct wb_writeback_work *work;
work->sync_mode = WB_SYNC_NONE;
work->nr_pages = nr_pages;
work->range_cyclic = range_cyclic;
- work->for_background = for_background;
+ work->reason = reason;
bdi_queue_work(bdi, work);
}
* bdi_start_writeback - start writeback
* @bdi: the backing device to write from
* @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
- * started when this function returns, we make no guarentees on
+ * started when this function returns; we make no guarantees on
* completion. Caller need not hold sb s_umount semaphore.
*
*/
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ enum wb_reason reason)
{
- __bdi_start_writeback(bdi, nr_pages, true, false);
+ __bdi_start_writeback(bdi, nr_pages, true, reason);
}
/**
* @bdi: the backing device to write from
*
* Description:
- * This does WB_SYNC_NONE background writeback. The IO is only
- * started when this function returns, we make no guarentees on
- * completion. Caller need not hold sb s_umount semaphore.
+ * This makes sure WB_SYNC_NONE background writeback happens. When
+ * this function returns, it is only guaranteed that for the given BDI
+ * some IO is happening if we are over the background dirty threshold.
+ * Caller need not hold sb s_umount semaphore.
*/
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
- __bdi_start_writeback(bdi, LONG_MAX, true, true);
+ /*
+ * We just wake up the flusher thread. It will perform background
+ * writeback as soon as there is no other work to do.
+ */
+ trace_writeback_wake_background(bdi);
+ spin_lock_bh(&bdi->wb_lock);
+ bdi_wakeup_flusher(bdi);
+ spin_unlock_bh(&bdi->wb_lock);
+}
+
+/*
+ * Remove the inode from the writeback list it is on.
+ */
+void inode_wb_list_del(struct inode *inode)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ spin_lock(&bdi->wb.list_lock);
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&bdi->wb.list_lock);
}
/*
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
-static void redirty_tail(struct inode *inode)
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
+ assert_spin_locked(&wb->list_lock);
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
- tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+ tail = wb_inode(wb->b_dirty.next);
if (time_before(inode->dirtied_when, tail->dirtied_when))
inode->dirtied_when = jiffies;
}
- list_move(&inode->i_list, &wb->b_dirty);
+ list_move(&inode->i_wb_list, &wb->b_dirty);
}
/*
* requeue inode for re-scanning after bdi->b_io list is exhausted.
*/
-static void requeue_io(struct inode *inode)
+static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
- list_move(&inode->i_list, &wb->b_more_io);
+ assert_spin_locked(&wb->list_lock);
+ list_move(&inode->i_wb_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
- /*
- * Prevent speculative execution through spin_unlock(&inode_lock);
- */
+ inode->i_state &= ~I_SYNC;
+ /* If the inode is clean and unused, put it into the LRU now... */
+ inode_add_lru(inode);
+ /* Waiters must see I_SYNC cleared before being woken up */
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
}
/*
- * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
+ * Move expired (dirtied before work->older_than_this) dirty inodes from
+ * @delaying_queue to @dispatch_queue.
*/
-static void move_expired_inodes(struct list_head *delaying_queue,
+static int move_expired_inodes(struct list_head *delaying_queue,
struct list_head *dispatch_queue,
- unsigned long *older_than_this)
+ struct wb_writeback_work *work)
{
LIST_HEAD(tmp);
struct list_head *pos, *node;
struct super_block *sb = NULL;
struct inode *inode;
int do_sb_sort = 0;
+ int moved = 0;
while (!list_empty(delaying_queue)) {
- inode = list_entry(delaying_queue->prev, struct inode, i_list);
- if (older_than_this &&
- inode_dirtied_after(inode, *older_than_this))
+ inode = wb_inode(delaying_queue->prev);
+ if (work->older_than_this &&
+ inode_dirtied_after(inode, *work->older_than_this))
break;
if (sb && sb != inode->i_sb)
do_sb_sort = 1;
sb = inode->i_sb;
- list_move(&inode->i_list, &tmp);
+ list_move(&inode->i_wb_list, &tmp);
+ moved++;
}
/* just one sb in list, splice to dispatch_queue and we're done */
if (!do_sb_sort) {
list_splice(&tmp, dispatch_queue);
- return;
+ goto out;
}
/* Move inodes from one superblock together */
while (!list_empty(&tmp)) {
- inode = list_entry(tmp.prev, struct inode, i_list);
- sb = inode->i_sb;
+ sb = wb_inode(tmp.prev)->i_sb;
list_for_each_prev_safe(pos, node, &tmp) {
- inode = list_entry(pos, struct inode, i_list);
+ inode = wb_inode(pos);
if (inode->i_sb == sb)
- list_move(&inode->i_list, dispatch_queue);
+ list_move(&inode->i_wb_list, dispatch_queue);
}
}
+out:
+ return moved;
}
/*
* |
* +--> dequeue for IO
*/
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
+ int moved;
+ assert_spin_locked(&wb->list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
- move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
+ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+ trace_writeback_queue_io(wb, work, moved);
}
static int write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
*/
-static void inode_wait_for_writeback(struct inode *inode)
+static void __inode_wait_for_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+ __acquires(inode->i_lock)
{
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
wait_queue_head_t *wqh;
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- while (inode->i_state & I_SYNC) {
- spin_unlock(&inode_lock);
+ while (inode->i_state & I_SYNC) {
+ spin_unlock(&inode->i_lock);
__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
}
}
/*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
- *
- * If `wait' is set, wait on the writeout.
- *
- * The whole writeout design is quite complex and fragile. We want to avoid
- * starvation of particular inodes when others are being redirtied, prevent
- * livelocks, etc.
- *
- * Called under inode_lock.
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
*/
-static int
-writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+void inode_wait_for_writeback(struct inode *inode)
{
- struct address_space *mapping = inode->i_mapping;
- unsigned dirty;
- int ret;
+ spin_lock(&inode->i_lock);
+ __inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
+}
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
- else
- WARN_ON(inode->i_state & I_WILL_FREE);
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is aimed for callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+{
+ DEFINE_WAIT(wait);
+ wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+ int sleep;
+
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+ sleep = inode->i_state & I_SYNC;
+ spin_unlock(&inode->i_lock);
+ if (sleep)
+ schedule();
+ finish_wait(wqh, &wait);
+}
- if (inode->i_state & I_SYNC) {
+/*
+ * Find the proper writeback list for the inode, depending on its current state
+ * and a possible change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by the flusher thread - no one else
+ * processes all inodes in writeback lists, and requeueing inodes behind the
+ * flusher thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ if (inode->i_state & I_FREEING)
+ return;
+
+ /*
+ * Sync livelock prevention. Each inode is tagged and synced in one
+ * shot. If still dirty, it will be redirty_tail()'ed below. Update
+ * the dirty time to prevent enqueue and sync it again.
+ */
+ if ((inode->i_state & I_DIRTY) &&
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+ inode->dirtied_when = jiffies;
+
+ if (wbc->pages_skipped) {
/*
- * If this inode is locked for writeback and we are not doing
- * writeback-for-data-integrity, move it to b_more_io so that
- * writeback can proceed with the other inodes on s_io.
- *
- * We'll have another go at writing back this inode when we
- * completed a full scan of b_io.
+ * writeback is not making progress due to locked
+ * buffers. Skip this inode for now.
*/
- if (wbc->sync_mode != WB_SYNC_ALL) {
- requeue_io(inode);
- return 0;
- }
+ redirty_tail(inode, wb);
+ return;
+ }
+ if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+ /*
+ * We didn't write back all the pages. nfs_writepages()
+ * sometimes bails out without doing anything.
+ */
+ if (wbc->nr_to_write <= 0) {
+ /* Slice used up. Queue for next turn. */
+ requeue_io(inode, wb);
+ } else {
+ /*
+ * Writeback blocked by something other than
+ * congestion. Delay the inode for some time to
+ * avoid spinning on the CPU (100% iowait)
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
+ */
+ redirty_tail(inode, wb);
+ }
+ } else if (inode->i_state & I_DIRTY) {
/*
- * It's a data-integrity sync. We must wait.
+ * Filesystems can dirty the inode during writeback operations,
+ * such as delayed allocation during submission or metadata
+ * updates after data IO completion.
*/
- inode_wait_for_writeback(inode);
+ redirty_tail(inode, wb);
+ } else {
+ /* The inode is clean. Remove from writeback lists. */
+ list_del_init(&inode->i_wb_list);
}
+}
- BUG_ON(inode->i_state & I_SYNC);
+/*
+ * Write out an inode and its dirty pages. Do not update the writeback list
+ * linkage. That is left to the caller. The caller is also responsible for
+ * setting I_SYNC flag and calling inode_sync_complete() to clear it.
+ */
+static int
+__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ struct address_space *mapping = inode->i_mapping;
+ long nr_to_write = wbc->nr_to_write;
+ unsigned dirty;
+ int ret;
- /* Set I_SYNC, reset I_DIRTY_PAGES */
- inode->i_state |= I_SYNC;
- inode->i_state &= ~I_DIRTY_PAGES;
- spin_unlock(&inode_lock);
+ WARN_ON(!(inode->i_state & I_SYNC));
ret = do_writepages(mapping, wbc);
* due to delalloc, clear dirty metadata flags right before
* write_inode()
*/
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
+ /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ inode->i_state &= ~I_DIRTY_PAGES;
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
int err = write_inode(inode, wbc);
if (ret == 0)
ret = err;
}
-
- spin_lock(&inode_lock);
- inode->i_state &= ~I_SYNC;
- if (!(inode->i_state & I_FREEING)) {
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- /*
- * We didn't write back all the pages. nfs_writepages()
- * sometimes bales out without doing anything.
- */
- inode->i_state |= I_DIRTY_PAGES;
- if (wbc->nr_to_write <= 0) {
- /*
- * slice used up: queue for next turn
- */
- requeue_io(inode);
- } else {
- /*
- * Writeback blocked by something other than
- * congestion. Delay the inode for some time to
- * avoid spinning on the CPU (100% iowait)
- * retrying writeback of the dirty page/inode
- * that cannot be performed immediately.
- */
- redirty_tail(inode);
- }
- } else if (inode->i_state & I_DIRTY) {
- /*
- * Filesystems can dirty the inode during writeback
- * operations, such as delayed allocation during
- * submission or metadata updates after data IO
- * completion.
- */
- redirty_tail(inode);
- } else if (atomic_read(&inode->i_count)) {
- /*
- * The inode is clean, inuse
- */
- list_move(&inode->i_list, &inode_in_use);
- } else {
- /*
- * The inode is clean, unused
- */
- list_move(&inode->i_list, &inode_unused);
- }
- }
- inode_sync_complete(inode);
+ trace_writeback_single_inode(inode, wbc, nr_to_write);
return ret;
}
/*
- * For background writeback the caller does not have the sb pinned
- * before calling writeback. So make sure that we do pin it, so it doesn't
- * go away while we are writing inodes from it.
+ * Write out an inode's dirty pages. Either the caller has an active reference
+ * on the inode or the inode has I_WILL_FREE set.
+ *
+ * This function is designed to be called for writing back one inode, which is
+ * what we do e.g. from filesystem code. The flusher thread instead uses
+ * __writeback_single_inode() and does more profound writeback list handling
+ * in writeback_sb_inodes().
*/
-static bool pin_sb_for_writeback(struct super_block *sb)
+static int
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
- spin_lock(&sb_lock);
- if (list_empty(&sb->s_instances)) {
- spin_unlock(&sb_lock);
- return false;
+ int ret = 0;
+
+ spin_lock(&inode->i_lock);
+ if (!atomic_read(&inode->i_count))
+ WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ else
+ WARN_ON(inode->i_state & I_WILL_FREE);
+
+ if (inode->i_state & I_SYNC) {
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ goto out;
+ /*
+ * It's a data-integrity sync. We must wait. Since callers hold
+ * inode reference or inode has I_WILL_FREE set, it cannot go
+ * away under us.
+ */
+ __inode_wait_for_writeback(inode);
}
+ WARN_ON(inode->i_state & I_SYNC);
+ /*
+ * Skip inode if it is clean. We don't want to mess with writeback
+ * lists in this function since flusher thread may be doing for example
+ * sync in parallel and if we move the inode, it could get skipped. So
+ * here we make sure inode is on some writeback list and leave it there
+ * unless we have completely cleaned the inode.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ goto out;
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
- sb->s_count++;
- spin_unlock(&sb_lock);
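+ /* Do the actual writeback; neither i_lock nor wb->list_lock is held here */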
+ ret = __writeback_single_inode(inode, wbc);
- if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root)
- return true;
- up_read(&sb->s_umount);
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
+ /*
+ * If inode is clean, remove it from writeback lists. Otherwise don't
+ * touch it. See comment above for explanation.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&wb->list_lock);
+ inode_sync_complete(inode);
+out:
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
+{
+ long pages;
+
+ /*
+ * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
+ * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
+ * here avoids calling into writeback_inodes_wb() more than once.
+ *
+ * The intended call sequence for WB_SYNC_ALL writeback is:
+ *
+ * wb_writeback()
+ * writeback_sb_inodes() <== called only once
+ * write_cache_pages() <== called once for each inode
+ * (quickly) tag currently dirty pages
+ * (maybe slowly) sync all tagged pages
+ */
+ if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
+ pages = LONG_MAX;
+ else {
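+ /*
+ * For WB_SYNC_NONE work, limit the chunk to half of the bdi's
+ * measured write bandwidth (and a share of the global dirty
+ * limit), cap it at the remaining nr_pages and round it to a
+ * multiple of MIN_WRITEBACK_PAGES.
+ */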
+ pages = min(bdi->avg_write_bandwidth / 2,
+ global_dirty_limit / DIRTY_SCOPE);
+ pages = min(pages, work->nr_pages);
+ pages = round_down(pages + MIN_WRITEBACK_PAGES,
+ MIN_WRITEBACK_PAGES);
}
- put_super(sb);
- return false;
+ return pages;
}
/*
* Write a portion of b_io inodes which belong to @sb.
*
- * If @only_this_sb is true, then find and write all such
- * inodes. Otherwise write only ones which go sequentially
- * in reverse order.
- *
- * Return 1, if the caller writeback routine should be
- * interrupted. Otherwise return 0.
+ * Return the number of pages and/or inodes written.
*/
-static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
- struct writeback_control *wbc, bool only_this_sb)
+static long writeback_sb_inodes(struct super_block *sb,
+ struct bdi_writeback *wb,
+ struct wb_writeback_work *work)
{
+ struct writeback_control wbc = {
+ .sync_mode = work->sync_mode,
+ .tagged_writepages = work->tagged_writepages,
+ .for_kupdate = work->for_kupdate,
+ .for_background = work->for_background,
+ .range_cyclic = work->range_cyclic,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
+ unsigned long start_time = jiffies;
+ long write_chunk;
+ long wrote = 0; /* count both pages and inodes */
+
while (!list_empty(&wb->b_io)) {
- long pages_skipped;
- struct inode *inode = list_entry(wb->b_io.prev,
- struct inode, i_list);
+ struct inode *inode = wb_inode(wb->b_io.prev);
if (inode->i_sb != sb) {
- if (only_this_sb) {
+ if (work->sb) {
/*
* We only want to write back data for this
* superblock, move all inodes not belonging
* to it back onto the dirty list.
*/
- redirty_tail(inode);
+ redirty_tail(inode, wb);
continue;
}
* Bounce back to the caller to unpin this and
* pin the next superblock.
*/
- return 0;
+ break;
}
- if (inode->i_state & (I_NEW | I_WILL_FREE)) {
- requeue_io(inode);
- continue;
- }
/*
- * Was this inode dirtied after sync_sb_inodes was called?
- * This keeps sync from extra jobs and livelock.
+ * Don't bother with new inodes or inodes being freed; the first
+ * kind does not need periodic writeout yet, and for the latter
+ * kind writeout is handled by the freer.
*/
- if (inode_dirtied_after(inode, wbc->wb_start))
- return 1;
-
- BUG_ON(inode->i_state & I_FREEING);
- __iget(inode);
- pages_skipped = wbc->pages_skipped;
- writeback_single_inode(inode, wbc);
- if (wbc->pages_skipped != pages_skipped) {
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
+ redirty_tail(inode, wb);
+ continue;
+ }
+ if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
/*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
+ * If this inode is locked for writeback and we are not
+ * doing writeback-for-data-integrity, move it to
+ * b_more_io so that writeback can proceed with the
+ * other inodes on s_io.
+ *
+ * We'll have another go at writing back this inode
+ * when we completed a full scan of b_io.
*/
- redirty_tail(inode);
+ spin_unlock(&inode->i_lock);
+ requeue_io(inode, wb);
+ trace_writeback_sb_inodes_requeue(inode);
+ continue;
}
- spin_unlock(&inode_lock);
- iput(inode);
- cond_resched();
- spin_lock(&inode_lock);
- if (wbc->nr_to_write <= 0) {
- wbc->more_io = 1;
- return 1;
+ spin_unlock(&wb->list_lock);
+
+ /*
+ * We already requeued the inode if it had I_SYNC set and we
+ * are doing WB_SYNC_NONE writeback. So this catches only the
+ * WB_SYNC_ALL case.
+ */
+ if (inode->i_state & I_SYNC) {
+ /* Wait for I_SYNC. This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ /* Inode may be gone, start again */
+ spin_lock(&wb->list_lock);
+ continue;
+ }
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
+
+ write_chunk = writeback_chunk_size(wb->bdi, work);
+ wbc.nr_to_write = write_chunk;
+ wbc.pages_skipped = 0;
+
+ /*
+ * We use I_SYNC to pin the inode in memory. While it is set
+ * evict_inode() will wait so the inode cannot be freed.
+ */
+ __writeback_single_inode(inode, &wbc);
+
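+ /* Account for the pages this chunk actually wrote */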
+ work->nr_pages -= write_chunk - wbc.nr_to_write;
+ wrote += write_chunk - wbc.nr_to_write;
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
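+ /* A fully cleaned inode counts as progress too */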
+ if (!(inode->i_state & I_DIRTY))
+ wrote++;
+ requeue_inode(inode, wb, &wbc);
+ inode_sync_complete(inode);
+ spin_unlock(&inode->i_lock);
+ cond_resched_lock(&wb->list_lock);
+ /*
+ * bail out to wb_writeback() often enough to check
+ * background threshold and other termination conditions.
+ */
+ if (wrote) {
+ if (time_is_before_jiffies(start_time + HZ / 10UL))
+ break;
+ if (work->nr_pages <= 0)
+ break;
}
- if (!list_empty(&wb->b_more_io))
- wbc->more_io = 1;
}
- /* b_io is empty */
- return 1;
+ return wrote;
}
-void writeback_inodes_wb(struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static long __writeback_inodes_wb(struct bdi_writeback *wb,
+ struct wb_writeback_work *work)
{
- int ret = 0;
-
- if (!wbc->wb_start)
- wbc->wb_start = jiffies; /* livelock avoidance */
- spin_lock(&inode_lock);
- if (!wbc->for_kupdate || list_empty(&wb->b_io))
- queue_io(wb, wbc->older_than_this);
+ unsigned long start_time = jiffies;
+ long wrote = 0;
while (!list_empty(&wb->b_io)) {
- struct inode *inode = list_entry(wb->b_io.prev,
- struct inode, i_list);
+ struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
- if (!pin_sb_for_writeback(sb)) {
- requeue_io(inode);
+ if (!grab_super_passive(sb)) {
+ /*
+ * grab_super_passive() may fail consistently due to
+ * s_umount being grabbed by someone else. Don't use
+ * requeue_io() to avoid busy retrying the inode/sb.
+ */
+ redirty_tail(inode, wb);
continue;
}
- ret = writeback_sb_inodes(sb, wb, wbc, false);
+ wrote += writeback_sb_inodes(sb, wb, work);
drop_super(sb);
- if (ret)
- break;
+ /* refer to the same tests at the end of writeback_sb_inodes */
+ if (wrote) {
+ if (time_is_before_jiffies(start_time + HZ / 10UL))
+ break;
+ if (work->nr_pages <= 0)
+ break;
+ }
}
- spin_unlock(&inode_lock);
/* Leave any unwritten inodes on b_io */
+ return wrote;
}
-static void __writeback_inodes_sb(struct super_block *sb,
- struct bdi_writeback *wb, struct writeback_control *wbc)
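+/*
+ * Write out up to @nr_pages pages from this bdi's dirty lists in
+ * WB_SYNC_NONE mode and return the number of pages actually written.
+ */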
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+ enum wb_reason reason)
{
- WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ struct wb_writeback_work work = {
+ .nr_pages = nr_pages,
+ .sync_mode = WB_SYNC_NONE,
+ .range_cyclic = 1,
+ .reason = reason,
+ };
- spin_lock(&inode_lock);
- if (!wbc->for_kupdate || list_empty(&wb->b_io))
- queue_io(wb, wbc->older_than_this);
- writeback_sb_inodes(sb, wb, wbc, true);
- spin_unlock(&inode_lock);
-}
+ spin_lock(&wb->list_lock);
+ if (list_empty(&wb->b_io))
+ queue_io(wb, &work);
+ __writeback_inodes_wb(wb, &work);
+ spin_unlock(&wb->list_lock);
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES 1024
+ return nr_pages - work.nr_pages;
+}
-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
{
unsigned long background_thresh, dirty_thresh;
global_dirty_limits(&background_thresh, &dirty_thresh);
- return (global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+ if (global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+ return true;
+
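+ /* This bdi alone may exceed its share of the background threshold */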
+ if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+ bdi_dirty_limit(bdi, background_thresh))
+ return true;
+
+ return false;
+}
+
+/*
+ * Called under wb->list_lock. If there are multiple wb per bdi,
+ * only the flusher working on the first wb should do it.
+ */
+static void wb_update_bandwidth(struct bdi_writeback *wb,
+ unsigned long start_time)
+{
+ __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}
/*
static long wb_writeback(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
- struct writeback_control wbc = {
- .sync_mode = work->sync_mode,
- .older_than_this = NULL,
- .for_kupdate = work->for_kupdate,
- .for_background = work->for_background,
- .range_cyclic = work->range_cyclic,
- };
+ unsigned long wb_start = jiffies;
+ long nr_pages = work->nr_pages;
unsigned long oldest_jif;
- long wrote = 0;
struct inode *inode;
+ long progress;
- if (wbc.for_kupdate) {
- wbc.older_than_this = &oldest_jif;
- oldest_jif = jiffies -
- msecs_to_jiffies(dirty_expire_interval * 10);
- }
- if (!wbc.range_cyclic) {
- wbc.range_start = 0;
- wbc.range_end = LLONG_MAX;
- }
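+ /*
+ * By default treat everything dirtied before this moment as expired.
+ * The kupdate/background cases refresh this cutoff inside the loop.
+ */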
+ oldest_jif = jiffies;
+ work->older_than_this = &oldest_jif;
- wbc.wb_start = jiffies; /* livelock avoidance */
+ spin_lock(&wb->list_lock);
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
if (work->nr_pages <= 0)
break;
+ /*
+ * Background writeout and kupdate-style writeback may
+ * run forever. Stop them if there is other work to do
+ * so that e.g. sync can proceed. They'll be restarted
+ * after the other works are all done.
+ */
+ if ((work->for_background || work->for_kupdate) &&
+ !list_empty(&wb->bdi->work_list))
+ break;
+
/*
* For background writeout, stop when we are below the
* background dirty threshold
*/
- if (work->for_background && !over_bground_thresh())
+ if (work->for_background && !over_bground_thresh(wb->bdi))
break;
- wbc.more_io = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- wbc.pages_skipped = 0;
+ /*
+ * Kupdate and background works are special and we want to
+ * include all inodes that need writing. Livelock avoidance is
+ * handled by these works yielding to any other work so we are
+ * safe.
+ */
+ if (work->for_kupdate) {
+ oldest_jif = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
+ } else if (work->for_background)
+ oldest_jif = jiffies;
- trace_wbc_writeback_start(&wbc, wb->bdi);
+ trace_writeback_start(wb->bdi, work);
+ if (list_empty(&wb->b_io))
+ queue_io(wb, work);
if (work->sb)
- __writeback_inodes_sb(work->sb, wb, &wbc);
+ progress = writeback_sb_inodes(work->sb, wb, work);
else
- writeback_inodes_wb(wb, &wbc);
- trace_wbc_writeback_written(&wbc, wb->bdi);
+ progress = __writeback_inodes_wb(wb, work);
+ trace_writeback_written(wb->bdi, work);
- work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ wb_update_bandwidth(wb, wb_start);
/*
- * If we consumed everything, see if we have more
+ * Did we write something? Try for more
+ *
+ * Dirty inodes are moved to b_io for writeback in batches.
+ * The completion of the current batch does not necessarily
+ * mean the overall work is done. So we keep looping as long
+ * as made some progress on cleaning pages or inodes.
*/
- if (wbc.nr_to_write <= 0)
+ if (progress)
continue;
/*
- * Didn't write everything and we don't have more IO, bail
+ * No more inodes for IO, bail
*/
- if (!wbc.more_io)
+ if (list_empty(&wb->b_more_io))
break;
- /*
- * Did we write something? Try for more
- */
- if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
- continue;
/*
* Nothing written. Wait for some inode to
* become available for writeback. Otherwise
* we'll just busyloop.
*/
- spin_lock(&inode_lock);
if (!list_empty(&wb->b_more_io)) {
- inode = list_entry(wb->b_more_io.prev,
- struct inode, i_list);
- trace_wbc_writeback_wait(&wbc, wb->bdi);
- inode_wait_for_writeback(inode);
+ trace_writeback_wait(wb->bdi, work);
+ inode = wb_inode(wb->b_more_io.prev);
+ spin_lock(&inode->i_lock);
+ spin_unlock(&wb->list_lock);
+ /* This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ spin_lock(&wb->list_lock);
}
- spin_unlock(&inode_lock);
}
+ spin_unlock(&wb->list_lock);
- return wrote;
+ return nr_pages - work->nr_pages;
}
/*
return work;
}
+/*
+ * Add in the number of potentially dirty inodes, because each inode
+ * write can dirty pagecache in the underlying blockdev.
+ */
+static unsigned long get_nr_dirty_pages(void)
+{
+ return global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) +
+ get_nr_dirty_inodes();
+}
+
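+/*
+ * Start background writeback if we are over the background dirty threshold,
+ * either globally or for this bdi.
+ */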
+static long wb_check_background_flush(struct bdi_writeback *wb)
+{
+ if (over_bground_thresh(wb->bdi)) {
+
+ struct wb_writeback_work work = {
+ .nr_pages = LONG_MAX,
+ .sync_mode = WB_SYNC_NONE,
+ .for_background = 1,
+ .range_cyclic = 1,
+ .reason = WB_REASON_BACKGROUND,
+ };
+
+ return wb_writeback(wb, &work);
+ }
+
+ return 0;
+}
+
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
unsigned long expired;
return 0;
wb->last_old_flush = jiffies;
- nr_pages = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+ nr_pages = get_nr_dirty_pages();
if (nr_pages) {
struct wb_writeback_work work = {
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
.range_cyclic = 1,
+ .reason = WB_REASON_PERIODIC,
};
return wb_writeback(wb, &work);
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
+ wrote += wb_check_background_flush(wb);
clear_bit(BDI_writeback_running, &wb->bdi->state);
return wrote;
struct backing_dev_info *bdi = wb->bdi;
long pages_written;
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ current->flags |= PF_SWAPWRITE;
set_freezable();
wb->last_active = jiffies;
trace_writeback_thread_start(bdi);
- while (!kthread_should_stop()) {
+ while (!kthread_freezable_should_stop(NULL)) {
/*
* Remove own delayed wake-up timer, since we are already awake
- * and we'll take care of the preriodic write-back.
+ * and we'll take care of the periodic write-back.
*/
del_timer(&wb->wakeup_timer);
*/
schedule();
}
-
- try_to_freeze();
}
/* Flush any work that raced with us exiting */
* Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
* the whole world.
*/
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
struct backing_dev_info *bdi;
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
- __bdi_start_writeback(bdi, nr_pages, false, false);
+ __bdi_start_writeback(bdi, nr_pages, false, reason);
}
rcu_read_unlock();
}
* In short, make sure you hash any inodes _before_ you start marking
* them dirty.
*
- * This function *must* be atomic for the I_DIRTY_PAGES case -
- * set_page_dirty() is called under spinlock in several places.
- *
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
* the kernel-internal blockdev inode represents the dirtying time of the
{
struct super_block *sb = inode->i_sb;
struct backing_dev_info *bdi = NULL;
- bool wakeup_bdi = false;
/*
* Don't do this for I_DIRTY_PAGES - that doesn't actually
*/
if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode);
+ sb->s_op->dirty_inode(inode, flags);
}
/*
if (unlikely(block_dump))
block_dump___mark_inode_dirty(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
* superblock list, based upon its state.
*/
if (inode->i_state & I_SYNC)
- goto out;
+ goto out_unlock_inode;
/*
* Only add valid (hashed) inodes to the superblock's
* dirty list. Add blockdev inodes as well.
*/
if (!S_ISBLK(inode->i_mode)) {
- if (hlist_unhashed(&inode->i_hash))
- goto out;
+ if (inode_unhashed(inode))
+ goto out_unlock_inode;
}
if (inode->i_state & I_FREEING)
- goto out;
+ goto out_unlock_inode;
/*
* If the inode was already on b_dirty/b_io/b_more_io, don't
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
+ bool wakeup_bdi = false;
bdi = inode_to_bdi(inode);
if (bdi_cap_writeback_dirty(bdi)) {
wakeup_bdi = true;
}
+ spin_unlock(&inode->i_lock);
+ spin_lock(&bdi->wb.list_lock);
inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &bdi->wb.b_dirty);
+ list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+ spin_unlock(&bdi->wb.list_lock);
+
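+ /* Defer waking the flusher thread (done via a timer) */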
+ if (wakeup_bdi)
+ bdi_wakeup_thread_delayed(bdi);
+ return;
}
}
-out:
- spin_unlock(&inode_lock);
+out_unlock_inode:
+ spin_unlock(&inode->i_lock);
- if (wakeup_bdi)
- bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
-/*
- * Write out a superblock's list of dirty inodes. A wait will be performed
- * upon no inodes, all inodes or the final one, depending upon sync_mode.
- *
- * If older_than_this is non-NULL, then only write out inodes which
- * had their first dirtying at a time earlier than *older_than_this.
- *
- * If `bdi' is non-zero then we're being asked to writeback a specific queue.
- * This function assumes that the blockdev superblock's inodes are backed by
- * a variety of queues, so all inodes are searched. For other superblocks,
- * assume that all inodes are backed by the same queue.
- *
- * The inodes to be written are parked on bdi->b_io. They are moved back onto
- * bdi->b_dirty as they are selected for writing. This way, none can be missed
- * on the writer throttling path, and we get decent balancing between many
- * throttled threads: we don't want them all piling up on inode_sync_wait.
- */
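+/*
+ * Wait for writeback on all pages of every inode on this superblock to
+ * complete (data integrity sync).
+ */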
static void wait_sb_inodes(struct super_block *sb)
{
struct inode *inode, *old_inode = NULL;
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
/*
* Data integrity sync. Must wait for all pages under writeback,
* we still have to wait for that writeout.
*/
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- struct address_space *mapping;
+ struct address_space *mapping = inode->i_mapping;
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
- continue;
- mapping = inode->i_mapping;
- if (mapping->nrpages == 0)
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ (mapping->nrpages == 0)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
+
/*
- * We hold a reference to 'inode' so it couldn't have
- * been removed from s_inodes list while we dropped the
- * inode_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it
- * under inode_lock. So we keep the reference and iput
- * it later.
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
*/
iput(old_inode);
old_inode = inode;
cond_resched();
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
}
/**
- * writeback_inodes_sb - writeback dirty inodes from given super_block
+ * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
* @sb: the superblock
+ * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
- * for IO completion of submitted IO. The number of pages submitted is
- * returned.
+ * for IO completion of submitted IO.
*/
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb_nr(struct super_block *sb,
+ unsigned long nr,
+ enum wb_reason reason)
{
- unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
- unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
DECLARE_COMPLETION_ONSTACK(done);
struct wb_writeback_work work = {
- .sb = sb,
- .sync_mode = WB_SYNC_NONE,
- .done = &done,
+ .sb = sb,
+ .sync_mode = WB_SYNC_NONE,
+ .tagged_writepages = 1,
+ .done = &done,
+ .nr_pages = nr,
+ .reason = reason,
};
+ if (sb->s_bdi == &noop_backing_dev_info)
+ return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
- work.nr_pages = nr_dirty + nr_unstable +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
-
bdi_queue_work(sb->s_bdi, &work);
wait_for_completion(&done);
}
+EXPORT_SYMBOL(writeback_inodes_sb_nr);
+
+/**
+ * writeback_inodes_sb - writeback dirty inodes from given super_block
+ * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
+ *
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO.
+ */
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
+{
+ return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
+}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
*/
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
if (!writeback_in_progress(sb->s_bdi)) {
down_read(&sb->s_umount);
- writeback_inodes_sb(sb);
+ writeback_inodes_sb(sb, reason);
up_read(&sb->s_umount);
return 1;
} else
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
+/**
+ * writeback_inodes_sb_nr_if_idle - start writeback if none underway
+ * @sb: the superblock
+ * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
+ *
+ * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Returns 1 if writeback was started, 0 if not.
+ */
+int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
+ unsigned long nr,
+ enum wb_reason reason)
+{
+ if (!writeback_in_progress(sb->s_bdi)) {
+ down_read(&sb->s_umount);
+ writeback_inodes_sb_nr(sb, nr, reason);
+ up_read(&sb->s_umount);
+ return 1;
+ } else
+ return 0;
+}
+EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
+
/**
* sync_inodes_sb - sync sb inode pages
* @sb: the superblock
*
* This function writes and waits on any dirty inode belonging to this
- * super_block. The number of pages synced is returned.
+ * super_block.
*/
void sync_inodes_sb(struct super_block *sb)
{
.nr_pages = LONG_MAX,
.range_cyclic = 0,
.done = &done,
+ .reason = WB_REASON_SYNC,
};
+ /* Nothing to do? */
+ if (sb->s_bdi == &noop_backing_dev_info)
+ return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_queue_work(sb->s_bdi, &work);
*/
int write_inode_now(struct inode *inode, int sync)
{
- int ret;
+ struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
wbc.nr_to_write = 0;
might_sleep();
- spin_lock(&inode_lock);
- ret = writeback_single_inode(inode, &wbc);
- spin_unlock(&inode_lock);
- if (sync)
- inode_sync_wait(inode);
- return ret;
+ return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
*/
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
- int ret;
-
- spin_lock(&inode_lock);
- ret = writeback_single_inode(inode, wbc);
- spin_unlock(&inode_lock);
- return ret;
+ return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
+
+/**
+ * sync_inode_metadata - write an inode to disk
+ * @inode: the inode to sync
+ * @wait: wait for I/O to complete.
+ *
+ * Write an inode to disk and adjust its dirty state after completion.
+ *
+ * Note: only writes the actual inode, no associated data or other metadata.
+ */
+int sync_inode_metadata(struct inode *inode, int wait)
+{
+ struct writeback_control wbc = {
+ .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .nr_to_write = 0, /* metadata-only */
+ };
+
+ return sync_inode(inode, &wbc);
+}
+EXPORT_SYMBOL(sync_inode_metadata);