nv-tegra.nvidia Code Review - linux-3.10.git/blobdiff - fs/gfs2/log.c
block: unify flags for struct bio and struct request
index 4dcc7a8cda22eedfd9b2cb121599e3276e28ce8c..cde1248a62255ae9bdb03b4c2757a71e973fa089 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
-#include <linux/lm_interface.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/bio.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -28,6 +28,7 @@
 #include "meta_io.h"
 #include "util.h"
 #include "dir.h"
+#include "trace_gfs2.h"
 
 #define PULL 1
 
@@ -87,13 +88,13 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
  */
 
 static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+__releases(&sdp->sd_log_lock)
+__acquires(&sdp->sd_log_lock)
 {
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;
        int retry;
 
-       BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
-
        do {
                retry = 0;
 
@@ -120,7 +121,7 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
                        lock_buffer(bh);
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
-                               submit_bh(WRITE, bh);
+                               submit_bh(WRITE_SYNC_PLUG, bh);
                        } else {
                                unlock_buffer(bh);
                                brelse(bh);
@@ -167,12 +168,11 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
        return list_empty(&ai->ai_ail1_list);
 }
 
-static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+static void gfs2_ail1_start(struct gfs2_sbd *sdp)
 {
        struct list_head *head;
        u64 sync_gen;
-       struct list_head *first;
-       struct gfs2_ail *first_ai, *ai, *tmp;
+       struct gfs2_ail *ai;
        int done = 0;
 
        gfs2_log_lock(sdp);
@@ -183,21 +183,9 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
        }
        sync_gen = sdp->sd_ail_sync_gen++;
 
-       first = head->prev;
-       first_ai = list_entry(first, struct gfs2_ail, ai_list);
-       first_ai->ai_sync_gen = sync_gen;
-       gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */
-
-       if (flags & DIO_ALL)
-               first = NULL;
-
        while(!done) {
-               if (first && (head->prev != first ||
-                             gfs2_ail1_empty_one(sdp, first_ai, 0)))
-                       break;
-
                done = 1;
-               list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
+               list_for_each_entry_reverse(ai, head, ai_list) {
                        if (ai->ai_sync_gen >= sync_gen)
                                continue;
                        ai->ai_sync_gen = sync_gen;
@@ -289,70 +277,67 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
  * flush time, so we ensure that we have just enough free blocks at all
  * times to avoid running out during a log flush.
  *
+ * We no longer flush the log here; instead we wake up logd to do that
+ * for us. To avoid the thundering herd and to ensure that we deal fairly
+ * with queued waiters, we use an exclusive wait. This means that when we
+ * get woken with enough journal space to get our reservation, we need to
+ * wake the next waiter on the list.
+ *
  * Returns: errno
  */
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 {
-       unsigned int try = 0;
        unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
+       unsigned wanted = blks + reserved_blks;
+       DEFINE_WAIT(wait);
+       int did_wait = 0;
+       unsigned int free_blocks;
 
        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
-
-       mutex_lock(&sdp->sd_log_reserve_mutex);
-       gfs2_log_lock(sdp);
-       while(atomic_read(&sdp->sd_log_blks_free) <= (blks + reserved_blks)) {
-               gfs2_log_unlock(sdp);
-               gfs2_ail1_empty(sdp, 0);
-               gfs2_log_flush(sdp, NULL);
-
-               if (try++)
-                       gfs2_ail1_start(sdp, 0);
-               gfs2_log_lock(sdp);
+retry:
+       free_blocks = atomic_read(&sdp->sd_log_blks_free);
+       if (unlikely(free_blocks <= wanted)) {
+               do {
+                       prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       wake_up(&sdp->sd_logd_waitq);
+                       did_wait = 1;
+                       if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
+                               io_schedule();
+                       free_blocks = atomic_read(&sdp->sd_log_blks_free);
+               } while(free_blocks <= wanted);
+               finish_wait(&sdp->sd_log_waitq, &wait);
        }
-       atomic_sub(blks, &sdp->sd_log_blks_free);
-       gfs2_log_unlock(sdp);
-       mutex_unlock(&sdp->sd_log_reserve_mutex);
+       if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
+                               free_blocks - blks) != free_blocks)
+               goto retry;
+       trace_gfs2_log_blocks(sdp, -blks);
+
+       /*
+        * If we waited, then so might others; wake them up _after_ we get
+        * our share of the log.
+        */
+       if (unlikely(did_wait))
+               wake_up(&sdp->sd_log_waitq);
 
        down_read(&sdp->sd_log_flush_lock);
 
        return 0;
 }
 
-/**
- * gfs2_log_release - Release a given number of log blocks
- * @sdp: The GFS2 superblock
- * @blks: The number of blocks
- *
- */
-
-void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
-{
-
-       gfs2_log_lock(sdp);
-       atomic_add(blks, &sdp->sd_log_blks_free);
-       gfs2_assert_withdraw(sdp,
-                            atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
-       gfs2_log_unlock(sdp);
-       up_read(&sdp->sd_log_flush_lock);
-}
-
 static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
 {
-       struct inode *inode = sdp->sd_jdesc->jd_inode;
-       int error;
-       struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
+       struct gfs2_journal_extent *je;
 
-       bh_map.b_size = 1 << inode->i_blkbits;
-       error = gfs2_block_map(inode, lbn, 0, &bh_map);
-       if (error || !bh_map.b_blocknr)
-               printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
-                      (unsigned long long)bh_map.b_blocknr, lbn);
-       gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
+       list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
+               if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
+                       return je->dblock + lbn - je->lblock;
+       }
 
-       return bh_map.b_blocknr;
+       return -1;
 }
 
 /**
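[Editor's note: the new gfs2_log_reserve() above replaces the sd_log_reserve_mutex serialisation with an exclusive wait on sd_log_waitq plus a lock-free claim via atomic_cmpxchg(): re-read the free-block count and retry the compare-and-swap until no other reserver raced in between. Here is a rough user-space sketch of that claim loop in C11 atomics; the names and the busy-wait stand-in for io_schedule() are illustrative, not GFS2 code.]

#include <stdatomic.h>

/* Illustrative counter standing in for sd_log_blks_free. */
static atomic_uint log_blks_free;

static void reserve_blocks(unsigned int blks)
{
	unsigned int cur = atomic_load(&log_blks_free);

	for (;;) {
		/* Busy-wait stand-in for the exclusive sleep on sd_log_waitq. */
		while (cur <= blks)
			cur = atomic_load(&log_blks_free);
		/*
		 * Try to claim: on failure another thread changed the
		 * counter and atomic_compare_exchange_weak() reloads cur
		 * with the current value, so we simply loop and retry --
		 * the same retry that "goto retry" performs above.
		 */
		if (atomic_compare_exchange_weak(&log_blks_free, &cur,
						 cur - blks))
			return;
	}
}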
@@ -418,7 +403,7 @@ static unsigned int calc_reserved(struct gfs2_sbd *sdp)
        databufhdrs_needed = (sdp->sd_log_commited_databuf +
                              (dbuf_limit - 1)) / dbuf_limit;
 
-       if (sdp->sd_log_commited_revoke)
+       if (sdp->sd_log_commited_revoke > 0)
                revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));
 
@@ -560,10 +545,10 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 
        ail2_empty(sdp, new_tail);
 
-       gfs2_log_lock(sdp);
        atomic_add(dist, &sdp->sd_log_blks_free);
-       gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
-       gfs2_log_unlock(sdp);
+       trace_gfs2_log_blocks(sdp, dist);
+       gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+                            sdp->sd_jdesc->jd_blocks);
 
        sdp->sd_log_tail = new_tail;
 }
@@ -588,7 +573,6 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
-       unlock_buffer(bh);
 
        gfs2_ail1_empty(sdp, 0);
        tail = current_tail(sdp);
@@ -597,7 +581,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
+       lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
+       lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
@@ -605,8 +591,24 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);
 
-       set_buffer_dirty(bh);
-       if (sync_dirty_buffer(bh))
+       bh->b_end_io = end_buffer_write_sync;
+       if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
+               goto skip_barrier;
+       get_bh(bh);
+       submit_bh(WRITE_BARRIER | REQ_META, bh);
+       wait_on_buffer(bh);
+       if (buffer_eopnotsupp(bh)) {
+               clear_buffer_eopnotsupp(bh);
+               set_buffer_uptodate(bh);
+               fs_info(sdp, "barrier sync failed - disabling barriers\n");
+               set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+               lock_buffer(bh);
+skip_barrier:
+               get_bh(bh);
+               submit_bh(WRITE_SYNC | REQ_META, bh);
+               wait_on_buffer(bh);
+       }
+       if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);
 
@@ -654,7 +656,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
                lock_buffer(bh);
                if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
                        bh->b_end_io = end_buffer_write_sync;
-                       submit_bh(WRITE, bh);
+                       submit_bh(WRITE_SYNC_PLUG, bh);
                } else {
                        unlock_buffer(bh);
                        brelse(bh);
@@ -694,7 +696,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
  *
  */
 
-void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 {
        struct gfs2_ail *ai;
 
@@ -705,6 +707,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
+       trace_gfs2_log_flush(sdp, 1);
 
        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
@@ -736,6 +739,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
                gfs2_log_lock(sdp);
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
+               trace_gfs2_log_blocks(sdp, -1);
                gfs2_log_unlock(sdp);
                log_write_header(sdp, 0, PULL);
        }
@@ -753,8 +757,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
                ai = NULL;
        }
        gfs2_log_unlock(sdp);
-
-       sdp->sd_vfs->s_dirt = 0;
+       trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);
 
        kfree(ai);
@@ -763,7 +766,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
        unsigned int reserved;
-       unsigned int old;
+       unsigned int unused;
 
        gfs2_log_lock(sdp);
 
@@ -773,41 +776,59 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
                             (((int)sdp->sd_log_commited_databuf) >= 0));
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
-       gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
        reserved = calc_reserved(sdp);
-       old = atomic_read(&sdp->sd_log_blks_free);
-       atomic_add(tr->tr_reserved - (reserved - sdp->sd_log_blks_reserved),
-                  &sdp->sd_log_blks_free);
-
-       gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) >= old);
+       gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
+       unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
+       atomic_add(unused, &sdp->sd_log_blks_free);
+       trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
-
        sdp->sd_log_blks_reserved = reserved;
 
        gfs2_log_unlock(sdp);
 }
 
+static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+       struct list_head *head = &tr->tr_list_buf;
+       struct gfs2_bufdata *bd;
+
+       gfs2_log_lock(sdp);
+       while (!list_empty(head)) {
+               bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
+               list_del_init(&bd->bd_list_tr);
+               tr->tr_num_buf--;
+       }
+       gfs2_log_unlock(sdp);
+       gfs2_assert_warn(sdp, !tr->tr_num_buf);
+}
+
 /**
  * gfs2_log_commit - Commit a transaction to the log
  * @sdp: the filesystem
  * @tr: the transaction
  *
+ * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
+ * or the total number of used blocks (pinned blocks plus AIL blocks)
+ * is greater than thresh2.
+ *
+ * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
+ * journal size.
+ *
  * Returns: errno
  */
 
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
        log_refund(sdp, tr);
-       lops_incore_commit(sdp, tr);
+       buf_lo_incore_commit(sdp, tr);
 
-       sdp->sd_vfs->s_dirt = 1;
        up_read(&sdp->sd_log_flush_lock);
 
-       gfs2_log_lock(sdp);
-       if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
-               wake_up_process(sdp->sd_logd_process);
-       gfs2_log_unlock(sdp);
+       if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
+           ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
+           atomic_read(&sdp->sd_log_thresh2)))
+               wake_up(&sdp->sd_logd_waitq);
 }
 
 /**
@@ -854,13 +875,23 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
 {
        gfs2_log_flush(sdp, NULL);
        for (;;) {
-               gfs2_ail1_start(sdp, DIO_ALL);
+               gfs2_ail1_start(sdp);
                if (gfs2_ail1_empty(sdp, DIO_ALL))
                        break;
                msleep(10);
        }
 }
 
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+       return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+       unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
+       return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+}
 
 /**
  * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
@@ -873,28 +904,43 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
 int gfs2_logd(void *data)
 {
        struct gfs2_sbd *sdp = data;
-       unsigned long t;
-       int need_flush;
+       unsigned long t = 1;
+       DEFINE_WAIT(wait);
+       unsigned preflush;
 
        while (!kthread_should_stop()) {
-               /* Advance the log tail */
 
-               t = sdp->sd_log_flush_time +
-                   gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
+               preflush = atomic_read(&sdp->sd_log_pinned);
+               if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
+                       gfs2_ail1_empty(sdp, DIO_ALL);
+                       gfs2_log_flush(sdp, NULL);
+                       gfs2_ail1_empty(sdp, DIO_ALL);
+               }
 
-               gfs2_ail1_empty(sdp, DIO_ALL);
-               gfs2_log_lock(sdp);
-               need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
-               gfs2_log_unlock(sdp);
-               if (need_flush || time_after_eq(jiffies, t)) {
+               if (gfs2_ail_flush_reqd(sdp)) {
+                       gfs2_ail1_start(sdp);
+                       io_schedule();
+                       gfs2_ail1_empty(sdp, 0);
                        gfs2_log_flush(sdp, NULL);
-                       sdp->sd_log_flush_time = jiffies;
+                       gfs2_ail1_empty(sdp, DIO_ALL);
                }
 
+               wake_up(&sdp->sd_log_waitq);
                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
                if (freezing(current))
                        refrigerator();
-               schedule_timeout_interruptible(t);
+
+               do {
+                       prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       if (!gfs2_ail_flush_reqd(sdp) &&
+                           !gfs2_jrnl_flush_reqd(sdp) &&
+                           !kthread_should_stop())
+                               t = schedule_timeout(t);
+               } while(t && !gfs2_ail_flush_reqd(sdp) &&
+                       !gfs2_jrnl_flush_reqd(sdp) &&
+                       !kthread_should_stop());
+               finish_wait(&sdp->sd_logd_waitq, &wait);
        }
 
        return 0;
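[Editor's note: the reworked gfs2_logd() main loop above sleeps with prepare_to_wait()/schedule_timeout() and exits the sleep on any of three conditions: a flush becomes required, the thread is asked to stop, or the timeout expires (t reaches 0, which forces a flush on the next iteration). Below is a rough pthread equivalent of that predicate-plus-timeout sleep; all names (wait_for_work, flush_reqd, should_stop) are illustrative stand-ins, not kernel or GFS2 API.]

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool flush_reqd;		/* stand-in for the *_flush_reqd() checks */
static bool should_stop;	/* stand-in for kthread_should_stop()     */

/*
 * Sleep until work arrives, a stop is requested, or secs elapse.
 * Returns false on timeout (the t == 0 case above, where gfs2_logd
 * flushes unconditionally on the next pass), true otherwise.
 */
static bool wait_for_work(unsigned int secs)
{
	struct timespec deadline;
	bool woken = true;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += secs;

	pthread_mutex_lock(&lock);
	while (!flush_reqd && !should_stop) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline) ==
		    ETIMEDOUT) {
			woken = false;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return woken;
}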