jbd2: fix the way the b_modified flag is cleared
[linux-2.6.git] / fs / jbd2 / commit.c
index 70b2ae1..988fbec 100644 (file)
@@ -20,7 +20,8 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
+#include <linux/jiffies.h>
+#include <linux/crc32.h>
 
 /*
  * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -93,19 +94,23 @@ static int inverted_lock(journal_t *journal, struct buffer_head *bh)
        return 1;
 }
 
-/* Done it all: now write the commit record.  We should have
+/*
+ * Done it all: now submit the commit record.  We should have
  * cleaned up our previous buffers by now, so if we are in abort
  * mode we can now just skip the rest of the journal write
  * entirely.
  *
  * Returns 1 if the journal needs to be aborted or 0 on success
  */
-static int journal_write_commit_record(journal_t *journal,
-                                       transaction_t *commit_transaction)
+static int journal_submit_commit_record(journal_t *journal,
+                                       transaction_t *commit_transaction,
+                                       struct buffer_head **cbh,
+                                       __u32 crc32_sum)
 {
        struct journal_head *descriptor;
+       struct commit_header *tmp;
        struct buffer_head *bh;
-       int i, ret;
+       int ret;
        int barrier_done = 0;
 
        if (is_journal_aborted(journal))
@@ -117,21 +122,35 @@ static int journal_write_commit_record(journal_t *journal,
 
        bh = jh2bh(descriptor);
 
-       /* AKPM: buglet - add `i' to tmp! */
-       for (i = 0; i < bh->b_size; i += 512) {
-               journal_header_t *tmp = (journal_header_t*)bh->b_data;
-               tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
-               tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
-               tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
+       tmp = (struct commit_header *)bh->b_data;
+       tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+       tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
+       tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
+
+       if (JBD2_HAS_COMPAT_FEATURE(journal,
+                                   JBD2_FEATURE_COMPAT_CHECKSUM)) {
+               tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
+               tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
+               tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }
 
-       JBUFFER_TRACE(descriptor, "write commit block");
+       JBUFFER_TRACE(descriptor, "submit commit block");
+       lock_buffer(bh);
+       get_bh(bh);
        set_buffer_dirty(bh);
-       if (journal->j_flags & JBD2_BARRIER) {
+       set_buffer_uptodate(bh);
+       bh->b_end_io = journal_end_buffer_io_sync;
+
+       if (journal->j_flags & JBD2_BARRIER &&
+               !JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                        JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
-       ret = sync_dirty_buffer(bh);
+       ret = submit_bh(WRITE, bh);
+       if (barrier_done)
+               clear_buffer_ordered(bh);
+
        /* is it possible for another commit to fail at roughly
         * the same time as this one?  If so, we don't want to
         * trust the barrier flag in the super, but instead want
@@ -149,17 +168,74 @@ static int journal_write_commit_record(journal_t *journal,
                spin_unlock(&journal->j_state_lock);
 
                /* And try again, without the barrier */
-               clear_buffer_ordered(bh);
                set_buffer_uptodate(bh);
                set_buffer_dirty(bh);
-               ret = sync_dirty_buffer(bh);
+               ret = submit_bh(WRITE, bh);
        }
-       put_bh(bh);             /* One for getblk() */
-       jbd2_journal_put_journal_head(descriptor);
+       *cbh = bh;
+       return ret;
+}
 
-       return (ret == -EIO);
+/*
+ * This function, together with journal_submit_commit_record(),
+ * allows the commit record to be written asynchronously.
+ */
+static int journal_wait_on_commit_record(struct buffer_head *bh)
+{
+       int ret = 0;
+
+       clear_buffer_dirty(bh);
+       wait_on_buffer(bh);
+
+       if (unlikely(!buffer_uptodate(bh)))
+               ret = -EIO;
+       put_bh(bh);            /* One for getblk() */
+       jbd2_journal_put_journal_head(bh2jh(bh));
+
+       return ret;
 }
 
+/*
+ * Wait for all submitted IO to complete.
+ */
+static int journal_wait_on_locked_list(journal_t *journal,
+                                      transaction_t *commit_transaction)
+{
+       int ret = 0;
+       struct journal_head *jh;
+
+       while (commit_transaction->t_locked_list) {
+               struct buffer_head *bh;
+
+               jh = commit_transaction->t_locked_list->b_tprev;
+               bh = jh2bh(jh);
+               get_bh(bh);
+               if (buffer_locked(bh)) {
+                       spin_unlock(&journal->j_list_lock);
+                       wait_on_buffer(bh);
+                       if (unlikely(!buffer_uptodate(bh)))
+                               ret = -EIO;
+                       spin_lock(&journal->j_list_lock);
+               }
+               if (!inverted_lock(journal, bh)) {
+                       put_bh(bh);
+                       spin_lock(&journal->j_list_lock);
+                       continue;
+               }
+               if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd_unlock_bh_state(bh);
+                       jbd2_journal_remove_journal_head(bh);
+                       put_bh(bh);
+               } else {
+                       jbd_unlock_bh_state(bh);
+               }
+               put_bh(bh);
+               cond_resched_lock(&journal->j_list_lock);
+       }
+       return ret;
+}
+
 static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
 {
        int i;
@@ -248,8 +324,12 @@ write_out_data:
                                bufs = 0;
                                goto write_out_data;
                        }
-               }
-               else {
+               } else if (!locked && buffer_locked(bh)) {
+                       __jbd2_journal_file_buffer(jh, commit_transaction,
+                                               BJ_Locked);
+                       jbd_unlock_bh_state(bh);
+                       put_bh(bh);
+               } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        __jbd2_journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
@@ -262,7 +342,7 @@ write_out_data:
                        put_bh(bh);
                }
 
-               if (lock_need_resched(&journal->j_list_lock)) {
+               if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
@@ -271,11 +351,25 @@ write_out_data:
        journal_do_submit_data(wbuf, bufs);
 }
 
-static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
+static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
+{
+       struct page *page = bh->b_page;
+       char *addr;
+       __u32 checksum;
+
+       addr = kmap_atomic(page, KM_USER0);
+       checksum = crc32_be(crc32_sum,
+               (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
+       kunmap_atomic(addr, KM_USER0);
+
+       return checksum;
+}
+
+static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                                   unsigned long long block)
 {
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
-       if (tag_bytes > JBD_TAG_SIZE32)
+       if (tag_bytes > JBD2_TAG_SIZE32)
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
@@ -287,6 +381,7 @@ static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
  */
 void jbd2_journal_commit_transaction(journal_t *journal)
 {
+       struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
@@ -302,6 +397,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        int tag_flag;
        int i;
        int tag_bytes = journal_tag_bytes(journal);
+       struct buffer_head *cbh = NULL; /* For transactional checksums */
+       __u32 crc32_sum = ~0;
 
        /*
         * First job: lock down the current transaction and wait for
@@ -334,6 +431,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;
 
+       stats.u.run.rs_wait = commit_transaction->t_max_wait;
+       stats.u.run.rs_locked = jiffies;
+       stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
+                                               stats.u.run.rs_locked);
+
        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);
@@ -381,7 +483,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                        struct buffer_head *bh = jh2bh(jh);
 
                        jbd_lock_bh_state(bh);
-                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
+                       jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
@@ -404,6 +506,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         */
        jbd2_journal_switch_revoke_table(journal);
 
+       stats.u.run.rs_flushing = jiffies;
+       stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
+                                              stats.u.run.rs_flushing);
+
        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
@@ -414,22 +520,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        jbd_debug (3, "JBD: commit phase 2\n");
 
        /*
-        * First, drop modified flag: all accesses to the buffers
-        * will be tracked for a new trasaction only -bzzz
-        */
-       spin_lock(&journal->j_list_lock);
-       if (commit_transaction->t_buffers) {
-               new_jh = jh = commit_transaction->t_buffers->b_tnext;
-               do {
-                       J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
-                                       new_jh->b_modified == 0);
-                       new_jh->b_modified = 0;
-                       new_jh = new_jh->b_tnext;
-               } while (new_jh != jh);
-       }
-       spin_unlock(&journal->j_list_lock);
-
-       /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
@@ -437,42 +527,19 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        journal_submit_data_buffers(journal, commit_transaction);
 
        /*
-        * Wait for all previously submitted IO to complete.
+        * Wait for all previously submitted IO to complete if commit
+        * record is to be written synchronously.
         */
        spin_lock(&journal->j_list_lock);
-       while (commit_transaction->t_locked_list) {
-               struct buffer_head *bh;
+       if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
+               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
+               err = journal_wait_on_locked_list(journal,
+                                               commit_transaction);
 
-               jh = commit_transaction->t_locked_list->b_tprev;
-               bh = jh2bh(jh);
-               get_bh(bh);
-               if (buffer_locked(bh)) {
-                       spin_unlock(&journal->j_list_lock);
-                       wait_on_buffer(bh);
-                       if (unlikely(!buffer_uptodate(bh)))
-                               err = -EIO;
-                       spin_lock(&journal->j_list_lock);
-               }
-               if (!inverted_lock(journal, bh)) {
-                       put_bh(bh);
-                       spin_lock(&journal->j_list_lock);
-                       continue;
-               }
-               if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
-                       __jbd2_journal_unfile_buffer(jh);
-                       jbd_unlock_bh_state(bh);
-                       jbd2_journal_remove_journal_head(bh);
-                       put_bh(bh);
-               } else {
-                       jbd_unlock_bh_state(bh);
-               }
-               put_bh(bh);
-               cond_resched_lock(&journal->j_list_lock);
-       }
        spin_unlock(&journal->j_list_lock);
 
        if (err)
-               __jbd2_journal_abort_hard(journal);
+               jbd2_journal_abort(journal, err);
 
        jbd2_journal_write_revoke_records(journal, commit_transaction);
 
@@ -495,6 +562,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         */
        commit_transaction->t_state = T_COMMIT;
 
+       stats.u.run.rs_logging = jiffies;
+       stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
+                                                stats.u.run.rs_logging);
+       stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
+       stats.u.run.rs_blocks_logged = 0;
+
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {
@@ -530,7 +603,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                        descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
-                               __jbd2_journal_abort_hard(journal);
+                               jbd2_journal_abort(journal, -EIO);
                                continue;
                        }
 
@@ -563,7 +636,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
-                       __jbd2_journal_abort_hard(journal);
+                       jbd2_journal_abort(journal, err);
                        continue;
                }
 
@@ -636,6 +709,15 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
+                               /*
+                                * Compute checksum.
+                                */
+                               if (JBD2_HAS_COMPAT_FEATURE(journal,
+                                       JBD2_FEATURE_COMPAT_CHECKSUM)) {
+                                       crc32_sum =
+                                           jbd2_checksum_data(crc32_sum, bh);
+                               }
+
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
@@ -643,6 +725,7 @@ start_journal_io:
                                submit_bh(WRITE, bh);
                        }
                        cond_resched();
+                       stats.u.run.rs_blocks_logged += bufs;
 
                        /* Force a new descriptor to be generated next
                            time round the loop. */
@@ -651,6 +734,23 @@ start_journal_io:
                }
        }
 
+       /* Done it all: now write the commit record asynchronously. */
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
+               err = journal_submit_commit_record(journal, commit_transaction,
+                                                &cbh, crc32_sum);
+               if (err)
+                       __jbd2_journal_abort_hard(journal);
+
+               spin_lock(&journal->j_list_lock);
+               err = journal_wait_on_locked_list(journal,
+                                               commit_transaction);
+               spin_unlock(&journal->j_list_lock);
+               if (err)
+                       __jbd2_journal_abort_hard(journal);
+       }
+
        /* Lo and behold: we have just managed to send a transaction to
            the log.  Before we can commit it, wait for the IO so far to
            complete.  Control buffers being written are on the
@@ -750,11 +850,18 @@ wait_for_iobuf:
 
        jbd_debug(3, "JBD: commit phase 6\n");
 
-       if (journal_write_commit_record(journal, commit_transaction))
-               err = -EIO;
+       if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
+               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
+               err = journal_submit_commit_record(journal, commit_transaction,
+                                               &cbh, crc32_sum);
+               if (err)
+                       __jbd2_journal_abort_hard(journal);
+       }
+       if (!err && !is_journal_aborted(journal))
+               err = journal_wait_on_commit_record(cbh);
 
        if (err)
-               __jbd2_journal_abort_hard(journal);
+               jbd2_journal_abort(journal, err);
 
        /* End of a transaction!  Finally, we can do checkpoint
            processing: any buffers committed as a result of this
@@ -798,14 +905,14 @@ restart_loop:
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
-                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
+                       jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
-                       jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+                       jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }
 
@@ -813,6 +920,7 @@ restart_loop:
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
+                       cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }
 
@@ -864,10 +972,10 @@ restart_loop:
        }
        spin_unlock(&journal->j_list_lock);
        /*
-        * This is a bit sleazy.  We borrow j_list_lock to protect
-        * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
-        * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
-        * it's a bit hassle to hold that across __jbd2_journal_remove_checkpoint
+        * This is a bit sleazy.  We use j_list_lock to protect transition
+        * of a transaction into T_FINISHED state and calling
+        * __jbd2_journal_drop_transaction(). Otherwise we could race with
+        * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
@@ -887,13 +995,44 @@ restart_loop:
 
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
 
+       commit_transaction->t_start = jiffies;
+       stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
+                                               commit_transaction->t_start);
+
+       /*
+        * File the transaction for history
+        */
+       stats.ts_type = JBD2_STATS_RUN;
+       stats.ts_tid = commit_transaction->t_tid;
+       stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
+       spin_lock(&journal->j_history_lock);
+       memcpy(journal->j_history + journal->j_history_cur, &stats,
+                       sizeof(stats));
+       if (++journal->j_history_cur == journal->j_history_max)
+               journal->j_history_cur = 0;
+
+       /*
+        * Calculate overall stats
+        */
+       journal->j_stats.ts_tid++;
+       journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
+       journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
+       journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
+       journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
+       journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
+       journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
+       journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
+       journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
+       spin_unlock(&journal->j_history_lock);
+
        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        spin_unlock(&journal->j_state_lock);
 
-       if (commit_transaction->t_checkpoint_list == NULL) {
+       if (commit_transaction->t_checkpoint_list == NULL &&
+           commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {