nv-tegra.nvidia Code Review - linux-2.6.git/blobdiff - fs/xfs/xfs_trans.c
xfs: Improve scalability of busy extent tracking
[linux-2.6.git] / fs / xfs / xfs_trans.c
index ee2721e0de4d7fd700e22e8a3b635255c14696cc..40d9595a8de2ed28df29632a4d3f68ecd871d3de 100644 (file)
 #include "xfs_quota.h"
 #include "xfs_trans_priv.h"
 #include "xfs_trans_space.h"
-
-
-STATIC void    xfs_trans_apply_sb_deltas(xfs_trans_t *);
-STATIC uint    xfs_trans_count_vecs(xfs_trans_t *);
-STATIC void    xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
-STATIC void    xfs_trans_uncommit(xfs_trans_t *, uint);
-STATIC void    xfs_trans_committed(xfs_trans_t *, int);
-STATIC void    xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
-STATIC void    xfs_trans_free(xfs_trans_t *);
+#include "xfs_inode_item.h"
+#include "xfs_trace.h"
 
 kmem_zone_t    *xfs_trans_zone;
 
-
 /*
  * Reservation functions here avoid a huge stack in xfs_trans_init
  * due to register overflow from temporaries in the calculations.
  */
-
 STATIC uint
 xfs_calc_write_reservation(xfs_mount_t *mp)
 {
@@ -234,30 +225,48 @@ xfs_trans_alloc(
        xfs_mount_t     *mp,
        uint            type)
 {
-       vfs_wait_for_freeze(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
-       return _xfs_trans_alloc(mp, type);
+       xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
+       return _xfs_trans_alloc(mp, type, KM_SLEEP);
 }
 
 xfs_trans_t *
 _xfs_trans_alloc(
        xfs_mount_t     *mp,
-       uint            type)
+       uint            type,
+       uint            memflags)
 {
        xfs_trans_t     *tp;
 
        atomic_inc(&mp->m_active_trans);
 
-       tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+       tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
        tp->t_magic = XFS_TRANS_MAGIC;
        tp->t_type = type;
        tp->t_mountp = mp;
        tp->t_items_free = XFS_LIC_NUM_SLOTS;
-       tp->t_busy_free = XFS_LBC_NUM_SLOTS;
-       XFS_LIC_INIT(&(tp->t_items));
-       XFS_LBC_INIT(&(tp->t_busy));
+       xfs_lic_init(&(tp->t_items));
+       INIT_LIST_HEAD(&tp->t_busy);
        return tp;
 }
 
+/*
+ * Free the transaction structure.  If there is more clean up
+ * to do when the structure is freed, add it here.
+ */
+STATIC void
+xfs_trans_free(
+       struct xfs_trans        *tp)
+{
+       struct xfs_busy_extent  *busyp, *n;
+
+       list_for_each_entry_safe(busyp, n, &tp->t_busy, list)
+               xfs_alloc_busy_clear(tp->t_mountp, busyp);
+
+       atomic_dec(&tp->t_mountp->m_active_trans);
+       xfs_trans_free_dqinfo(tp);
+       kmem_zone_free(xfs_trans_zone, tp);
+}
+
 /*
  * This is called to create a new transaction which will share the
  * permanent log reservation of the given transaction.  The remaining
@@ -281,22 +290,21 @@ xfs_trans_dup(
        ntp->t_type = tp->t_type;
        ntp->t_mountp = tp->t_mountp;
        ntp->t_items_free = XFS_LIC_NUM_SLOTS;
-       ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
-       XFS_LIC_INIT(&(ntp->t_items));
-       XFS_LBC_INIT(&(ntp->t_busy));
+       xfs_lic_init(&(ntp->t_items));
+       INIT_LIST_HEAD(&ntp->t_busy);
 
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(tp->t_ticket != NULL);
 
        ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
-       ntp->t_ticket = tp->t_ticket;
+       ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
        ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
        tp->t_blk_res = tp->t_blk_res_used;
        ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
        tp->t_rtx_res = tp->t_rtx_res_used;
        ntp->t_pflags = tp->t_pflags;
 
-       XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
+       xfs_trans_dup_dqinfo(tp, ntp);
 
        atomic_inc(&tp->t_mountp->m_active_trans);
        return ntp;
@@ -339,7 +347,7 @@ xfs_trans_reserve(
         */
        if (blocks > 0) {
                error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
-                                         -blocks, rsvd);
+                                         -((int64_t)blocks), rsvd);
                if (error != 0) {
                        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
                        return (XFS_ERROR(ENOSPC));
@@ -380,7 +388,7 @@ xfs_trans_reserve(
         */
        if (rtextents > 0) {
                error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
-                                         -rtextents, rsvd);
+                                         -((int64_t)rtextents), rsvd);
                if (error) {
                        error = XFS_ERROR(ENOSPC);
                        goto undo_log;
@@ -410,7 +418,7 @@ undo_log:
 undo_blocks:
        if (blocks > 0) {
                (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
-                                        blocks, rsvd);
+                                        (int64_t)blocks, rsvd);
                tp->t_blk_res = 0;
        }
 
@@ -419,7 +427,6 @@ undo_blocks:
        return error;
 }
 
-
 /*
  * Record the indicated change to the given field for application
  * to the file system's superblock when the transaction commits.
@@ -427,20 +434,34 @@ undo_blocks:
  *
  * Mark the transaction structure to indicate that the superblock
  * needs to be updated before committing.
+ *
+ * Because we may not be keeping track of allocated/free inodes and
+ * used filesystem blocks in the superblock, we do not mark the
+ * superblock dirty in this transaction if we modify these fields.
+ * We still need to update the transaction deltas so that they get
+ * applied to the incore superblock, but we don't want them to
+ * cause the superblock to get locked and logged if these are the
+ * only fields in the superblock that the transaction modifies.
  */
 void
 xfs_trans_mod_sb(
        xfs_trans_t     *tp,
        uint            field,
-       long            delta)
+       int64_t         delta)
 {
+       uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
+       xfs_mount_t     *mp = tp->t_mountp;
 
        switch (field) {
        case XFS_TRANS_SB_ICOUNT:
                tp->t_icount_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_IFREE:
                tp->t_ifree_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FDBLOCKS:
                /*
@@ -453,6 +474,8 @@ xfs_trans_mod_sb(
                        ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
                }
                tp->t_fdblocks_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_RES_FDBLOCKS:
                /*
@@ -462,6 +485,8 @@ xfs_trans_mod_sb(
                 */
                ASSERT(delta < 0);
                tp->t_res_fdblocks_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FREXTENTS:
                /*
@@ -515,7 +540,7 @@ xfs_trans_mod_sb(
                return;
        }
 
-       tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
+       tp->t_flags |= flags;
 }
 
 /*
@@ -530,7 +555,7 @@ STATIC void
 xfs_trans_apply_sb_deltas(
        xfs_trans_t     *tp)
 {
-       xfs_sb_t        *sbp;
+       xfs_dsb_t       *sbp;
        xfs_buf_t       *bp;
        int             whole = 0;
 
@@ -544,56 +569,55 @@ xfs_trans_apply_sb_deltas(
               (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                tp->t_ag_btree_delta));
 
-       if (tp->t_icount_delta != 0) {
-               INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
-       }
-       if (tp->t_ifree_delta != 0) {
-               INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
+       /*
+        * Only update the superblock counters if we are logging them
+        */
+       if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
+               if (tp->t_icount_delta)
+                       be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
+               if (tp->t_ifree_delta)
+                       be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
+               if (tp->t_fdblocks_delta)
+                       be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
+               if (tp->t_res_fdblocks_delta)
+                       be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
        }
 
-       if (tp->t_fdblocks_delta != 0) {
-               INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
-       }
-       if (tp->t_res_fdblocks_delta != 0) {
-               INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
-       }
+       if (tp->t_frextents_delta)
+               be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
+       if (tp->t_res_frextents_delta)
+               be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 
-       if (tp->t_frextents_delta != 0) {
-               INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta);
-       }
-       if (tp->t_res_frextents_delta != 0) {
-               INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_res_frextents_delta);
-       }
-       if (tp->t_dblocks_delta != 0) {
-               INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta);
+       if (tp->t_dblocks_delta) {
+               be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
                whole = 1;
        }
-       if (tp->t_agcount_delta != 0) {
-               INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta);
+       if (tp->t_agcount_delta) {
+               be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
                whole = 1;
        }
-       if (tp->t_imaxpct_delta != 0) {
-               INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta);
+       if (tp->t_imaxpct_delta) {
+               sbp->sb_imax_pct += tp->t_imaxpct_delta;
                whole = 1;
        }
-       if (tp->t_rextsize_delta != 0) {
-               INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta);
+       if (tp->t_rextsize_delta) {
+               be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
                whole = 1;
        }
-       if (tp->t_rbmblocks_delta != 0) {
-               INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta);
+       if (tp->t_rbmblocks_delta) {
+               be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
                whole = 1;
        }
-       if (tp->t_rblocks_delta != 0) {
-               INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta);
+       if (tp->t_rblocks_delta) {
+               be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
                whole = 1;
        }
-       if (tp->t_rextents_delta != 0) {
-               INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta);
+       if (tp->t_rextents_delta) {
+               be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
                whole = 1;
        }
-       if (tp->t_rextslog_delta != 0) {
-               INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta);
+       if (tp->t_rextslog_delta) {
+               sbp->sb_rextslog += tp->t_rextslog_delta;
                whole = 1;
        }
 
@@ -601,25 +625,35 @@ xfs_trans_apply_sb_deltas(
                /*
                 * Log the whole thing, the fields are noncontiguous.
                 */
-               xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
+               xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
        else
                /*
                 * Since all the modifiable fields are contiguous, we
                 * can get away with this.
                 */
-               xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount),
-                                 offsetof(xfs_sb_t, sb_frextents) +
+               xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
+                                 offsetof(xfs_dsb_t, sb_frextents) +
                                  sizeof(sbp->sb_frextents) - 1);
-
-       XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1;
 }
 
 /*
- * xfs_trans_unreserve_and_mod_sb() is called to release unused
- * reservations and apply superblock counter changes to the in-core
- * superblock.
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
+ * and apply superblock counter changes to the in-core superblock.  The
+ * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
+ * applied to the in-core superblock.  The idea is that that has already been
+ * done.
  *
  * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
+ * However, we have to ensure that we only modify each superblock field only
+ * once because the application of the delta values may not be atomic. That can
+ * lead to ENOSPC races occurring if we have two separate modifcations of the
+ * free space counter to put back the entire reservation and then take away
+ * what we used.
+ *
+ * If we are not logging superblock counters, then the inode allocated/free and
+ * used block counts are not updated in the on disk superblock. In this case,
+ * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
+ * still need to update the incore superblock with the changes.
  */
 STATIC void
 xfs_trans_unreserve_and_mod_sb(
@@ -627,98 +661,100 @@ xfs_trans_unreserve_and_mod_sb(
 {
        xfs_mod_sb_t    msb[14];        /* If you add cases, add entries */
        xfs_mod_sb_t    *msbp;
+       xfs_mount_t     *mp = tp->t_mountp;
        /* REFERENCED */
        int             error;
        int             rsvd;
+       int64_t         blkdelta = 0;
+       int64_t         rtxdelta = 0;
 
        msbp = msb;
        rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 
-       /*
-        * Release any reserved blocks.  Any that were allocated
-        * will be taken back again by fdblocks_delta below.
-        */
-       if (tp->t_blk_res > 0) {
+       /* calculate free blocks delta */
+       if (tp->t_blk_res > 0)
+               blkdelta = tp->t_blk_res;
+
+       if ((tp->t_fdblocks_delta != 0) &&
+           (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+            (tp->t_flags & XFS_TRANS_SB_DIRTY)))
+               blkdelta += tp->t_fdblocks_delta;
+
+       if (blkdelta != 0) {
                msbp->msb_field = XFS_SBS_FDBLOCKS;
-               msbp->msb_delta = tp->t_blk_res;
+               msbp->msb_delta = blkdelta;
                msbp++;
        }
 
-       /*
-        * Release any reserved real time extents .  Any that were
-        * allocated will be taken back again by frextents_delta below.
-        */
-       if (tp->t_rtx_res > 0) {
+       /* calculate free realtime extents delta */
+       if (tp->t_rtx_res > 0)
+               rtxdelta = tp->t_rtx_res;
+
+       if ((tp->t_frextents_delta != 0) &&
+           (tp->t_flags & XFS_TRANS_SB_DIRTY))
+               rtxdelta += tp->t_frextents_delta;
+
+       if (rtxdelta != 0) {
                msbp->msb_field = XFS_SBS_FREXTENTS;
-               msbp->msb_delta = tp->t_rtx_res;
+               msbp->msb_delta = rtxdelta;
                msbp++;
        }
 
-       /*
-        * Apply any superblock modifications to the in-core version.
-        * The t_res_fdblocks_delta and t_res_frextents_delta fields are
-        * explicitly NOT applied to the in-core superblock.
-        * The idea is that that has already been done.
-        */
-       if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
+       /* apply remaining deltas */
+
+       if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
                if (tp->t_icount_delta != 0) {
                        msbp->msb_field = XFS_SBS_ICOUNT;
-                       msbp->msb_delta = (int)tp->t_icount_delta;
+                       msbp->msb_delta = tp->t_icount_delta;
                        msbp++;
                }
                if (tp->t_ifree_delta != 0) {
                        msbp->msb_field = XFS_SBS_IFREE;
-                       msbp->msb_delta = (int)tp->t_ifree_delta;
-                       msbp++;
-               }
-               if (tp->t_fdblocks_delta != 0) {
-                       msbp->msb_field = XFS_SBS_FDBLOCKS;
-                       msbp->msb_delta = (int)tp->t_fdblocks_delta;
-                       msbp++;
-               }
-               if (tp->t_frextents_delta != 0) {
-                       msbp->msb_field = XFS_SBS_FREXTENTS;
-                       msbp->msb_delta = (int)tp->t_frextents_delta;
+                       msbp->msb_delta = tp->t_ifree_delta;
                        msbp++;
                }
+       }
+
+       if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
                if (tp->t_dblocks_delta != 0) {
                        msbp->msb_field = XFS_SBS_DBLOCKS;
-                       msbp->msb_delta = (int)tp->t_dblocks_delta;
+                       msbp->msb_delta = tp->t_dblocks_delta;
                        msbp++;
                }
                if (tp->t_agcount_delta != 0) {
                        msbp->msb_field = XFS_SBS_AGCOUNT;
-                       msbp->msb_delta = (int)tp->t_agcount_delta;
+                       msbp->msb_delta = tp->t_agcount_delta;
                        msbp++;
                }
                if (tp->t_imaxpct_delta != 0) {
                        msbp->msb_field = XFS_SBS_IMAX_PCT;
-                       msbp->msb_delta = (int)tp->t_imaxpct_delta;
+                       msbp->msb_delta = tp->t_imaxpct_delta;
                        msbp++;
                }
                if (tp->t_rextsize_delta != 0) {
                        msbp->msb_field = XFS_SBS_REXTSIZE;
-                       msbp->msb_delta = (int)tp->t_rextsize_delta;
+                       msbp->msb_delta = tp->t_rextsize_delta;
                        msbp++;
                }
                if (tp->t_rbmblocks_delta != 0) {
                        msbp->msb_field = XFS_SBS_RBMBLOCKS;
-                       msbp->msb_delta = (int)tp->t_rbmblocks_delta;
+                       msbp->msb_delta = tp->t_rbmblocks_delta;
                        msbp++;
                }
                if (tp->t_rblocks_delta != 0) {
                        msbp->msb_field = XFS_SBS_RBLOCKS;
-                       msbp->msb_delta = (int)tp->t_rblocks_delta;
+                       msbp->msb_delta = tp->t_rblocks_delta;
                        msbp++;
                }
                if (tp->t_rextents_delta != 0) {
                        msbp->msb_field = XFS_SBS_REXTENTS;
-                       msbp->msb_delta = (int)tp->t_rextents_delta;
+                       msbp->msb_delta = tp->t_rextents_delta;
                        msbp++;
                }
                if (tp->t_rextslog_delta != 0) {
                        msbp->msb_field = XFS_SBS_REXTSLOG;
-                       msbp->msb_delta = (int)tp->t_rextslog_delta;
+                       msbp->msb_delta = tp->t_rextslog_delta;
                        msbp++;
                }
        }
@@ -733,98 +769,256 @@ xfs_trans_unreserve_and_mod_sb(
        }
 }
 
+/*
+ * Total up the number of log iovecs needed to commit this
+ * transaction.  The transaction itself needs one for the
+ * transaction header.  Ask each dirty item in turn how many
+ * it needs to get the total.
+ */
+static uint
+xfs_trans_count_vecs(
+       struct xfs_trans        *tp)
+{
+       int                     nvecs;
+       xfs_log_item_desc_t     *lidp;
+
+       nvecs = 1;
+       lidp = xfs_trans_first_item(tp);
+       ASSERT(lidp != NULL);
+
+       /* In the non-debug case we need to start bailing out if we
+        * didn't find a log_item here, return zero and let trans_commit
+        * deal with it.
+        */
+       if (lidp == NULL)
+               return 0;
+
+       while (lidp != NULL) {
+               /*
+                * Skip items which aren't dirty in this transaction.
+                */
+               if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+                       lidp = xfs_trans_next_item(tp, lidp);
+                       continue;
+               }
+               lidp->lid_size = IOP_SIZE(lidp->lid_item);
+               nvecs += lidp->lid_size;
+               lidp = xfs_trans_next_item(tp, lidp);
+       }
+
+       return nvecs;
+}
 
 /*
- * xfs_trans_commit
- *
- * Commit the given transaction to the log a/synchronously.
+ * Fill in the vector with pointers to data to be logged
+ * by this transaction.  The transaction header takes
+ * the first vector, and then each dirty item takes the
+ * number of vectors it indicated it needed in xfs_trans_count_vecs().
  *
- * XFS disk error handling mechanism is not based on a typical
- * transaction abort mechanism. Logically after the filesystem
- * gets marked 'SHUTDOWN', we can't let any new transactions
- * be durable - ie. committed to disk - because some metadata might
- * be inconsistent. In such cases, this returns an error, and the
- * caller may assume that all locked objects joined to the transaction
- * have already been unlocked as if the commit had succeeded.
- * Do not reference the transaction structure after this call.
+ * As each item fills in the entries it needs, also pin the item
+ * so that it cannot be flushed out until the log write completes.
  */
- /*ARGSUSED*/
-int
-_xfs_trans_commit(
-       xfs_trans_t     *tp,
-       uint            flags,
-       xfs_lsn_t       *commit_lsn_p,
-       int             *log_flushed)
+static void
+xfs_trans_fill_vecs(
+       struct xfs_trans        *tp,
+       struct xfs_log_iovec    *log_vector)
 {
-       xfs_log_iovec_t         *log_vector;
-       int                     nvec;
-       xfs_mount_t             *mp;
-       xfs_lsn_t               commit_lsn;
-       /* REFERENCED */
-       int                     error;
-       int                     log_flags;
-       int                     sync;
-#define        XFS_TRANS_LOGVEC_COUNT  16
-       xfs_log_iovec_t         log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
-       void                    *commit_iclog;
-       int                     shutdown;
-
-       commit_lsn = -1;
+       xfs_log_item_desc_t     *lidp;
+       struct xfs_log_iovec    *vecp;
+       uint                    nitems;
 
        /*
-        * Determine whether this commit is releasing a permanent
-        * log reservation or not.
+        * Skip over the entry for the transaction header, we'll
+        * fill that in at the end.
         */
-       if (flags & XFS_TRANS_RELEASE_LOG_RES) {
-               ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
-               log_flags = XFS_LOG_REL_PERM_RESERV;
-       } else {
-               log_flags = 0;
+       vecp = log_vector + 1;
+
+       nitems = 0;
+       lidp = xfs_trans_first_item(tp);
+       ASSERT(lidp);
+       while (lidp) {
+               /* Skip items which aren't dirty in this transaction. */
+               if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+                       lidp = xfs_trans_next_item(tp, lidp);
+                       continue;
+               }
+
+               /*
+                * The item may be marked dirty but not log anything.  This can
+                * be used to get called when a transaction is committed.
+                */
+               if (lidp->lid_size)
+                       nitems++;
+               IOP_FORMAT(lidp->lid_item, vecp);
+               vecp += lidp->lid_size;
+               IOP_PIN(lidp->lid_item);
+               lidp = xfs_trans_next_item(tp, lidp);
        }
-       mp = tp->t_mountp;
 
        /*
-        * If there is nothing to be logged by the transaction,
-        * then unlock all of the items associated with the
-        * transaction and free the transaction structure.
-        * Also make sure to return any reserved blocks to
-        * the free pool.
+        * Now that we've counted the number of items in this transaction, fill
+        * in the transaction header. Note that the transaction header does not
+        * have a log item.
+        */
+       tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
+       tp->t_header.th_type = tp->t_type;
+       tp->t_header.th_num_items = nitems;
+       log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
+       log_vector->i_len = sizeof(xfs_trans_header_t);
+       log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
+}
+
+/*
+ * The committed item processing consists of calling the committed routine of
+ * each logged item, updating the item's position in the AIL if necessary, and
+ * unpinning each item.  If the committed routine returns -1, then do nothing
+ * further with the item because it may have been freed.
+ *
+ * Since items are unlocked when they are copied to the incore log, it is
+ * possible for two transactions to be completing and manipulating the same
+ * item simultaneously.  The AIL lock will protect the lsn field of each item.
+ * The value of this field can never go backwards.
+ *
+ * We unpin the items after repositioning them in the AIL, because otherwise
+ * they could be immediately flushed and we'd have to race with the flusher
+ * trying to pull the item from the AIL as we add it.
+ */
+static void
+xfs_trans_item_committed(
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               commit_lsn,
+       int                     aborted)
+{
+       xfs_lsn_t               item_lsn;
+       struct xfs_ail          *ailp;
+
+       if (aborted)
+               lip->li_flags |= XFS_LI_ABORTED;
+       item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+       /* If the committed routine returns -1, item has been freed. */
+       if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+               return;
+
+       /*
+        * If the returned lsn is greater than what it contained before, update
+        * the location of the item in the AIL.  If it is not, then do nothing.
+        * Items can never move backwards in the AIL.
+        *
+        * While the new lsn should usually be greater, it is possible that a
+        * later transaction completing simultaneously with an earlier one
+        * using the same item could complete first with a higher lsn.  This
+        * would cause the earlier transaction to fail the test below.
         */
-shut_us_down:
-       shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
-       if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
-               xfs_trans_unreserve_and_mod_sb(tp);
+       ailp = lip->li_ailp;
+       spin_lock(&ailp->xa_lock);
+       if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
                /*
-                * It is indeed possible for the transaction to be
-                * not dirty but the dqinfo portion to be. All that
-                * means is that we have some (non-persistent) quota
-                * reservations that need to be unreserved.
+                * This will set the item's lsn to item_lsn and update the
+                * position of the item in the AIL.
+                *
+                * xfs_trans_ail_update() drops the AIL lock.
                 */
-               XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
-               if (tp->t_ticket) {
-                       commit_lsn = xfs_log_done(mp, tp->t_ticket,
-                                                       NULL, log_flags);
-                       if (commit_lsn == -1 && !shutdown)
-                               shutdown = XFS_ERROR(EIO);
-               }
-               current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-               xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0);
-               xfs_trans_free_busy(tp);
-               xfs_trans_free(tp);
-               XFS_STATS_INC(xs_trans_empty);
-               if (commit_lsn_p)
-                       *commit_lsn_p = commit_lsn;
-               return (shutdown);
+               xfs_trans_ail_update(ailp, lip, item_lsn);
+       } else {
+               spin_unlock(&ailp->xa_lock);
        }
-       ASSERT(tp->t_ticket != NULL);
 
        /*
-        * If we need to update the superblock, then do it now.
+        * Now that we've repositioned the item in the AIL, unpin it so it can
+        * be flushed. Pass information about buffer stale state down from the
+        * log item flags, if anyone else stales the buffer we do not want to
+        * pay any attention to it.
         */
-       if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
-               xfs_trans_apply_sb_deltas(tp);
+       IOP_UNPIN(lip);
+}
+
+/*
+ * This is typically called by the LM when a transaction has been fully
+ * committed to disk.  It needs to unpin the items which have
+ * been logged by the transaction and update their positions
+ * in the AIL if necessary.
+ *
+ * This also gets called when the transactions didn't get written out
+ * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
+ */
+STATIC void
+xfs_trans_committed(
+       struct xfs_trans        *tp,
+       int                     abortflag)
+{
+       xfs_log_item_desc_t     *lidp;
+       xfs_log_item_chunk_t    *licp;
+       xfs_log_item_chunk_t    *next_licp;
+
+       /* Call the transaction's completion callback if there is one. */
+       if (tp->t_callback != NULL)
+               tp->t_callback(tp, tp->t_callarg);
+
+       for (lidp = xfs_trans_first_item(tp);
+            lidp != NULL;
+            lidp = xfs_trans_next_item(tp, lidp)) {
+               xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
+       }
+
+       /* free the item chunks, ignoring the embedded chunk */
+       for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
+               next_licp = licp->lic_next;
+               kmem_free(licp);
        }
-       XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
+
+       xfs_trans_free(tp);
+}
+
+/*
+ * Called from the trans_commit code when we notice that
+ * the filesystem is in the middle of a forced shutdown.
+ */
+STATIC void
+xfs_trans_uncommit(
+       struct xfs_trans        *tp,
+       uint                    flags)
+{
+       xfs_log_item_desc_t     *lidp;
+
+       for (lidp = xfs_trans_first_item(tp);
+            lidp != NULL;
+            lidp = xfs_trans_next_item(tp, lidp)) {
+               /*
+                * Unpin all but those that aren't dirty.
+                */
+               if (lidp->lid_flags & XFS_LID_DIRTY)
+                       IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+       }
+
+       xfs_trans_unreserve_and_mod_sb(tp);
+       xfs_trans_unreserve_and_mod_dquots(tp);
+
+       xfs_trans_free_items(tp, flags);
+       xfs_trans_free(tp);
+}
+
+/*
+ * Format the transaction direct to the iclog. This isolates the physical
+ * transaction commit operation from the logical operation and hence allows
+ * other methods to be introduced without affecting the existing commit path.
+ */
+static int
+xfs_trans_commit_iclog(
+       struct xfs_mount        *mp,
+       struct xfs_trans        *tp,
+       xfs_lsn_t               *commit_lsn,
+       int                     flags)
+{
+       int                     shutdown;
+       int                     error;
+       int                     log_flags = 0;
+       struct xlog_in_core     *commit_iclog;
+#define XFS_TRANS_LOGVEC_COUNT  16
+       struct xfs_log_iovec    log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
+       struct xfs_log_iovec    *log_vector;
+       uint                    nvec;
+
 
        /*
         * Ask each log item how many log_vector entries it will
@@ -834,8 +1028,7 @@ shut_us_down:
         */
        nvec = xfs_trans_count_vecs(tp);
        if (nvec == 0) {
-               xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
-               goto shut_us_down;
+               return ENOMEM;  /* triggers a shutdown! */
        } else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
                log_vector = log_vector_fast;
        } else {
@@ -850,6 +1043,9 @@ shut_us_down:
         */
        xfs_trans_fill_vecs(tp, log_vector);
 
+       if (flags & XFS_TRANS_RELEASE_LOG_RES)
+               log_flags = XFS_LOG_REL_PERM_RESERV;
+
        error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
 
        /*
@@ -857,21 +1053,19 @@ shut_us_down:
         * at any time after this call.  However, all the items associated
         * with the transaction are still locked and pinned in memory.
         */
-       commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
+       *commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
 
-       tp->t_commit_lsn = commit_lsn;
-       if (nvec > XFS_TRANS_LOGVEC_COUNT) {
-               kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
-       }
+       tp->t_commit_lsn = *commit_lsn;
+       trace_xfs_trans_commit_lsn(tp);
 
-       if (commit_lsn_p)
-               *commit_lsn_p = commit_lsn;
+       if (nvec > XFS_TRANS_LOGVEC_COUNT)
+               kmem_free(log_vector);
 
        /*
         * If we got a log write error. Unpin the logitems that we
         * had pinned, clean up, free trans structure, and return error.
         */
-       if (error || commit_lsn == -1) {
+       if (error || *commit_lsn == -1) {
                current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
                xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
                return XFS_ERROR(EIO);
@@ -885,8 +1079,6 @@ shut_us_down:
         */
        xfs_trans_unreserve_and_mod_sb(tp);
 
-       sync = tp->t_flags & XFS_TRANS_SYNC;
-
        /*
         * Tell the LM to call the transaction completion routine
         * when the log write with LSN commit_lsn completes (e.g.
@@ -929,7 +1121,7 @@ shut_us_down:
         * the commit lsn of this transaction for dependency tracking
         * purposes.
         */
-       xfs_trans_unlock_items(tp, commit_lsn);
+       xfs_trans_unlock_items(tp, *commit_lsn);
 
        /*
         * If we detected a log error earlier, finish committing
@@ -949,157 +1141,113 @@ shut_us_down:
         * and the items are released we can finally allow the iclog to
         * go to disk.
         */
-       error = xfs_log_release_iclog(mp, commit_iclog);
-
-       /*
-        * If the transaction needs to be synchronous, then force the
-        * log out now and wait for it.
-        */
-       if (sync) {
-               if (!error) {
-                       error = _xfs_log_force(mp, commit_lsn,
-                                     XFS_LOG_FORCE | XFS_LOG_SYNC,
-                                     log_flushed);
-               }
-               XFS_STATS_INC(xs_trans_sync);
-       } else {
-               XFS_STATS_INC(xs_trans_async);
-       }
-
-       return (error);
+       return xfs_log_release_iclog(mp, commit_iclog);
 }
 
 
 /*
- * Total up the number of log iovecs needed to commit this
- * transaction.  The transaction itself needs one for the
- * transaction header.  Ask each dirty item in turn how many
- * it needs to get the total.
+ * xfs_trans_commit
+ *
+ * Commit the given transaction to the log a/synchronously.
+ *
+ * XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically after the filesystem
+ * gets marked 'SHUTDOWN', we can't let any new transactions
+ * be durable - ie. committed to disk - because some metadata might
+ * be inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * Do not reference the transaction structure after this call.
  */
-STATIC uint
-xfs_trans_count_vecs(
-       xfs_trans_t     *tp)
+int
+_xfs_trans_commit(
+       struct xfs_trans        *tp,
+       uint                    flags,
+       int                     *log_flushed)
 {
-       int                     nvecs;
-       xfs_log_item_desc_t     *lidp;
+       struct xfs_mount        *mp = tp->t_mountp;
+       xfs_lsn_t               commit_lsn = -1;
+       int                     error = 0;
+       int                     log_flags = 0;
+       int                     sync = tp->t_flags & XFS_TRANS_SYNC;
 
-       nvecs = 1;
-       lidp = xfs_trans_first_item(tp);
-       ASSERT(lidp != NULL);
-
-       /* In the non-debug case we need to start bailing out if we
-        * didn't find a log_item here, return zero and let trans_commit
-        * deal with it.
+       /*
+        * Determine whether this commit is releasing a permanent
+        * log reservation or not.
         */
-       if (lidp == NULL)
-               return 0;
-
-       while (lidp != NULL) {
-               /*
-                * Skip items which aren't dirty in this transaction.
-                */
-               if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
-                       lidp = xfs_trans_next_item(tp, lidp);
-                       continue;
-               }
-               lidp->lid_size = IOP_SIZE(lidp->lid_item);
-               nvecs += lidp->lid_size;
-               lidp = xfs_trans_next_item(tp, lidp);
+       if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+               ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+               log_flags = XFS_LOG_REL_PERM_RESERV;
        }
 
-       return nvecs;
-}
-
-/*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
- */
-STATIC void
-xfs_trans_uncommit(
-       xfs_trans_t     *tp,
-       uint            flags)
-{
-       xfs_log_item_desc_t     *lidp;
+       /*
+        * If there is nothing to be logged by the transaction,
+        * then unlock all of the items associated with the
+        * transaction and free the transaction structure.
+        * Also make sure to return any reserved blocks to
+        * the free pool.
+        */
+       if (!(tp->t_flags & XFS_TRANS_DIRTY))
+               goto out_unreserve;
 
-       for (lidp = xfs_trans_first_item(tp);
-            lidp != NULL;
-            lidp = xfs_trans_next_item(tp, lidp)) {
-               /*
-                * Unpin all but those that aren't dirty.
-                */
-               if (lidp->lid_flags & XFS_LID_DIRTY)
-                       IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               error = XFS_ERROR(EIO);
+               goto out_unreserve;
        }
 
-       xfs_trans_unreserve_and_mod_sb(tp);
-       XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+       ASSERT(tp->t_ticket != NULL);
 
-       xfs_trans_free_items(tp, flags);
-       xfs_trans_free_busy(tp);
-       xfs_trans_free(tp);
-}
+       /*
+        * If we need to update the superblock, then do it now.
+        */
+       if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+               xfs_trans_apply_sb_deltas(tp);
+       xfs_trans_apply_dquot_deltas(tp);
 
-/*
- * Fill in the vector with pointers to data to be logged
- * by this transaction.  The transaction header takes
- * the first vector, and then each dirty item takes the
- * number of vectors it indicated it needed in xfs_trans_count_vecs().
- *
- * As each item fills in the entries it needs, also pin the item
- * so that it cannot be flushed out until the log write completes.
- */
-STATIC void
-xfs_trans_fill_vecs(
-       xfs_trans_t             *tp,
-       xfs_log_iovec_t         *log_vector)
-{
-       xfs_log_item_desc_t     *lidp;
-       xfs_log_iovec_t         *vecp;
-       uint                    nitems;
+       error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
+       if (error == ENOMEM) {
+               xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+               error = XFS_ERROR(EIO);
+               goto out_unreserve;
+       }
 
        /*
-        * Skip over the entry for the transaction header, we'll
-        * fill that in at the end.
+        * If the transaction needs to be synchronous, then force the
+        * log out now and wait for it.
         */
-       vecp = log_vector + 1;          /* pointer arithmetic */
-
-       nitems = 0;
-       lidp = xfs_trans_first_item(tp);
-       ASSERT(lidp != NULL);
-       while (lidp != NULL) {
-               /*
-                * Skip items which aren't dirty in this transaction.
-                */
-               if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
-                       lidp = xfs_trans_next_item(tp, lidp);
-                       continue;
-               }
-               /*
-                * The item may be marked dirty but not log anything.
-                * This can be used to get called when a transaction
-                * is committed.
-                */
-               if (lidp->lid_size) {
-                       nitems++;
+       if (sync) {
+               if (!error) {
+                       error = _xfs_log_force_lsn(mp, commit_lsn,
+                                     XFS_LOG_SYNC, log_flushed);
                }
-               IOP_FORMAT(lidp->lid_item, vecp);
-               vecp += lidp->lid_size;         /* pointer arithmetic */
-               IOP_PIN(lidp->lid_item);
-               lidp = xfs_trans_next_item(tp, lidp);
+               XFS_STATS_INC(xs_trans_sync);
+       } else {
+               XFS_STATS_INC(xs_trans_async);
        }
 
+       return error;
+
+out_unreserve:
+       xfs_trans_unreserve_and_mod_sb(tp);
+
        /*
-        * Now that we've counted the number of items in this
-        * transaction, fill in the transaction header.
+        * It is indeed possible for the transaction to be not dirty but
+        * the dqinfo portion to be.  All that means is that we have some
+        * (non-persistent) quota reservations that need to be unreserved.
         */
-       tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
-       tp->t_header.th_type = tp->t_type;
-       tp->t_header.th_num_items = nitems;
-       log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
-       log_vector->i_len = sizeof(xfs_trans_header_t);
-       XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
-}
+       xfs_trans_unreserve_and_mod_dquots(tp);
+       if (tp->t_ticket) {
+               commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+               if (commit_lsn == -1 && !error)
+                       error = XFS_ERROR(EIO);
+       }
+       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+       xfs_trans_free_items(tp, error ? XFS_TRANS_ABORT : 0);
+       xfs_trans_free(tp);
 
+       XFS_STATS_INC(xs_trans_empty);
+       return error;
+}
 
 /*
  * Unlock all of the transaction's items and free the transaction.
@@ -1144,7 +1292,7 @@ xfs_trans_cancel(
                while (licp != NULL) {
                        lidp = licp->lic_descs;
                        for (i = 0; i < licp->lic_unused; i++, lidp++) {
-                               if (XFS_LIC_ISFREE(licp, i)) {
+                               if (xfs_lic_isfree(licp, i)) {
                                        continue;
                                }
 
@@ -1157,7 +1305,7 @@ xfs_trans_cancel(
        }
 #endif
        xfs_trans_unreserve_and_mod_sb(tp);
-       XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
+       xfs_trans_unreserve_and_mod_dquots(tp);
 
        if (tp->t_ticket) {
                if (flags & XFS_TRANS_RELEASE_LOG_RES) {
@@ -1173,192 +1321,75 @@ xfs_trans_cancel(
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 
        xfs_trans_free_items(tp, flags);
-       xfs_trans_free_busy(tp);
        xfs_trans_free(tp);
 }
 
-
 /*
- * Free the transaction structure.  If there is more clean up
- * to do when the structure is freed, add it here.
- */
-STATIC void
-xfs_trans_free(
-       xfs_trans_t     *tp)
-{
-       atomic_dec(&tp->t_mountp->m_active_trans);
-       XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
-       kmem_zone_free(xfs_trans_zone, tp);
-}
-
-
-/*
- * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
- *
- * This is typically called by the LM when a transaction has been fully
- * committed to disk.  It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
- *
- * Call xfs_trans_chunk_committed() to process the items in
- * each chunk.
+ * Roll from one trans in the sequence of PERMANENT transactions to
+ * the next: permanent transactions are only flushed out when
+ * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
+ * as possible to let chunks of it go to the log. So we commit the
+ * chunk we've been working on and get a new transaction to continue.
  */
-STATIC void
-xfs_trans_committed(
-       xfs_trans_t     *tp,
-       int             abortflag)
+int
+xfs_trans_roll(
+       struct xfs_trans        **tpp,
+       struct xfs_inode        *dp)
 {
-       xfs_log_item_chunk_t    *licp;
-       xfs_log_item_chunk_t    *next_licp;
-       xfs_log_busy_chunk_t    *lbcp;
-       xfs_log_busy_slot_t     *lbsp;
-       int                     i;
+       struct xfs_trans        *trans;
+       unsigned int            logres, count;
+       int                     error;
 
        /*
-        * Call the transaction's completion callback if there
-        * is one.
+        * Ensure that the inode is always logged.
         */
-       if (tp->t_callback != NULL) {
-               tp->t_callback(tp, tp->t_callarg);
-       }
+       trans = *tpp;
+       xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
 
        /*
-        * Special case the chunk embedded in the transaction.
+        * Copy the critical parameters from one trans to the next.
         */
-       licp = &(tp->t_items);
-       if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
-               xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
-       }
+       logres = trans->t_log_res;
+       count = trans->t_log_count;
+       *tpp = xfs_trans_dup(trans);
 
        /*
-        * Process the items in each chunk in turn.
+        * Commit the current transaction.
+        * If this commit failed, then it'd just unlock those items that
+        * are not marked ihold. That also means that a filesystem shutdown
+        * is in progress. The caller takes the responsibility to cancel
+        * the duplicate transaction that gets returned.
         */
-       licp = licp->lic_next;
-       while (licp != NULL) {
-               ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
-               xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
-               next_licp = licp->lic_next;
-               kmem_free(licp, sizeof(xfs_log_item_chunk_t));
-               licp = next_licp;
-       }
+       error = xfs_trans_commit(trans, 0);
+       if (error)
+               return (error);
 
-       /*
-        * Clear all the per-AG busy list items listed in this transaction
-        */
-       lbcp = &tp->t_busy;
-       while (lbcp != NULL) {
-               for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
-                       if (!XFS_LBC_ISFREE(lbcp, i)) {
-                               xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
-                                                    lbsp->lbc_idx);
-                       }
-               }
-               lbcp = lbcp->lbc_next;
-       }
-       xfs_trans_free_busy(tp);
+       trans = *tpp;
 
        /*
-        * That's it for the transaction structure.  Free it.
+        * transaction commit worked ok so we can drop the extra ticket
+        * reference that we gained in xfs_trans_dup()
         */
-       xfs_trans_free(tp);
-}
+       xfs_log_ticket_put(trans->t_ticket);
 
-/*
- * This is called to perform the commit processing for each
- * item described by the given chunk.
- *
- * The commit processing consists of unlocking items which were
- * held locked with the SYNC_UNLOCK attribute, calling the committed
- * routine of each logged item, updating the item's position in the AIL
- * if necessary, and unpinning each item.  If the committed routine
- * returns -1, then do nothing further with the item because it
- * may have been freed.
- *
- * Since items are unlocked when they are copied to the incore
- * log, it is possible for two transactions to be completing
- * and manipulating the same item simultaneously.  The AIL lock
- * will protect the lsn field of each item.  The value of this
- * field can never go backwards.
- *
- * We unpin the items after repositioning them in the AIL, because
- * otherwise they could be immediately flushed and we'd have to race
- * with the flusher trying to pull the item from the AIL as we add it.
- */
-STATIC void
-xfs_trans_chunk_committed(
-       xfs_log_item_chunk_t    *licp,
-       xfs_lsn_t               lsn,
-       int                     aborted)
-{
-       xfs_log_item_desc_t     *lidp;
-       xfs_log_item_t          *lip;
-       xfs_lsn_t               item_lsn;
-       struct xfs_mount        *mp;
-       int                     i;
-       SPLDECL(s);
-
-       lidp = licp->lic_descs;
-       for (i = 0; i < licp->lic_unused; i++, lidp++) {
-               if (XFS_LIC_ISFREE(licp, i)) {
-                       continue;
-               }
-
-               lip = lidp->lid_item;
-               if (aborted)
-                       lip->li_flags |= XFS_LI_ABORTED;
 
-               /*
-                * Send in the ABORTED flag to the COMMITTED routine
-                * so that it knows whether the transaction was aborted
-                * or not.
-                */
-               item_lsn = IOP_COMMITTED(lip, lsn);
-
-               /*
-                * If the committed routine returns -1, make
-                * no more references to the item.
-                */
-               if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
-                       continue;
-               }
-
-               /*
-                * If the returned lsn is greater than what it
-                * contained before, update the location of the
-                * item in the AIL.  If it is not, then do nothing.
-                * Items can never move backwards in the AIL.
-                *
-                * While the new lsn should usually be greater, it
-                * is possible that a later transaction completing
-                * simultaneously with an earlier one using the
-                * same item could complete first with a higher lsn.
-                * This would cause the earlier transaction to fail
-                * the test below.
-                */
-               mp = lip->li_mountp;
-               AIL_LOCK(mp,s);
-               if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
-                       /*
-                        * This will set the item's lsn to item_lsn
-                        * and update the position of the item in
-                        * the AIL.
-                        *
-                        * xfs_trans_update_ail() drops the AIL lock.
-                        */
-                       xfs_trans_update_ail(mp, lip, item_lsn, s);
-               } else {
-                       AIL_UNLOCK(mp, s);
-               }
+       /*
+        * Reserve space in the log for the next transaction.
+        * This also pushes items in the "AIL", the list of logged items,
+        * out to disk if they are taking up space at the tail of the log
+        * that we want to use.  This requires that either nothing be locked
+        * across this call, or that anything that is locked be logged in
+        * the prior and the next transactions.
+        */
+       error = xfs_trans_reserve(trans, 0, logres, 0,
+                                 XFS_TRANS_PERM_LOG_RES, count);
+       /*
+        *  Ensure that the inode is in the new transaction and locked.
+        */
+       if (error)
+               return error;
 
-               /*
-                * Now that we've repositioned the item in the AIL,
-                * unpin it so it can be flushed. Pass information
-                * about buffer stale state down from the log item
-                * flags, if anyone else stales the buffer we do not
-                * want to pay any attention to it.
-                */
-               IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
-       }
+       xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+       xfs_trans_ihold(trans, dp);
+       return 0;
 }