Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
Linus Torvalds [Thu, 21 Jan 2010 15:32:11 +0000 (07:32 -0800)]
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  fs/bio.c: fix shadows sparse warning
  drbd: The kernel code is now equivalent to out of tree release 8.3.7
  drbd: Allow online resizing of DRBD devices while peer not reachable (needs to be explicitly forced)
  drbd: Don't go into StandAlone mode when authentication fails because of network error
  drivers/block/drbd/drbd_receiver.c: correct NULL test
  cfq-iosched: Respect ioprio_class when preempting
  genhd: overlapping variable definition
  block: removed unused as_io_context
  DM: Fix device mapper topology stacking
  block: bdev_stack_limits wrapper
  block: Fix discard alignment calculation and printing
  block: Correct handling of bottom device misalignment
  drbd: check on CONFIG_LBDAF, not LBD
  drivers/block/drbd: Correct NULL test
  drbd: Silenced an assert that could be triggered after changing write ordering method
  drbd: Kconfig fix
  drbd: Fix for a race between IO and a detach operation [Bugz 262]
  drbd: Use drbd_crypto_is_hash() instead of an open coded check

16 files changed:
block/blk-ioc.c
block/blk-settings.c
block/cfq-iosched.c
block/genhd.c
drivers/block/drbd/Kconfig
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/md/dm-table.c
fs/bio.c
include/linux/blkdev.h
include/linux/drbd.h
include/linux/drbd_nl.h
include/linux/genhd.h
include/linux/iocontext.h

index cbdabb0..98e6bf6 100644 (file)
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                rcu_read_lock();
-               if (ioc->aic && ioc->aic->dtor)
-                       ioc->aic->dtor(ioc->aic);
                cfq_dtor(ioc);
                rcu_read_unlock();
 
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
        task_unlock(task);
 
        if (atomic_dec_and_test(&ioc->nr_tasks)) {
-               if (ioc->aic && ioc->aic->exit)
-                       ioc->aic->exit(ioc->aic);
                cfq_exit(ioc);
 
        }
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
                ret->ioprio = 0;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
-               ret->aic = NULL;
                INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
                INIT_HLIST_HEAD(&ret->cic_list);
                ret->ioc_data = NULL;
index d52d4ad..5eeb9e0 100644 (file)
@@ -528,7 +528,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t offset)
 {
        sector_t alignment;
-       unsigned int top, bottom;
+       unsigned int top, bottom, ret = 0;
 
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
 
+       t->misaligned |= b->misaligned;
+
        alignment = queue_limit_alignment_offset(b, offset);
 
        /* Bottom device has different alignment.  Check that it is
@@ -558,8 +560,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                bottom = max(b->physical_block_size, b->io_min) + alignment;
 
                /* Verify that top and bottom intervals line up */
-               if (max(top, bottom) & (min(top, bottom) - 1))
+               if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
+                       ret = -1;
+               }
        }
 
        t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +582,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
+               ret = -1;
        }
 
        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
+               ret = -1;
        }
 
        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
+               ret = -1;
        }
 
        /* Find lowest common alignment_offset */
@@ -597,8 +604,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                & (max(t->physical_block_size, t->io_min) - 1);
 
        /* Verify that new alignment_offset is on a logical block boundary */
-       if (t->alignment_offset & (t->logical_block_size - 1))
+       if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
+               ret = -1;
+       }
 
        /* Discard alignment and granularity */
        if (b->discard_granularity) {
@@ -626,11 +635,33 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        (t->discard_granularity - 1);
        }
 
-       return t->misaligned ? -1 : 0;
+       return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
 /**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t: the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+                     sector_t start)
+{
+       struct request_queue *bq = bdev_get_queue(bdev);
+
+       start += get_start_sect(bdev);
+
+       return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
  * disk_stack_limits - adjust queue limits for stacked drivers
  * @disk:  MD/DM gendisk (top)
  * @bdev:  the underlying block device (bottom)
index 918c7fd..ee130f1 100644 (file)
@@ -3077,6 +3077,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                return true;
 
        /*
+        * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+        */
+       if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+               return false;
+
+       /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
index b11a4ad..d13ba76 100644 (file)
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+       return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
index f4acd04..df09837 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
-       depends on !PROC_FS || !INET || !CONNECTOR
+       depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
 
 config BLK_DEV_DRBD
        tristate "DRBD Distributed Replicated Block Device support"
index c975587..2bf3a6e 100644 (file)
@@ -1275,7 +1275,7 @@ struct bm_extent {
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
-               struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
index 9348f33..e898ad9 100644 (file)
@@ -1298,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                                dev_err(DEV, "Sending state in drbd_io_error() failed\n");
                }
 
+               wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
                lc_destroy(mdev->resync);
                mdev->resync = NULL;
                lc_destroy(mdev->act_log);
index 4e0726a..1292e06 100644 (file)
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
 {
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-       size = drbd_new_dev_size(mdev, mdev->ldev);
+       size = drbd_new_dev_size(mdev, mdev->ldev, force);
 
        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
 {
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
        m_size = drbd_get_max_capacity(bdev);
 
+       if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+               dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+               p_size = m_size;
+       }
+
        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
        } else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-          drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+           drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
                retcode = ERR_DISK_TO_SMALL;
                goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
            !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
                set_bit(USE_DEGR_WFC_T, &mdev->flags);
 
-       dd = drbd_determin_dev_size(mdev);
+       dd = drbd_determin_dev_size(mdev, 0);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                        goto fail;
                }
 
-               if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+               if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
                        retcode = ERR_AUTH_ALG_ND;
                        goto fail;
                }
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
        }
 
        mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-       dd = drbd_determin_dev_size(mdev);
+       dd = drbd_determin_dev_size(mdev, rs.resize_force);
        drbd_md_sync(mdev);
        put_ldev(mdev);
        if (dd == dev_size_error) {
index 259c135..f22a528 100644 (file)
@@ -878,9 +878,13 @@ retry:
 
        if (mdev->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
-               if (!drbd_do_auth(mdev)) {
+               switch (drbd_do_auth(mdev)) {
+               case -1:
                        dev_err(DEV, "Authentication of peer failed\n");
                        return -1;
+               case 0:
+                       dev_err(DEV, "Authentication of peer failed, trying again.\n");
+                       return 0;
                }
        }
 
@@ -1201,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 
        case WO_bdev_flush:
        case WO_drain_io:
-               D_ASSERT(rv == FE_STILL_LIVE);
-               set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-               drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-               rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+               if (rv == FE_STILL_LIVE) {
+                       set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+                       drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+                       rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+               }
                if (rv == FE_RECYCLED)
                        return TRUE;
 
@@ -2865,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 
                /* Never shrink a device with usable data during connect.
                   But allow online shrinking if we are connected. */
-               if (drbd_new_dev_size(mdev, mdev->ldev) <
+               if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
                   drbd_get_capacity(mdev->this_bdev) &&
                   mdev->state.disk >= D_OUTDATED &&
                   mdev->state.conn < C_CONNECTED) {
@@ -2880,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 #undef min_not_zero
 
        if (get_ldev(mdev)) {
-               dd = drbd_determin_dev_size(mdev);
+         dd = drbd_determin_dev_size(mdev, 0);
                put_ldev(mdev);
                if (dd == dev_size_error)
                        return FALSE;
@@ -3830,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 {
        dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
        dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
-       return 0;
+       return -1;
 }
 #else
 #define CHALLENGE_LEN 64
+
+/* Return value:
+       1 - auth succeeded,
+       0 - failed, try again (network error),
+       -1 - auth failed, don't try again.
+*/
+
 static int drbd_do_auth(struct drbd_conf *mdev)
 {
        char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
@@ -3854,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
                                (u8 *)mdev->net_conf->shared_secret, key_len);
        if (rv) {
                dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3877,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 
        if (p.length > CHALLENGE_LEN*2) {
                dev_err(DEV, "expected AuthChallenge payload too big.\n");
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
        peers_ch = kmalloc(p.length, GFP_NOIO);
        if (peers_ch == NULL) {
                dev_err(DEV, "kmalloc of peers_ch failed\n");
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3900,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        response = kmalloc(resp_size, GFP_NOIO);
        if (response == NULL) {
                dev_err(DEV, "kmalloc of response failed\n");
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3910,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        rv = crypto_hash_digest(&desc, &sg, sg.length, response);
        if (rv) {
                dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3944,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        }
 
        right_response = kmalloc(resp_size, GFP_NOIO);
-       if (response == NULL) {
+       if (right_response == NULL) {
                dev_err(DEV, "kmalloc of right_response failed\n");
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3955,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
        if (rv) {
                dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-               rv = 0;
+               rv = -1;
                goto fail;
        }
 
@@ -3964,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        if (rv)
                dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
                     resp_size, mdev->net_conf->cram_hmac_alg);
+       else
+               rv = -1;
 
  fail:
        kfree(peers_ch);
index be62547..4b22feb 100644 (file)
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                return 0;
        }
 
-       if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-               DMWARN("%s: target device %s is misaligned: "
+       if (bdev_stack_limits(limits, bdev, start) < 0)
+               DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
-                      (unsigned long long) start << 9);
-
+                      (unsigned long long) start << SECTOR_SHIFT);
 
        /*
         * Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ combine_limits:
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
-                       DMWARN("%s: target device "
+                       DMWARN("%s: adding target device "
                               "(start sect %llu len %llu) "
-                              "is misaligned",
+                              "caused an alignment inconsistency",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
@@ -1080,15 +1079,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
 {
        /*
-        * Each target device in the table has a data area that should normally
-        * be aligned such that the DM device's alignment_offset is 0.
-        * FIXME: Propagate alignment_offsets up the stack and warn of
-        *        sub-optimal or inconsistent settings.
-        */
-       limits->alignment_offset = 0;
-       limits->misaligned = 0;
-
-       /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;
index 76e6713..12429c9 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
        i = 0;
        while (i < bio_slab_nr) {
-               struct bio_slab *bslab = &bio_slabs[i];
+               bslab = &bio_slabs[i];
 
                if (!bslab->slab && entry == -1)
                        entry = i;
index 9b98173..5c80189 100644 (file)
@@ -938,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+                           sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1148,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
 static inline int queue_sector_discard_alignment(struct request_queue *q,
                                                 sector_t sector)
 {
-       return ((sector << 9) - q->limits.discard_alignment)
-               & (q->limits.discard_granularity - 1);
+       struct queue_limits *lim = &q->limits;
+       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+       return (lim->discard_granularity + lim->discard_alignment - alignment)
+               & (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
index e84f473..7896227 100644 (file)
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
index db5721a..a4d82f8 100644 (file)
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
 
 NL_PACKET(resize, 7,
        NL_INT64(               29,     T_MAY_IGNORE,   resize_size)
+       NL_BIT(                 68,     T_MAY_IGNORE,   resize_force)
 )
 
 NL_PACKET(syncer_conf, 8,
index c6c0c41..9717081 100644 (file)
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
 #define part_stat_read(part, field)                                    \
 ({                                                                     \
        typeof((part)->dkstats->field) res = 0;                         \
-       int i;                                                          \
-       for_each_possible_cpu(i)                                        \
-               res += per_cpu_ptr((part)->dkstats, i)->field;          \
+       unsigned int _cpu;                                              \
+       for_each_possible_cpu(_cpu)                                     \
+               res += per_cpu_ptr((part)->dkstats, _cpu)->field;       \
        res;                                                            \
 })
 
index a632359..78ef023 100644 (file)
@@ -4,32 +4,6 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-       spinlock_t lock;
-
-       void (*dtor)(struct as_io_context *aic); /* destructor */
-       void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-       unsigned long state;
-       atomic_t nr_queued; /* queued reads & sync writes */
-       atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-       /* IO History tracking */
-       /* Thinktime */
-       unsigned long last_end_request;
-       unsigned long ttime_total;
-       unsigned long ttime_samples;
-       unsigned long ttime_mean;
-       /* Layout pattern */
-       unsigned int seek_samples;
-       sector_t last_request_pos;
-       u64 seek_total;
-       sector_t seek_mean;
-};
-
 struct cfq_queue;
 struct cfq_io_context {
        void *key;
@@ -78,7 +52,6 @@ struct io_context {
        unsigned long last_waited; /* Time last woken after wait for request */
        int nr_batch_requests;     /* Number of requests left in the batch */
 
-       struct as_io_context *aic;
        struct radix_tree_root radix_root;
        struct hlist_head cic_list;
        void *ioc_data;