Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block
Linus Torvalds [Fri, 10 Oct 2008 17:52:45 +0000 (10:52 -0700)]
* 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block: (132 commits)
  doc/cdrom: Trvial documentation error, file not present
  block_dev: fix kernel-doc in new functions
  block: add some comments around the bio read-write flags
  block: mark bio_split_pool static
  block: Find bio sector offset given idx and offset
  block: gendisk integrity wrapper
  block: Switch blk_integrity_compare from bdev to gendisk
  block: Fix double put in blk_integrity_unregister
  block: Introduce integrity data ownership flag
  block: revert part of d7533ad0e132f92e75c1b2eb7c26387b25a583c1
  bio.h: Remove unused conditional code
  block: remove end_{queued|dequeued}_request()
  block: change elevator to use __blk_end_request()
  gdrom: change to use __blk_end_request()
  memstick: change to use __blk_end_request()
  virtio_blk: change to use __blk_end_request()
  blktrace: use BLKTRACE_BDEV_SIZE as the name size for setup structure
  block: add lld busy state exporting interface
  block: Fix blk_start_queueing() to not kick a stopped queue
  include blktrace_api.h in headers_install
  ...

124 files changed:
Documentation/DMA-API.txt
Documentation/DocBook/kernel-api.tmpl
Documentation/block/deadline-iosched.txt
Documentation/cdrom/ide-cd
block/Makefile
block/as-iosched.c
block/blk-barrier.c
block/blk-core.c
block/blk-exec.c
block/blk-integrity.c
block/blk-map.c
block/blk-merge.c
block/blk-settings.c
block/blk-softirq.c [new file with mode: 0644]
block/blk-sysfs.c
block/blk-tag.c
block/blk-timeout.c [new file with mode: 0644]
block/blk.h
block/blktrace.c
block/bsg.c
block/cfq-iosched.c
block/cmd-filter.c
block/compat_ioctl.c
block/deadline-iosched.c
block/elevator.c
block/genhd.c
block/ioctl.c
block/scsi_ioctl.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/base/base.h
drivers/base/class.c
drivers/base/core.c
drivers/block/aoe/aoeblk.c
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoedev.c
drivers/block/cciss.c
drivers/block/cciss_scsi.c
drivers/block/cciss_scsi.h
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/nbd.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/cdrom/cdrom.c
drivers/cdrom/gdrom.c
drivers/char/random.c
drivers/ide/ide-cd.c
drivers/ide/ide-disk.c
drivers/ide/ide-probe.c
drivers/md/dm-ioctl.c
drivers/md/dm-mpath.c
drivers/md/dm-stripe.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/memstick/core/mspro_block.c
drivers/mmc/card/block.c
drivers/mtd/ftl.c
drivers/mtd/mtd_blkdevs.c
drivers/s390/block/dasd_proc.c
drivers/s390/block/dcssblk.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/gdth.c
drivers/scsi/gdth.h
drivers/scsi/gdth_proc.c
drivers/scsi/gdth_proc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ide-scsi.c
drivers/scsi/ipr.c
drivers/scsi/ips.c
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/qla1280.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_tgt_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/sym53c8xx_2/sym_glue.c
fs/bio-integrity.c
fs/bio.c
fs/block_dev.c
fs/fat/fatent.c
fs/partitions/check.c
fs/partitions/check.h
include/linux/Kbuild
include/linux/ata.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/device.h
include/linux/elevator.h
include/linux/fd.h
include/linux/fs.h
include/linux/genhd.h
include/linux/klist.h
include/linux/major.h
include/linux/mtd/blktrans.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_host.h
include/scsi/scsi_transport.h
init/do_mounts.c
lib/Kconfig.debug
lib/klist.c
mm/bounce.c

index d8b63d1..b8e8646 100644
@@ -337,7 +337,7 @@ With scatterlists, you use the resulting mapping like this:
        int i, count = dma_map_sg(dev, sglist, nents, direction);
        struct scatterlist *sg;
 
-       for (i = 0, sg = sglist; i < count; i++, sg++) {
+       for_each_sg(sglist, sg, count, i) {
                hw_address[i] = sg_dma_address(sg);
                hw_len[i] = sg_dma_len(sg);
        }
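
A minimal sketch of the loop the updated documentation recommends, assuming "dev", "sglist" and "nents" are already set up by the caller; the example_* function name is illustrative only.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_map_and_program(struct device *dev,
                                       struct scatterlist *sglist, int nents,
                                       enum dma_data_direction dir)
    {
            struct scatterlist *sg;
            int i, count;

            count = dma_map_sg(dev, sglist, nents, dir);
            if (count == 0)
                    return -ENOMEM;

            /* walk only the "count" mapped entries, never the original nents */
            for_each_sg(sglist, sg, count, i)
                    pr_debug("seg %d: addr 0x%llx len %u\n", i,
                             (unsigned long long)sg_dma_address(sg),
                             sg_dma_len(sg));

            /* ... perform the transfer ... */

            /* unmap with the same nents that was passed to dma_map_sg() */
            dma_unmap_sg(dev, sglist, nents, dir);
            return 0;
    }
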
index b7b1482..f5696ba 100644
@@ -364,6 +364,10 @@ X!Edrivers/pnp/system.c
 !Eblock/blk-barrier.c
 !Eblock/blk-tag.c
 !Iblock/blk-tag.c
+!Eblock/blk-integrity.c
+!Iblock/blktrace.c
+!Iblock/genhd.c
+!Eblock/genhd.c
   </chapter>
 
   <chapter id="chrdev">
index c23cab1..7257676 100644
@@ -30,12 +30,18 @@ write_expire        (in ms)
 Similar to read_expire mentioned above, but for writes.
 
 
-fifo_batch
+fifo_batch     (number of requests)
 ----------
 
-When a read request expires its deadline, we must move some requests from
-the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move.
+Requests are grouped into ``batches'' of a particular data direction (read or
+write) which are serviced in increasing sector order.  To limit extra seeking,
+deadline expiries are only checked between batches.  fifo_batch controls the
+maximum number of requests per batch.
+
+This parameter tunes the balance between per-request latency and aggregate
+throughput.  When low latency is the primary concern, smaller is better (where
+a value of 1 yields first-come first-served behaviour).  Increasing fifo_batch
+generally improves throughput, at the cost of latency variation.
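
A minimal user-space sketch of adjusting the tunable, assuming the deadline scheduler is active on a disk named sda (the path is only an example):

    #include <stdio.h>

    /* Set fifo_batch to 1 for lowest latency (first-come first-served). */
    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/queue/iosched/fifo_batch", "w");

            if (!f) {
                    perror("fifo_batch");
                    return 1;
            }
            fprintf(f, "1\n");
            return fclose(f) ? 1 : 0;
    }
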
 
 
 writes_starved (number of dispatches)
index 91c0dcc..2c558cd 100644
@@ -145,8 +145,7 @@ useful for reading photocds.
 
 To play an audio CD, you should first unmount and remove any data
 CDROM.  Any of the CDROM player programs should then work (workman,
-workbone, cdplayer, etc.).  Lacking anything else, you could use the
-cdtester program in Documentation/cdrom/sbpcd.
+workbone, cdplayer, etc.).
 
 On a few drives, you can read digital audio directly using a program
 such as cdda2wav.  The only types of drive which I've heard support
index 208000b..bfe7304 100644
@@ -4,8 +4,8 @@
 
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
                        blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
-                       blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
-                       cmd-filter.o
+                       blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+                       ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
 
 obj-$(CONFIG_BLK_DEV_BSG)      += bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
index cf4eb0e..71f0abb 100644
@@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
                        del_timer(&ad->antic_timer);
                ad->antic_status = ANTIC_FINISHED;
                /* see as_work_handler */
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(ad->q, &ad->antic_work);
        }
 }
 
@@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
                aic = ad->io_context->aic;
 
                ad->antic_status = ANTIC_FINISHED;
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(q, &ad->antic_work);
 
                if (aic->ttime_samples == 0) {
                        /* process anticipated on has exited or timed out*/
@@ -745,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+#if 0 /* disable for now, we need to check tag level as well */
+       /*
+        * SSD device without seek penalty, disable idling
+        */
+       if (blk_queue_nonrot(ad->q))
+               return 0;
+#endif
+
        if (!ad->io_context)
                /*
                 * Last request submitted was a write
@@ -844,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
        if (ad->changed_batch && ad->nr_dispatched == 1) {
                ad->current_batch_expires = jiffies +
                                        ad->batch_expire[ad->batch_data_dir];
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(q, &ad->antic_work);
                ad->changed_batch = 0;
 
                if (ad->batch_data_dir == REQ_SYNC)
index a09ead1..5c99ff8 100644
@@ -293,7 +293,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
-       submit_bio(1 << BIO_RW_BARRIER, bio);
+       submit_bio(WRITE_BARRIER, bio);
 
        wait_for_completion(&wait);
 
@@ -315,3 +315,73 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       }
+
+       bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:      blockdev to issue discard for
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to discard
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question. Does not wait.
+ */
+int blkdev_issue_discard(struct block_device *bdev,
+                        sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
+{
+       struct request_queue *q;
+       struct bio *bio;
+       int ret = 0;
+
+       if (bdev->bd_disk == NULL)
+               return -ENXIO;
+
+       q = bdev_get_queue(bdev);
+       if (!q)
+               return -ENXIO;
+
+       if (!q->prepare_discard_fn)
+               return -EOPNOTSUPP;
+
+       while (nr_sects && !ret) {
+               bio = bio_alloc(gfp_mask, 0);
+               if (!bio)
+                       return -ENOMEM;
+
+               bio->bi_end_io = blkdev_discard_end_io;
+               bio->bi_bdev = bdev;
+
+               bio->bi_sector = sector;
+
+               if (nr_sects > q->max_hw_sectors) {
+                       bio->bi_size = q->max_hw_sectors << 9;
+                       nr_sects -= q->max_hw_sectors;
+                       sector += q->max_hw_sectors;
+               } else {
+                       bio->bi_size = nr_sects << 9;
+                       nr_sects = 0;
+               }
+               bio_get(bio);
+               submit_bio(DISCARD_BARRIER, bio);
+
+               /* Check if it failed immediately */
+               if (bio_flagged(bio, BIO_EOPNOTSUPP))
+                       ret = -EOPNOTSUPP;
+               else if (!bio_flagged(bio, BIO_UPTODATE))
+                       ret = -EIO;
+               bio_put(bio);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
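
A hedged usage sketch of the helper exported above: a filesystem-style caller discarding a freed range and treating missing discard support as a no-op. The example_* name and the calling context are assumptions.

    #include <linux/blkdev.h>

    static int example_discard_free_range(struct block_device *bdev,
                                          sector_t start, sector_t nr_sects)
    {
            int ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);

            /* queues without a prepare_discard_fn report -EOPNOTSUPP */
            if (ret == -EOPNOTSUPP)
                    ret = 0;
            return ret;
    }
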
index 2cba5ef..2d053b5 100644
@@ -26,8 +26,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 
@@ -50,27 +48,26 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-
 static void drive_stat_acct(struct request *rq, int new_io)
 {
        struct hd_struct *part;
        int rw = rq_data_dir(rq);
+       int cpu;
 
        if (!blk_fs_request(rq) || !rq->rq_disk)
                return;
 
-       part = get_part(rq->rq_disk, rq->sector);
+       cpu = part_stat_lock();
+       part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+
        if (!new_io)
-               __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+               part_stat_inc(cpu, part, merges[rw]);
        else {
-               disk_round_stats(rq->rq_disk);
-               rq->rq_disk->in_flight++;
-               if (part) {
-                       part_round_stats(part);
-                       part->in_flight++;
-               }
+               part_round_stats(cpu, part);
+               part_inc_in_flight(part);
        }
+
+       part_stat_unlock();
 }
 
 void blk_queue_congestion_threshold(struct request_queue *q)
@@ -113,7 +110,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        memset(rq, 0, sizeof(*rq));
 
        INIT_LIST_HEAD(&rq->queuelist);
-       INIT_LIST_HEAD(&rq->donelist);
+       INIT_LIST_HEAD(&rq->timeout_list);
+       rq->cpu = -1;
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
@@ -308,7 +306,7 @@ void blk_unplug_timeout(unsigned long data)
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
 
-       kblockd_schedule_work(&q->unplug_work);
+       kblockd_schedule_work(q, &q->unplug_work);
 }
 
 void blk_unplug(struct request_queue *q)
@@ -325,6 +323,21 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
+static void blk_invoke_request_fn(struct request_queue *q)
+{
+       /*
+        * one level of recursion is ok and is much faster than kicking
+        * the unplug handling
+        */
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+               q->request_fn(q);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
+       } else {
+               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+               kblockd_schedule_work(q, &q->unplug_work);
+       }
+}
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -339,18 +352,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               blk_plug_device(q);
-               kblockd_schedule_work(&q->unplug_work);
-       }
+       blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -408,15 +410,8 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!elv_queue_empty(q)) {
-               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                       q->request_fn(q);
-                       queue_flag_clear(QUEUE_FLAG_REENTER, q);
-               } else {
-                       blk_plug_device(q);
-                       kblockd_schedule_work(&q->unplug_work);
-               }
-       }
+       if (!elv_queue_empty(q))
+               blk_invoke_request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -441,6 +436,14 @@ void blk_put_queue(struct request_queue *q)
 
 void blk_cleanup_queue(struct request_queue *q)
 {
+       /*
+        * We know we have process context here, so we can be a little
+        * cautious and ensure that pending block actions on this device
+        * are done before moving on. Going into this function, we should
+        * not have processes doing IO to this device.
+        */
+       blk_sync_queue(q);
+
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
@@ -496,6 +499,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        }
 
        init_timer(&q->unplug_timer);
+       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_LIST_HEAD(&q->timeout_list);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -531,7 +536,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  *    request queue; this lock will be taken also from interrupt context, so irq
  *    disabling is needed for it.
  *
- *    Function returns a pointer to the initialized request queue, or NULL if
+ *    Function returns a pointer to the initialized request queue, or %NULL if
  *    it didn't succeed.
  *
  * Note:
@@ -569,7 +574,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
+       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER |
+                                  1 << QUEUE_FLAG_STACKABLE);
        q->queue_lock           = lock;
 
        blk_queue_segment_boundary(q, 0xffffffff);
@@ -624,10 +630,6 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 
        blk_rq_init(q, rq);
 
-       /*
-        * first three bits are identical in rq->cmd_flags and bio->bi_rw,
-        * see bio.h and blkdev.h
-        */
        rq->cmd_flags = rw | REQ_ALLOCED;
 
        if (priv) {
@@ -888,9 +890,11 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_start_queueing(struct request_queue *q)
 {
-       if (!blk_queue_plugged(q))
+       if (!blk_queue_plugged(q)) {
+               if (unlikely(blk_queue_stopped(q)))
+                       return;
                q->request_fn(q);
-       else
+       } else
                __generic_unplug_device(q);
 }
 EXPORT_SYMBOL(blk_start_queueing);
@@ -907,6 +911,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       blk_delete_timer(rq);
+       blk_clear_rq_complete(rq);
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
        if (blk_rq_tagged(rq))
@@ -917,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
- * blk_insert_request - insert a special request in to a request queue
+ * blk_insert_request - insert a special request into a request queue
  * @q:         request queue where request should be inserted
  * @rq:                request to be inserted
  * @at_head:   insert request at head or tail of queue
@@ -927,8 +933,8 @@ EXPORT_SYMBOL(blk_requeue_request);
  *    Many block devices need to execute commands asynchronously, so they don't
  *    block the whole kernel from preemption during request execution.  This is
  *    accomplished normally by inserting aritficial requests tagged as
- *    REQ_SPECIAL in to the corresponding request queue, and letting them be
- *    scheduled for actual execution by the request queue.
+ *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
+ *    be scheduled for actual execution by the request queue.
  *
  *    We have the option of inserting the head or the tail of the queue.
  *    Typically we use the tail for new ioctls and so forth.  We use the head
@@ -982,8 +988,22 @@ static inline void add_request(struct request_queue *q, struct request *req)
        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
 
-/*
- * disk_round_stats()  - Round off the performance stats on a struct
+static void part_round_stats_single(int cpu, struct hd_struct *part,
+                                   unsigned long now)
+{
+       if (now == part->stamp)
+               return;
+
+       if (part->in_flight) {
+               __part_stat_add(cpu, part, time_in_queue,
+                               part->in_flight * (now - part->stamp));
+               __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
+       }
+       part->stamp = now;
+}
+
+/**
+ * part_round_stats()  - Round off the performance stats on a struct
  * disk_stats.
  *
  * The average IO queue length and utilisation statistics are maintained
@@ -997,36 +1017,15 @@ static inline void add_request(struct request_queue *q, struct request *req)
  * /proc/diskstats.  This accounts immediately for all queue usage up to
  * the current jiffies and restarts the counters again.
  */
-void disk_round_stats(struct gendisk *disk)
+void part_round_stats(int cpu, struct hd_struct *part)
 {
        unsigned long now = jiffies;
 
-       if (now == disk->stamp)
-               return;
-
-       if (disk->in_flight) {
-               __disk_stat_add(disk, time_in_queue,
-                               disk->in_flight * (now - disk->stamp));
-               __disk_stat_add(disk, io_ticks, (now - disk->stamp));
-       }
-       disk->stamp = now;
-}
-EXPORT_SYMBOL_GPL(disk_round_stats);
-
-void part_round_stats(struct hd_struct *part)
-{
-       unsigned long now = jiffies;
-
-       if (now == part->stamp)
-               return;
-
-       if (part->in_flight) {
-               __part_stat_add(part, time_in_queue,
-                               part->in_flight * (now - part->stamp));
-               __part_stat_add(part, io_ticks, (now - part->stamp));
-       }
-       part->stamp = now;
+       if (part->partno)
+               part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
+       part_round_stats_single(cpu, part, now);
 }
+EXPORT_SYMBOL_GPL(part_round_stats);
 
 /*
  * queue lock must be held
@@ -1070,6 +1069,7 @@ EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
+       req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
        /*
@@ -1081,7 +1081,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
         */
-       if (unlikely(bio_barrier(bio)))
+       if (unlikely(bio_discard(bio))) {
+               req->cmd_flags |= REQ_DISCARD;
+               if (bio_barrier(bio))
+                       req->cmd_flags |= REQ_SOFTBARRIER;
+               req->q->prepare_discard_fn(req->q, req);
+       } else if (unlikely(bio_barrier(bio)))
                req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
        if (bio_sync(bio))
@@ -1099,7 +1104,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, nr_sectors, barrier, err;
+       int el_ret, nr_sectors, barrier, discard, err;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        int rw_flags;
@@ -1114,7 +1119,14 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        blk_queue_bounce(q, &bio);
 
        barrier = bio_barrier(bio);
-       if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+       if (unlikely(barrier) && bio_has_data(bio) &&
+           (q->next_ordered == QUEUE_ORDERED_NONE)) {
+               err = -EOPNOTSUPP;
+               goto end_io;
+       }
+
+       discard = bio_discard(bio);
+       if (unlikely(discard) && !q->prepare_discard_fn) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
@@ -1138,6 +1150,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->biotail = bio;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1165,6 +1179,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                req->sector = req->hard_sector = bio->bi_sector;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
+               if (!blk_rq_cpu_valid(req))
+                       req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
@@ -1200,13 +1216,15 @@ get_rq:
        init_request_from_bio(req, bio);
 
        spin_lock_irq(q->queue_lock);
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
+           bio_flagged(bio, BIO_CPU_AFFINE))
+               req->cpu = blk_cpu_to_group(smp_processor_id());
        if (elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
        if (sync)
                __generic_unplug_device(q);
-
        spin_unlock_irq(q->queue_lock);
        return 0;
 
@@ -1260,8 +1278,9 @@ __setup("fail_make_request=", setup_fail_make_request);
 
 static int should_fail_request(struct bio *bio)
 {
-       if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
-           (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+       struct hd_struct *part = bio->bi_bdev->bd_part;
+
+       if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
                return should_fail(&fail_make_request, bio->bi_size);
 
        return 0;
@@ -1314,7 +1333,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 }
 
 /**
- * generic_make_request: hand a buffer to its device driver for I/O
+ * generic_make_request - hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
  *
  * generic_make_request() is used to make I/O requests of block
@@ -1409,7 +1428,8 @@ end_io:
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
-               if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+               if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
+                   (bio_discard(bio) && !q->prepare_discard_fn)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
@@ -1471,13 +1491,13 @@ void generic_make_request(struct bio *bio)
 EXPORT_SYMBOL(generic_make_request);
 
 /**
- * submit_bio: submit a bio to the block device layer for I/O
+ * submit_bio - submit a bio to the block device layer for I/O
  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
  *
  * submit_bio() is very similar in purpose to generic_make_request(), and
  * uses that function to do most of the work. Both are fairly rough
- * interfaces, @bio must be presetup and ready for I/O.
+ * interfaces; @bio must be presetup and ready for I/O.
  *
  */
 void submit_bio(int rw, struct bio *bio)
@@ -1490,11 +1510,7 @@ void submit_bio(int rw, struct bio *bio)
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
-       if (!bio_empty_barrier(bio)) {
-
-               BIO_BUG_ON(!bio->bi_size);
-               BIO_BUG_ON(!bio->bi_io_vec);
-
+       if (bio_has_data(bio)) {
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
@@ -1517,9 +1533,90 @@ void submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
+ * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * @q:  the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ *    @rq may have been made based on weaker limitations of upper-level queues
+ *    in request stacking drivers, and it may violate the limitation of @q.
+ *    Since the block layer and the underlying device driver trust @rq
+ *    after it is inserted to @q, it should be checked against @q before
+ *    the insertion using this generic function.
+ *
+ *    This function should also be useful for request stacking drivers
+ *    in some cases below, so export this function.
+ *    Request stacking drivers like request-based dm may change the queue
+ *    limits while requests are in the queue (e.g. dm's table swapping).
+ *    Such request stacking drivers should check those requests against
+ *    the new queue limits again when they dispatch those requests,
+ *    although such checks are also done against the old queue limits
+ *    when submitting requests.
+ */
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+{
+       if (rq->nr_sectors > q->max_sectors ||
+           rq->data_len > q->max_hw_sectors << 9) {
+               printk(KERN_ERR "%s: over max size limit.\n", __func__);
+               return -EIO;
+       }
+
+       /*
+        * queue's settings related to segment counting like q->bounce_pfn
+        * may differ from that of other stacking queues.
+        * Recalculate it to check the request correctly on this queue's
+        * limitation.
+        */
+       blk_recalc_rq_segments(rq);
+       if (rq->nr_phys_segments > q->max_phys_segments ||
+           rq->nr_phys_segments > q->max_hw_segments) {
+               printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q:  the queue to submit the request
+ * @rq: the request being queued
+ */
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+       unsigned long flags;
+
+       if (blk_rq_check_limits(q, rq))
+               return -EIO;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+       if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
+           should_fail(&fail_make_request, blk_rq_bytes(rq)))
+               return -EIO;
+#endif
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       /*
+        * Submitting request must be dequeued before calling this function
+        * because it will be linked to another request_queue
+        */
+       BUG_ON(blk_queued_rq(rq));
+
+       drive_stat_acct(rq, 1);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
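
A sketch of how a request stacking driver (request-based dm is the case the comments above name) might use the new helper; the clone is assumed to be fully prepared and dequeued, and the example_* name is hypothetical.

    static int example_dispatch_clone(struct request_queue *lower_q,
                                      struct request *clone)
    {
            int ret;

            /*
             * blk_insert_cloned_request() re-checks the clone against
             * lower_q's limits via blk_rq_check_limits(), so a clone built
             * for a queue with weaker limits is rejected with -EIO instead
             * of being trusted by the lower driver.
             */
            ret = blk_insert_cloned_request(lower_q, clone);
            if (ret)
                    return ret;     /* requeue or fail it in the stacking driver */

            return 0;
    }
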
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
- * @error:    0 for success, < 0 for error
+ * @error:    %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -1527,8 +1624,8 @@ EXPORT_SYMBOL(submit_bio);
  *     for the next range of segments (if any) in the cluster.
  *
  * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request, call end_that_request_last()
+ *     %1 - still buffers pending for this request
  **/
 static int __end_that_request_first(struct request *req, int error,
                                    int nr_bytes)
@@ -1539,7 +1636,7 @@ static int __end_that_request_first(struct request *req, int error,
        blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 
        /*
-        * for a REQ_BLOCK_PC request, we want to carry any eventual
+        * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
         * sense key with us all the way through
         */
        if (!blk_pc_request(req))
@@ -1552,11 +1649,14 @@ static int __end_that_request_first(struct request *req, int error,
        }
 
        if (blk_fs_request(req) && req->rq_disk) {
-               struct hd_struct *part = get_part(req->rq_disk, req->sector);
                const int rw = rq_data_dir(req);
+               struct hd_struct *part;
+               int cpu;
 
-               all_stat_add(req->rq_disk, part, sectors[rw],
-                               nr_bytes >> 9, req->sector);
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
+               part_stat_unlock();
        }
 
        total_bytes = bio_nbytes = 0;
@@ -1641,88 +1741,14 @@ static int __end_that_request_first(struct request *req, int error,
 }
 
 /*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
-{
-       struct list_head *cpu_list, local_list;
-
-       local_irq_disable();
-       cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_replace_init(cpu_list, &local_list);
-       local_irq_enable();
-
-       while (!list_empty(&local_list)) {
-               struct request *rq;
-
-               rq = list_entry(local_list.next, struct request, donelist);
-               list_del_init(&rq->donelist);
-               rq->q->softirq_done_fn(rq);
-       }
-}
-
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
-                                   unsigned long action, void *hcpu)
-{
-       /*
-        * If a CPU goes away, splice its entries to the current CPU
-        * and trigger a run of the softirq
-        */
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               int cpu = (unsigned long) hcpu;
-
-               local_irq_disable();
-               list_splice_init(&per_cpu(blk_cpu_done, cpu),
-                                &__get_cpu_var(blk_cpu_done));
-               raise_softirq_irqoff(BLOCK_SOFTIRQ);
-               local_irq_enable();
-       }
-
-       return NOTIFY_OK;
-}
-
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
-       .notifier_call  = blk_cpu_notify,
-};
-
-/**
- * blk_complete_request - end I/O on a request
- * @req:      the request being processed
- *
- * Description:
- *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completion callback
- *     through requeueing. The actual completion happens out-of-order,
- *     through a softirq handler. The user must have registered a completion
- *     callback through blk_queue_softirq_done().
- **/
-
-void blk_complete_request(struct request *req)
-{
-       struct list_head *cpu_list;
-       unsigned long flags;
-
-       BUG_ON(!req->q->softirq_done_fn);
-
-       local_irq_save(flags);
-
-       cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_add_tail(&req->donelist, cpu_list);
-       raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(blk_complete_request);
-
-/*
  * queue lock must be held
  */
 static void end_that_request_last(struct request *req, int error)
 {
        struct gendisk *disk = req->rq_disk;
 
+       blk_delete_timer(req);
+
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
@@ -1740,16 +1766,18 @@ static void end_that_request_last(struct request *req, int error)
        if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
-               struct hd_struct *part = get_part(disk, req->sector);
-
-               __all_stat_inc(disk, part, ios[rw], req->sector);
-               __all_stat_add(disk, part, ticks[rw], duration, req->sector);
-               disk_round_stats(disk);
-               disk->in_flight--;
-               if (part) {
-                       part_round_stats(part);
-                       part->in_flight--;
-               }
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(disk, req->sector);
+
+               part_stat_inc(cpu, part, ios[rw]);
+               part_stat_add(cpu, part, ticks[rw], duration);
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
        }
 
        if (req->end_io)
@@ -1762,17 +1790,6 @@ static void end_that_request_last(struct request *req, int error)
        }
 }
 
-static inline void __end_request(struct request *rq, int uptodate,
-                                unsigned int nr_bytes)
-{
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       __blk_end_request(rq, error, nr_bytes);
-}
-
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
  * @rq: the request being processed
@@ -1803,74 +1820,57 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
- * end_queued_request - end all I/O on a queued request
- * @rq:                the request being processed
- * @uptodate:  error value or 0/1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request, and removes it from the block layer queues.
- *     Not suitable for normal IO completion, unless the driver still has
- *     the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq:                the request being processed
- * @uptodate:  error value or 0/1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request. The request must already have been
- *     dequeued using blkdev_dequeue_request(), as is normally the case
- *     for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
-       __end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
-/**
  * end_request - end I/O on the current segment of the request
  * @req:       the request being processed
- * @uptodate:  error value or 0/1 uptodate flag
+ * @uptodate:  error value or %0/%1 uptodate flag
  *
  * Description:
  *     Ends I/O on the current segment of a request. If that is the only
  *     remaining segment, the request is also completed and freed.
  *
- *     This is a remnant of how older block drivers handled IO completions.
- *     Modern drivers typically end IO on the full request in one go, unless
+ *     This is a remnant of how older block drivers handled I/O completions.
+ *     Modern drivers typically end I/O on the full request in one go, unless
  *     they have a residual value to account for. For that case this function
  *     isn't really useful, unless the residual just happens to be the
  *     full current segment. In other words, don't use this function in new
- *     code. Either use end_request_completely(), or the
- *     end_that_request_chunk() (along with end_that_request_last()) for
- *     partial completions.
- *
+ *     code. Use blk_end_request() or __blk_end_request() to end a request.
  **/
 void end_request(struct request *req, int uptodate)
 {
-       __end_request(req, uptodate, req->hard_cur_sectors << 9);
+       int error = 0;
+
+       if (uptodate <= 0)
+               error = uptodate ? uptodate : -EIO;
+
+       __blk_end_request(req, error, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
+static int end_that_request_data(struct request *rq, int error,
+                                unsigned int nr_bytes, unsigned int bidi_bytes)
+{
+       if (rq->bio) {
+               if (__end_that_request_first(rq, error, nr_bytes))
+                       return 1;
+
+               /* Bidi request must be completed as a whole */
+               if (blk_bidi_rq(rq) &&
+                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:           the request being processed
- * @error:        0 for success, < 0 for error
+ * @error:        %0 for success, < %0 for error
  * @nr_bytes:     number of bytes to complete @rq
  * @bidi_bytes:   number of bytes to complete @rq->next_rq
  * @drv_callback: function called between completion of bios in the request
  *                and completion of the request.
- *                If the callback returns non 0, this helper returns without
+ *                If the callback returns non %0, this helper returns without
  *                completion of the request.
  *
  * Description:
@@ -1878,8 +1878,8 @@ EXPORT_SYMBOL(end_request);
  *     If @rq has leftover, sets it up for the next range of segments.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - this request is not freed yet, it still has pending buffers.
+ *     %0 - we are done with this request
+ *     %1 - this request is not freed yet, it still has pending buffers.
  **/
 static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
                      unsigned int bidi_bytes,
@@ -1888,15 +1888,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
        struct request_queue *q = rq->q;
        unsigned long flags = 0UL;
 
-       if (blk_fs_request(rq) || blk_pc_request(rq)) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
+       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
+               return 1;
 
        /* Special feature for tricky drivers */
        if (drv_callback && drv_callback(rq))
@@ -1914,7 +1907,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    0 for success, < 0 for error
+ * @error:    %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -1922,8 +1915,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
  *     If @rq has leftover, sets it up for the next range of segments.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
@@ -1934,22 +1927,20 @@ EXPORT_SYMBOL_GPL(blk_end_request);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    0 for success, < 0 for error
+ * @error:    %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
  *     Must be called with queue lock held unlike blk_end_request().
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-       if (blk_fs_request(rq) || blk_pc_request(rq)) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-       }
+       if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
+               return 1;
 
        add_disk_randomness(rq->rq_disk);
 
@@ -1962,7 +1953,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
 /**
  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
  * @rq:         the bidi request being processed
- * @error:      0 for success, < 0 for error
+ * @error:      %0 for success, < %0 for error
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -1970,8 +1961,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
                         unsigned int bidi_bytes)
@@ -1981,13 +1972,43 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:           the request being processed
+ * @error:        %0 for success, < %0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq doesn't have leftover.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ */
+void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+{
+       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
+               /*
+                * These members are not updated in end_that_request_data()
+                * when all bios are completed.
+                * Update them so that the request stacking driver can find
+                * how many bytes remain in the request later.
+                */
+               rq->nr_sectors = rq->hard_nr_sectors = 0;
+               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
+       }
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
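
A hedged sketch of the partial-completion flow described above, as a request stacking driver's clone completion path might use it; the names and the surrounding driver logic are assumptions.

    static void example_clone_progress(struct request *orig, int error,
                                       unsigned int bytes)
    {
            /*
             * Propagate the bytes completed by the clone to the original
             * request without freeing it.  blk_update_request() zeroes the
             * sector counts once everything is done, so leftover can be
             * tested for afterwards.
             */
            blk_update_request(orig, error, bytes);

            if (orig->nr_sectors)
                    return;         /* leftover: redispatch the remainder */

            /* nothing left: the stacking driver may now complete "orig" */
    }
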
+
+/**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq:           the request being processed
- * @error:        0 for success, < 0 for error
+ * @error:        %0 for success, < %0 for error
  * @nr_bytes:     number of bytes to complete
  * @drv_callback: function called between completion of bios in the request
  *                and completion of the request.
- *                If the callback returns non 0, this helper returns without
+ *                If the callback returns non %0, this helper returns without
  *                completion of the request.
  *
  * Description:
@@ -2000,10 +2021,10 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *     Don't use this interface in other places anymore.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - this request is not freed yet.
- *         this request still has pending buffers or
- *         the driver doesn't want to finish this request yet.
+ *     %0 - we are done with this request
+ *     %1 - this request is not freed yet.
+ *          this request still has pending buffers or
+ *          the driver doesn't want to finish this request yet.
  **/
 int blk_end_request_callback(struct request *rq, int error,
                             unsigned int nr_bytes,
@@ -2016,15 +2037,17 @@ EXPORT_SYMBOL_GPL(blk_end_request_callback);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
-       /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+       /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
+          we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
        rq->cmd_flags |= (bio->bi_rw & 3);
 
-       rq->nr_phys_segments = bio_phys_segments(q, bio);
-       rq->nr_hw_segments = bio_hw_segments(q, bio);
+       if (bio_has_data(bio)) {
+               rq->nr_phys_segments = bio_phys_segments(q, bio);
+               rq->buffer = bio_data(bio);
+       }
        rq->current_nr_sectors = bio_cur_sectors(bio);
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-       rq->buffer = bio_data(bio);
        rq->data_len = bio->bi_size;
 
        rq->bio = rq->biotail = bio;
@@ -2033,7 +2056,35 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
-int kblockd_schedule_work(struct work_struct *work)
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q : the queue of the device being checked
+ *
+ * Description:
+ *    Check if underlying low-level drivers of a device are busy.
+ *    If the drivers want to export their busy state, they must set their
+ *    own exporting function using blk_queue_lld_busy() first.
+ *
+ *    Basically, this function is used only by request stacking drivers
+ *    to stop dispatching requests to underlying devices when underlying
+ *    devices are busy.  This behavior helps more I/O merging on the queue
+ *    of the request stacking driver and prevents I/O throughput regression
+ *    on burst I/O load.
+ *
+ * Return:
+ *    0 - Not busy (The request stacking driver should dispatch the request)
+ *    1 - Busy (The request stacking driver should stop dispatching requests)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+       if (q->lld_busy_fn)
+               return q->lld_busy_fn(q);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
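
A sketch of the dispatch-time check described above, assuming a stacking driver that keeps its own queue of pending work; blk_lld_busy() only ever reports busy for queues whose drivers registered a handler with blk_queue_lld_busy().

    static int example_should_dispatch(struct request_queue *lower_q)
    {
            /*
             * Leave the request queued in the stacking driver while the
             * lower device reports busy, so more merging can happen there.
             */
            if (blk_lld_busy(lower_q))
                    return 0;

            return 1;
    }
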
+
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }
@@ -2047,8 +2098,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
 
 int __init blk_dev_init(void)
 {
-       int i;
-
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
@@ -2059,12 +2108,6 @@ int __init blk_dev_init(void)
        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
-       for_each_possible_cpu(i)
-               INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
-
-       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-       register_hotcpu_notifier(&blk_cpu_notifier);
-
        return 0;
 }
 
index 9bceff7..6af716d 100644
@@ -16,7 +16,7 @@
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
- * @error: end io status of the request
+ * @error: end I/O status of the request
  */
 static void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
  * @done:      I/O completion handler
  *
  * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
+ *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution.  Don't wait for completion.
  */
 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
  * @at_head:    insert request at head or tail of queue
  *
  * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
+ *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution and wait for completion.
  */
 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
index 3f1a847..61a8e2f 100644
@@ -108,51 +108,51 @@ new_segment:
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
 /**
- * blk_integrity_compare - Compare integrity profile of two block devices
- * @b1:                Device to compare
- * @b2:                Device to compare
+ * blk_integrity_compare - Compare integrity profile of two disks
+ * @gd1:       Disk to compare
+ * @gd2:       Disk to compare
  *
  * Description: Meta-devices like DM and MD need to verify that all
  * sub-devices use the same integrity format before advertising to
  * upper layers that they can send/receive integrity metadata.  This
- * function can be used to check whether two block devices have
+ * function can be used to check whether two gendisk devices have
  * compatible integrity formats.
  */
-int blk_integrity_compare(struct block_device *bd1, struct block_device *bd2)
+int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 {
-       struct blk_integrity *b1 = bd1->bd_disk->integrity;
-       struct blk_integrity *b2 = bd2->bd_disk->integrity;
+       struct blk_integrity *b1 = gd1->integrity;
+       struct blk_integrity *b2 = gd2->integrity;
 
-       BUG_ON(bd1->bd_disk == NULL);
-       BUG_ON(bd2->bd_disk == NULL);
+       if (!b1 && !b2)
+               return 0;
 
        if (!b1 || !b2)
-               return 0;
+               return -1;
 
        if (b1->sector_size != b2->sector_size) {
                printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
-                      bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+                      gd1->disk_name, gd2->disk_name,
                       b1->sector_size, b2->sector_size);
                return -1;
        }
 
        if (b1->tuple_size != b2->tuple_size) {
                printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
-                      bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+                      gd1->disk_name, gd2->disk_name,
                       b1->tuple_size, b2->tuple_size);
                return -1;
        }
 
        if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
                printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
-                      bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+                      gd1->disk_name, gd2->disk_name,
                       b1->tag_size, b2->tag_size);
                return -1;
        }
 
        if (strcmp(b1->name, b2->name)) {
                printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
-                      bd1->bd_disk->disk_name, bd2->bd_disk->disk_name,
+                      gd1->disk_name, gd2->disk_name,
                       b1->name, b2->name);
                return -1;
        }
@@ -331,7 +331,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
                        return -1;
 
                if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
-                                        &disk->dev.kobj, "%s", "integrity")) {
+                                        &disk_to_dev(disk)->kobj,
+                                        "%s", "integrity")) {
                        kmem_cache_free(integrity_cachep, bi);
                        return -1;
                }
@@ -375,7 +376,7 @@ void blk_integrity_unregister(struct gendisk *disk)
 
        kobject_uevent(&bi->kobj, KOBJ_REMOVE);
        kobject_del(&bi->kobj);
-       kobject_put(&disk->dev.kobj);
        kmem_cache_free(integrity_cachep, bi);
+       disk->integrity = NULL;
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
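
With blk_integrity_compare() now taking gendisks, a meta-device checking two member devices before advertising integrity support might look like the sketch below; the block_device arguments and the example_* name are assumptions.

    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    static int example_members_integrity_ok(struct block_device *m1,
                                            struct block_device *m2)
    {
            /* 0 means the profiles match (or neither disk registered one) */
            return blk_integrity_compare(m1->bd_disk, m2->bd_disk) == 0;
    }
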
index af37e4a..4849fa3 100644
@@ -41,10 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-                            void __user *ubuf, unsigned int len)
+                            struct rq_map_data *map_data, void __user *ubuf,
+                            unsigned int len, int null_mapped, gfp_t gfp_mask)
 {
        unsigned long uaddr;
-       unsigned int alignment;
        struct bio *bio, *orig_bio;
        int reading, ret;
 
@@ -55,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
-       alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-       if (!(uaddr & alignment) && !(len & alignment))
-               bio = bio_map_user(q, NULL, uaddr, len, reading);
+       if (blk_rq_aligned(q, ubuf, len) && !map_data)
+               bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
-               bio = bio_copy_user(q, uaddr, len, reading);
+               bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
+       if (null_mapped)
+               bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
        orig_bio = bio;
        blk_queue_bounce(q, &bio);
 
@@ -85,17 +87,19 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 
 /**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:         request queue where request should be inserted
  * @rq:                request structure to fill
+ * @map_data:   pointer to the rq_map_data holding pages (if necessary)
  * @ubuf:      the user buffer
  * @len:       length of user data
+ * @gfp_mask:  memory allocation flags
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -105,16 +109,22 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-                   void __user *ubuf, unsigned long len)
+                   struct rq_map_data *map_data, void __user *ubuf,
+                   unsigned long len, gfp_t gfp_mask)
 {
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
-       int ret;
+       int ret, null_mapped = 0;
 
        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
-       if (!len || !ubuf)
+       if (!len)
                return -EINVAL;
+       if (!ubuf) {
+               if (!map_data || rq_data_dir(rq) != READ)
+                       return -EINVAL;
+               null_mapped = 1;
+       }
 
        while (bytes_read != len) {
                unsigned long map_len, end, start;
@@ -132,7 +142,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;
 
-               ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+               ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+                                       null_mapped, gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
@@ -154,18 +165,20 @@ unmap_rq:
 EXPORT_SYMBOL(blk_rq_map_user);
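
Existing callers (sg, bsg, the SCSI ioctl path) pass NULL for @map_data and GFP_KERNEL, as in the hedged sketch below; the helper name and error handling are illustrative, and the SCSI CDB setup a real REQ_TYPE_BLOCK_PC user needs is omitted.

    #include <linux/blkdev.h>

    static int sketch_read_to_user(struct request_queue *q, struct gendisk *disk,
                                   void __user *ubuf, unsigned long len)
    {
            struct request *rq;
            struct bio *bio;
            int ret;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENOMEM;
            rq->cmd_type = REQ_TYPE_BLOCK_PC;       /* CDB setup omitted */

            /* NULL map_data: plain user buffer, no preallocated pages */
            ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
            if (ret)
                    goto out;

            bio = rq->bio;          /* keep the original bio for unmapping */
            blk_execute_rq(q, disk, rq, 0);
            ret = blk_rq_unmap_user(bio);
    out:
            blk_put_request(rq);
            return ret;
    }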
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:         request queue where request should be inserted
  * @rq:                request to map data to
+ * @map_data:   pointer to the rq_map_data holding pages (if necessary)
  * @iov:       pointer to the iovec
  * @iov_count: number of elements in the iovec
  * @len:       I/O byte count
+ * @gfp_mask:  memory allocation flags
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -175,7 +188,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-                       struct sg_iovec *iov, int iov_count, unsigned int len)
+                       struct rq_map_data *map_data, struct sg_iovec *iov,
+                       int iov_count, unsigned int len, gfp_t gfp_mask)
 {
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
@@ -193,10 +207,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                }
        }
 
-       if (unaligned || (q->dma_pad_mask & len))
-               bio = bio_copy_user_iov(q, iov, iov_count, read);
+       if (unaligned || (q->dma_pad_mask & len) || map_data)
+               bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+                                       gfp_mask);
        else
-               bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+               bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
        if (IS_ERR(bio))
                return PTR_ERR(bio);
@@ -216,6 +231,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        rq->buffer = rq->data = NULL;
        return 0;
 }
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
@@ -224,7 +240,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  * Description:
  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
  *    supply the original rq->bio from the blk_rq_map_user() return, since
- *    the io completion may have changed rq->bio.
+ *    the I/O completion may have changed rq->bio.
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
@@ -250,7 +266,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:         request queue where request should be inserted
  * @rq:                request to fill
  * @kbuf:      the kernel buffer
@@ -264,8 +280,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
 {
-       unsigned long kaddr;
-       unsigned int alignment;
        int reading = rq_data_dir(rq) == READ;
        int do_copy = 0;
        struct bio *bio;
@@ -275,11 +289,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (!len || !kbuf)
                return -EINVAL;
 
-       kaddr = (unsigned long)kbuf;
-       alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-       do_copy = ((kaddr & alignment) || (len & alignment) ||
-                  object_is_on_stack(kbuf));
-
+       do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
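
For comparison, a kernel-buffer caller now only needs a heap allocation that satisfies the queue's DMA alignment to get the zero-copy path; a rough sketch with an illustrative helper name:

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    static int sketch_map_kernel_buf(struct request_queue *q, struct request *rq,
                                     unsigned int len)
    {
            /* kmalloc()ed, so not on the stack; mapped directly if aligned */
            void *buf = kmalloc(len, GFP_KERNEL);
            int ret;

            if (!buf)
                    return -ENOMEM;

            ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
            if (ret)
                    kfree(buf);
            /* on success, buf must stay allocated until the request completes */
            return ret;
    }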
index 5efc9e7..908d3e1 100644 (file)
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-       if (blk_fs_request(rq)) {
+       if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                rq->hard_sector += nsect;
                rq->hard_nr_sectors -= nsect;
 
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 void blk_recalc_rq_segments(struct request *rq)
 {
        int nr_phys_segs;
-       int nr_hw_segs;
        unsigned int phys_size;
-       unsigned int hw_size;
        struct bio_vec *bv, *bvprv = NULL;
        int seg_size;
-       int hw_seg_size;
        int cluster;
        struct req_iterator iter;
        int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
                return;
 
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-       hw_seg_size = seg_size = 0;
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       seg_size = 0;
+       phys_size = nr_phys_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
@@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
                 */
                high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
                if (high || highprv)
-                       goto new_hw_segment;
+                       goto new_segment;
                if (cluster) {
                        if (seg_size + bv->bv_len > q->max_segment_size)
                                goto new_segment;
@@ -74,40 +71,19 @@ void blk_recalc_rq_segments(struct request *rq)
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                goto new_segment;
-                       if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                               goto new_hw_segment;
 
                        seg_size += bv->bv_len;
-                       hw_seg_size += bv->bv_len;
                        bvprv = bv;
                        continue;
                }
 new_segment:
-               if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                       hw_seg_size += bv->bv_len;
-               else {
-new_hw_segment:
-                       if (nr_hw_segs == 1 &&
-                           hw_seg_size > rq->bio->bi_hw_front_size)
-                               rq->bio->bi_hw_front_size = hw_seg_size;
-                       hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-                       nr_hw_segs++;
-               }
-
                nr_phys_segs++;
                bvprv = bv;
                seg_size = bv->bv_len;
                highprv = high;
        }
 
-       if (nr_hw_segs == 1 &&
-           hw_seg_size > rq->bio->bi_hw_front_size)
-               rq->bio->bi_hw_front_size = hw_seg_size;
-       if (hw_seg_size > rq->biotail->bi_hw_back_size)
-               rq->biotail->bi_hw_back_size = hw_seg_size;
        rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -120,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
        blk_recalc_rq_segments(&rq);
        bio->bi_next = nxt;
        bio->bi_phys_segments = rq.nr_phys_segments;
-       bio->bi_hw_segments = rq.nr_hw_segments;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
@@ -131,13 +106,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-               return 0;
        if (bio->bi_size + nxt->bi_size > q->max_segment_size)
                return 0;
 
+       if (!bio_has_data(bio))
+               return 1;
+
+       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+               return 0;
+
        /*
-        * bio and nxt are contigous in memory, check if the queue allows
+        * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -146,22 +125,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return 0;
 }
 
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-                                struct bio *nxt)
-{
-       if (!bio_flagged(bio, BIO_SEG_VALID))
-               blk_recount_segments(q, bio);
-       if (!bio_flagged(nxt, BIO_SEG_VALID))
-               blk_recount_segments(q, nxt);
-       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
-               return 0;
-       if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-               return 0;
-
-       return 1;
-}
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -275,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
 {
-       int nr_hw_segs = bio_hw_segments(q, bio);
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
@@ -290,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
-       req->nr_hw_segments += nr_hw_segs;
        req->nr_phys_segments += nr_phys_segs;
        return 1;
 }
@@ -299,7 +260,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
        unsigned short max_sectors;
-       int len;
 
        if (unlikely(blk_pc_request(req)))
                max_sectors = q->max_hw_sectors;
@@ -316,19 +276,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
-       len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-           && !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (req->nr_hw_segments == 1)
-                               req->bio->bi_hw_front_size = len;
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
 
        return ll_new_hw_segment(q, req, bio);
 }
@@ -337,7 +284,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
        unsigned short max_sectors;
-       int len;
 
        if (unlikely(blk_pc_request(req)))
                max_sectors = q->max_hw_sectors;
@@ -351,23 +297,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                        q->last_merge = NULL;
                return 0;
        }
-       len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_front_size = len;
-                       if (req->nr_hw_segments == 1)
-                               req->biotail->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
 
        return ll_new_hw_segment(q, req, bio);
 }
@@ -376,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
        int total_phys_segments;
-       int total_hw_segments;
 
        /*
         * First check if the either of the requests are re-queued
@@ -398,26 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        if (total_phys_segments > q->max_phys_segments)
                return 0;
 
-       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-       if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-               int len = req->biotail->bi_hw_back_size +
-                               next->bio->bi_hw_front_size;
-               /*
-                * propagate the combined length to the end of the requests
-                */
-               if (req->nr_hw_segments == 1)
-                       req->bio->bi_hw_front_size = len;
-               if (next->nr_hw_segments == 1)
-                       next->biotail->bi_hw_back_size = len;
-               total_hw_segments--;
-       }
-
-       if (total_hw_segments > q->max_hw_segments)
+       if (total_phys_segments > q->max_hw_segments)
                return 0;
 
        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
-       req->nr_hw_segments = total_hw_segments;
        return 1;
 }
 
@@ -470,17 +387,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        elv_merge_requests(q, req, next);
 
        if (req->rq_disk) {
-               struct hd_struct *part
-                       = get_part(req->rq_disk, req->sector);
-               disk_round_stats(req->rq_disk);
-               req->rq_disk->in_flight--;
-               if (part) {
-                       part_round_stats(part);
-                       part->in_flight--;
-               }
+               struct hd_struct *part;
+               int cpu;
+
+               cpu = part_stat_lock();
+               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+               part_round_stats(cpu, part);
+               part_dec_in_flight(part);
+
+               part_stat_unlock();
        }
 
        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+       if (blk_rq_cpu_valid(next))
+               req->cpu = next->cpu;
 
        __blk_put_request(q, next);
        return 1;
index dfc7701..b21dcdb 100644 (file)
@@ -33,6 +33,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
+ * blk_queue_set_discard - set a discard_sectors function for queue
+ * @q:         queue
+ * @dfn:       prepare_discard function
+ *
+ * It's possible for a queue to register a discard callback which is used
+ * to transform a discard request into the appropriate type for the
+ * hardware. If none is registered, then discard requests are failed
+ * with %EOPNOTSUPP.
+ *
+ */
+void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
+{
+       q->prepare_discard_fn = dfn;
+}
+EXPORT_SYMBOL(blk_queue_set_discard);
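
A driver opts in by registering a callback at queue setup time; the sketch below mirrors the REQ_LB_OP_DISCARD translation used by the mtd_blkdevs conversion in this series, with illustrative function names.

    #include <linux/blkdev.h>

    static int sketch_prepare_discard(struct request_queue *q, struct request *req)
    {
            /* retag the request so the driver's request_fn recognises it */
            req->cmd_type = REQ_TYPE_LINUX_BLOCK;
            req->cmd[0] = REQ_LB_OP_DISCARD;
            return 0;
    }

    static void sketch_setup_discard(struct request_queue *q)
    {
            blk_queue_set_discard(q, sketch_prepare_discard);
    }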
+
+/**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q:         queue
  * @mbfn:      merge_bvec_fn
@@ -60,6 +77,24 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 }
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
+void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
+{
+       q->rq_timeout = timeout;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
+
+void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
+{
+       q->rq_timed_out_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+
+void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
+{
+       q->lld_busy_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
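
A low-level driver wires these up during queue initialization, roughly as below (function names and the 30 second timeout are illustrative):

    #include <linux/blkdev.h>

    static enum blk_eh_timer_return sketch_rq_timed_out(struct request *rq)
    {
            /*
             * A real driver would interrogate the hardware here.  Returning
             * BLK_EH_NOT_HANDLED hands the request to normal error recovery,
             * BLK_EH_RESET_TIMER re-arms the timer, and BLK_EH_HANDLED means
             * the driver has completed the request itself.
             */
            return BLK_EH_NOT_HANDLED;
    }

    static void sketch_setup_timeout(struct request_queue *q)
    {
            blk_queue_rq_timeout(q, 30 * HZ);
            blk_queue_rq_timed_out(q, sketch_rq_timed_out);
    }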
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -127,7 +162,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page.
+ *    buffers for doing I/O to pages residing above @dma_addr.
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
@@ -212,7 +247,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
  *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give as once
+ *    address/length pairs the host adapter can actually give at once
  *    to the device.
  **/
 void blk_queue_max_hw_segments(struct request_queue *q,
@@ -393,7 +428,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
  * @mask:  alignment mask
  *
  * description:
- *    set required memory and length aligment for direct dma transactions.
+ *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
  *
  **/
@@ -409,7 +444,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  * @mask:  alignment mask
  *
  * description:
- *    update required memory and length aligment for direct dma transactions.
+ *    update required memory and length alignment for direct dma transactions.
  *    If the requested alignment is larger than the current alignment, then
  *    the current queue alignment is updated to the new value, otherwise it
  *    is left alone.  The design of this is to allow multiple objects
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644 (file)
index 0000000..e660d26
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Functions related to softirq rq completions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include "blk.h"
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+       struct list_head *cpu_list, local_list;
+
+       local_irq_disable();
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_replace_init(cpu_list, &local_list);
+       local_irq_enable();
+
+       while (!list_empty(&local_list)) {
+               struct request *rq;
+
+               rq = list_entry(local_list.next, struct request, csd.list);
+               list_del_init(&rq->csd.list);
+               rq->q->softirq_done_fn(rq);
+       }
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+static void trigger_softirq(void *data)
+{
+       struct request *rq = data;
+       unsigned long flags;
+       struct list_head *list;
+
+       local_irq_save(flags);
+       list = &__get_cpu_var(blk_cpu_done);
+       list_add_tail(&rq->csd.list, list);
+
+       if (list->next == &rq->csd.list)
+               raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Setup and invoke a run of 'trigger_softirq' on the given cpu.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+       if (cpu_online(cpu)) {
+               struct call_single_data *data = &rq->csd;
+
+               data->func = trigger_softirq;
+               data->info = rq;
+               data->flags = 0;
+
+               __smp_call_function_single(cpu, data);
+               return 0;
+       }
+
+       return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+       return 1;
+}
+#endif
+
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       /*
+        * If a CPU goes away, splice its entries to the current CPU
+        * and trigger a run of the softirq
+        */
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+               int cpu = (unsigned long) hcpu;
+
+               local_irq_disable();
+               list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                                &__get_cpu_var(blk_cpu_done));
+               raise_softirq_irqoff(BLOCK_SOFTIRQ);
+               local_irq_enable();
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+       .notifier_call  = blk_cpu_notify,
+};
+
+void __blk_complete_request(struct request *req)
+{
+       struct request_queue *q = req->q;
+       unsigned long flags;
+       int ccpu, cpu, group_cpu;
+
+       BUG_ON(!q->softirq_done_fn);
+
+       local_irq_save(flags);
+       cpu = smp_processor_id();
+       group_cpu = blk_cpu_to_group(cpu);
+
+       /*
+        * Select completion CPU
+        */
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+               ccpu = req->cpu;
+       else
+               ccpu = cpu;
+
+       if (ccpu == cpu || ccpu == group_cpu) {
+               struct list_head *list;
+do_local:
+               list = &__get_cpu_var(blk_cpu_done);
+               list_add_tail(&req->csd.list, list);
+
+               /*
+                * if the list only contains our just added request,
+                * signal a raise of the softirq. If there are already
+                * entries there, someone already raised the irq but it
+                * hasn't run yet.
+                */
+               if (list->next == &req->csd.list)
+                       raise_softirq_irqoff(BLOCK_SOFTIRQ);
+       } else if (raise_blk_irq(ccpu, req))
+               goto do_local;
+
+       local_irq_restore(flags);
+}
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+void blk_complete_request(struct request *req)
+{
+       if (unlikely(blk_should_fake_timeout(req->q)))
+               return;
+       if (!blk_mark_rq_complete(req))
+               __blk_complete_request(req);
+}
+EXPORT_SYMBOL(blk_complete_request);
+
+__init int blk_softirq_init(void)
+{
+       int i;
+
+       for_each_possible_cpu(i)
+               INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+       register_hotcpu_notifier(&blk_cpu_notifier);
+       return 0;
+}
+subsys_initcall(blk_softirq_init);
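
From a driver's perspective the new file keeps the old contract: register a softirq_done_fn at setup time, call blk_complete_request() from the hardware interrupt, and the registered handler runs later in BLOCK_SOFTIRQ context (on the submitting CPU when rq_affinity is enabled). A hedged sketch with illustrative names:

    #include <linux/blkdev.h>

    /* runs in softirq context */
    static void sketch_softirq_done(struct request *rq)
    {
            /* blk_end_request() takes the queue lock itself */
            blk_end_request(rq, 0, blk_rq_bytes(rq));
    }

    static void sketch_setup_completion(struct request_queue *q)
    {
            blk_queue_softirq_done(q, sketch_softirq_done);
    }

    /* called from the hardware interrupt once the transfer has finished */
    static void sketch_hw_irq_done(struct request *rq)
    {
            blk_complete_request(rq);
    }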
index 304ec73..21e275d 100644 (file)
@@ -156,6 +156,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
        return ret;
 }
 
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+       unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+
+       return queue_var_show(set != 0, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+       ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+       unsigned long val;
+
+       ret = queue_var_store(&val, page, count);
+       spin_lock_irq(q->queue_lock);
+       if (val)
+               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+       else
+               queue_flag_clear(QUEUE_FLAG_SAME_COMP,  q);
+       spin_unlock_irq(q->queue_lock);
+#endif
+       return ret;
+}
 
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -197,6 +221,12 @@ static struct queue_sysfs_entry queue_nomerges_entry = {
        .store = queue_nomerges_store,
 };
 
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+       .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_rq_affinity_show,
+       .store = queue_rq_affinity_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -205,6 +235,7 @@ static struct attribute *default_attrs[] = {
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_nomerges_entry.attr,
+       &queue_rq_affinity_entry.attr,
        NULL,
 };
 
@@ -310,7 +341,7 @@ int blk_register_queue(struct gendisk *disk)
        if (!q->request_fn)
                return 0;
 
-       ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
+       ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
                          "%s", "queue");
        if (ret < 0)
                return ret;
@@ -339,6 +370,6 @@ void blk_unregister_queue(struct gendisk *disk)
 
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               kobject_put(&disk->dev.kobj);
+               kobject_put(&disk_to_dev(disk)->kobj);
        }
 }
index ed5166f..c0d419e 100644 (file)
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  * __blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt@.  Returns true if it was
+ * Tries to free the specified @bqt.  Returns true if it was
  * actually freed and false if there are still references using it
  */
 static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -78,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
  * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * For externally managed @bqt@ frees the map.  Callers of this
+ * For externally managed @bqt frees the map.  Callers of this
  * function must guarantee to have released all the queues that
  * might have been using this tag map.
  */
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
  * @q:  the request queue for the device
  *
  *  Notes:
- *     This is used to disabled tagged queuing to a device, yet leave
+ *     This is used to disable tagged queuing to a device, yet leave
  *     queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  * @rq: the request that has completed
  *
  *  Description:
- *    Typically called when end_that_request_first() returns 0, meaning
+ *    Typically called when end_that_request_first() returns %0, meaning
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
+       unsigned max_depth, offset;
        int tag;
 
        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
+        *
+        * We reserve a few tags just for sync IO, since we don't want
+        * sync IO to be starved by a flood of async IO.
         */
+       max_depth = bqt->max_depth;
+       if (rq_is_sync(rq))
+               offset = 0;
+       else
+               offset = max_depth >> 2;
+
        do {
-               tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-               if (tag >= bqt->max_depth)
+               tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+               if (tag >= max_depth)
                        return 1;
 
        } while (test_and_set_bit_lock(tag, bqt->tag_map));
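
To make the reservation concrete, the default arithmetic for a 32-entry tag map works out as:

    /*
     * Example: bqt->max_depth == 32
     *   sync request:  offset = 0        -> may claim tags  0..31
     *   async request: offset = 32 >> 2  -> may claim tags  8..31
     * so a quarter of the map (tags 0-7) is effectively reserved for sync IO.
     */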
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644 (file)
index 0000000..972a63f
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Functions related to generic timeout handling of requests.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/fault-inject.h>
+
+#include "blk.h"
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+
+static DECLARE_FAULT_ATTR(fail_io_timeout);
+
+static int __init setup_fail_io_timeout(char *str)
+{
+       return setup_fault_attr(&fail_io_timeout, str);
+}
+__setup("fail_io_timeout=", setup_fail_io_timeout);
+
+int blk_should_fake_timeout(struct request_queue *q)
+{
+       if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+               return 0;
+
+       return should_fail(&fail_io_timeout, 1);
+}
+
+static int __init fail_io_timeout_debugfs(void)
+{
+       return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
+}
+
+late_initcall(fail_io_timeout_debugfs);
+
+ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
+
+       return sprintf(buf, "%d\n", set != 0);
+}
+
+ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       int val;
+
+       if (count) {
+               struct request_queue *q = disk->queue;
+               char *p = (char *) buf;
+
+               val = simple_strtoul(p, &p, 10);
+               spin_lock_irq(q->queue_lock);
+               if (val)
+                       queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+               else
+                       queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
+               spin_unlock_irq(q->queue_lock);
+       }
+
+       return count;
+}
+
+#endif /* CONFIG_FAIL_IO_TIMEOUT */
+
+/*
+ * blk_delete_timer - Delete/cancel timer for a given request.
+ * @req:       request that we are canceling timer for
+ *
+ */
+void blk_delete_timer(struct request *req)
+{
+       struct request_queue *q = req->q;
+
+       /*
+        * Nothing to detach
+        */
+       if (!q->rq_timed_out_fn || !req->deadline)
+               return;
+
+       list_del_init(&req->timeout_list);
+
+       if (list_empty(&q->timeout_list))
+               del_timer(&q->timeout);
+}
+
+static void blk_rq_timed_out(struct request *req)
+{
+       struct request_queue *q = req->q;
+       enum blk_eh_timer_return ret;
+
+       ret = q->rq_timed_out_fn(req);
+       switch (ret) {
+       case BLK_EH_HANDLED:
+               __blk_complete_request(req);
+               break;
+       case BLK_EH_RESET_TIMER:
+               blk_clear_rq_complete(req);
+               blk_add_timer(req);
+               break;
+       case BLK_EH_NOT_HANDLED:
+               /*
+                * LLD handles this for now but in the future
+                * we can send a request msg to abort the command
+                * and we can move more of the generic scsi eh code to
+                * the blk layer.
+                */
+               break;
+       default:
+               printk(KERN_ERR "block: bad eh return: %d\n", ret);
+               break;
+       }
+}
+
+void blk_rq_timed_out_timer(unsigned long data)
+{
+       struct request_queue *q = (struct request_queue *) data;
+       unsigned long flags, uninitialized_var(next), next_set = 0;
+       struct request *rq, *tmp;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
+               if (time_after_eq(jiffies, rq->deadline)) {
+                       list_del_init(&rq->timeout_list);
+
+                       /*
+                        * Check if we raced with end io completion
+                        */
+                       if (blk_mark_rq_complete(rq))
+                               continue;
+                       blk_rq_timed_out(rq);
+               }
+               if (!next_set) {
+                       next = rq->deadline;
+                       next_set = 1;
+               } else if (time_after(next, rq->deadline))
+                       next = rq->deadline;
+       }
+
+       if (next_set && !list_empty(&q->timeout_list))
+               mod_timer(&q->timeout, round_jiffies(next));
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * blk_abort_request -- Request recovery for the specified command
+ * @req:       pointer to the request of interest
+ *
+ * This function requests that the block layer start recovery for the
+ * request by deleting the timer and calling the q's timeout function.
+ * LLDDs who implement their own error recovery MAY ignore the timeout
+ * event if they generated blk_abort_req. Must hold queue lock.
+ */
+void blk_abort_request(struct request *req)
+{
+       if (blk_mark_rq_complete(req))
+               return;
+       blk_delete_timer(req);
+       blk_rq_timed_out(req);
+}
+EXPORT_SYMBOL_GPL(blk_abort_request);
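
A transport that learns a command can never complete pushes it straight into this path, holding the queue lock as required; a minimal sketch with an illustrative helper name:

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static void sketch_kill_request(struct request_queue *q, struct request *rq)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            blk_abort_request(rq);  /* invokes the queue's rq_timed_out_fn */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }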
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req:       request that is about to start running.
+ *
+ * Notes:
+ *    Each request has its own timer, and as it is added to the queue, we
+ *    set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+       struct request_queue *q = req->q;
+       unsigned long expiry;
+
+       if (!q->rq_timed_out_fn)
+               return;
+
+       BUG_ON(!list_empty(&req->timeout_list));
+       BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+       if (req->timeout)
+               req->deadline = jiffies + req->timeout;
+       else {
+               req->deadline = jiffies + q->rq_timeout;
+               /*
+                * Some LLDs, like scsi, peek at the timeout to prevent
+                * a command from being retried forever.
+                */
+               req->timeout = q->rq_timeout;
+       }
+       list_add_tail(&req->timeout_list, &q->timeout_list);
+
+       /*
+        * If the timer isn't already pending or this timeout is earlier
+        * than an existing one, modify the timer. Round to next nearest
+        * second.
+        */
+       expiry = round_jiffies(req->deadline);
+
+       /*
+        * We use ->deadline == 0 to detect whether a timer was added or
+        * not, so just increase to next jiffy for that specific case
+        */
+       if (unlikely(!req->deadline))
+               req->deadline = 1;
+
+       if (!timer_pending(&q->timeout) ||
+           time_before(expiry, q->timeout.expires))
+               mod_timer(&q->timeout, expiry);
+}
+
+/**
+ * blk_abort_queue -- Abort all requests on a given queue
+ * @q:         pointer to the queue
+ *
+ */
+void blk_abort_queue(struct request_queue *q)
+{
+       unsigned long flags;
+       struct request *rq, *tmp;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       elv_abort_queue(q);
+
+       list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+               blk_abort_request(rq);
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(blk_abort_queue);
index c79f30e..e5c5797 100644 (file)
@@ -17,6 +17,42 @@ void __blk_queue_free_tags(struct request_queue *q);
 
 void blk_unplug_work(struct work_struct *work);
 void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+       REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+       return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+       clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+                               const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+       return 0;
+}
+#endif
 
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
@@ -59,4 +95,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 #endif /* BLK_DEV_INTEGRITY */
 
+static inline int blk_cpu_to_group(int cpu)
+{
+#ifdef CONFIG_SCHED_MC
+       cpumask_t mask = cpu_coregroup_map(cpu);
+       return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
+       return first_cpu(per_cpu(cpu_sibling_map, cpu));
+#else
+       return cpu;
+#endif
+}
+
 #endif
index eb9651c..85049a7 100644 (file)
@@ -111,23 +111,9 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
  */
 static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
 
-/*
- * Bio action bits of interest
- */
-static u32 bio_act[9] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META) };
-
-/*
- * More could be added as needed, taking care to increment the decrementer
- * to get correct indexing
- */
-#define trace_barrier_bit(rw)  \
-       (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
-#define trace_sync_bit(rw)     \
-       (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
-#define trace_ahead_bit(rw)    \
-       (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
-#define trace_meta_bit(rw)     \
-       (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
+/* The ilog2() calls fall out because they're constant */
+#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
+         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
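
Expanding one instance shows how the bio flag lands directly on the matching trace category bit; with the constants in this tree (BIO_RW_BARRIER is bit 2, BLK_TC_BARRIER is 1 << 2, BLK_TC_SHIFT is 16) the expansion goes:

    /*
     * MASK_TC_BIT(rw, BARRIER)
     *   == (rw & (1 << BIO_RW_BARRIER))
     *          << (ilog2(BLK_TC_BARRIER) + BLK_TC_SHIFT - BIO_RW_BARRIER)
     *   == (rw & (1 << 2)) << (2 + 16 - 2)
     *   == (rw & (1 << 2)) << 16          -- i.e. BLK_TC_ACT(BLK_TC_BARRIER)
     */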
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -147,10 +133,11 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                return;
 
        what |= ddir_act[rw & WRITE];
-       what |= bio_act[trace_barrier_bit(rw)];
-       what |= bio_act[trace_sync_bit(rw)];
-       what |= bio_act[trace_ahead_bit(rw)];
-       what |= bio_act[trace_meta_bit(rw)];
+       what |= MASK_TC_BIT(rw, BARRIER);
+       what |= MASK_TC_BIT(rw, SYNC);
+       what |= MASK_TC_BIT(rw, AHEAD);
+       what |= MASK_TC_BIT(rw, META);
+       what |= MASK_TC_BIT(rw, DISCARD);
 
        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
@@ -382,7 +369,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;
 
-       strcpy(buts->name, name);
+       strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
+       buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 
        /*
         * some device names have larger paths - convert the slashes
index 0aae8d7..56cb343 100644 (file)
@@ -283,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
                next_rq->cmd_type = rq->cmd_type;
 
                dxferp = (void*)(unsigned long)hdr->din_xferp;
-               ret =  blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+               ret =  blk_rq_map_user(q, next_rq, NULL, dxferp,
+                                      hdr->din_xfer_len, GFP_KERNEL);
                if (ret)
                        goto out;
        }
@@ -298,7 +299,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
                dxfer_len = 0;
 
        if (dxfer_len) {
-               ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+               ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
+                                     GFP_KERNEL);
                if (ret)
                        goto out;
        }
index 1e2aff8..6a062ee 100644 (file)
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
 #define CFQ_MIN_TT             (2)
 
 #define CFQ_SLICE_SCALE                (5)
+#define CFQ_HW_QUEUE_MIN       (5)
 
 #define RQ_CIC(rq)             \
        ((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {
 
        int rq_in_driver;
        int sync_flight;
+
+       /*
+        * queue-depth detection
+        */
+       int rq_queued;
        int hw_tag;
+       int hw_tag_samples;
+       int rq_in_driver_peak;
 
        /*
         * idle window management
@@ -244,7 +252,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
-               kblockd_schedule_work(&cfqd->unplug_work);
+               kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
 }
 
@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                                                cfqd->rq_in_driver);
 
-       /*
-        * If the depth is larger 1, it really could be queueing. But lets
-        * make the mark a little higher - idling could still be good for
-        * low queueing, and a low queueing number could also just indicate
-        * a SCSI mid layer like behaviour where limit+1 is often seen.
-        */
-       if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
-               cfqd->hw_tag = 1;
-
        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
        list_del_init(&rq->queuelist);
        cfq_del_rq_rb(rq);
 
+       cfqq->cfqd->rq_queued--;
        if (rq_is_meta(rq)) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
@@ -878,6 +878,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        struct cfq_io_context *cic;
        unsigned long sl;
 
+       /*
+        * SSD device without seek penalty, disable idling. But only do so
+        * for devices that support queuing, otherwise we still have a problem
+        * with sync vs async workloads.
+        */
+       if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+               return;
+
        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
        WARN_ON(cfq_cfqq_slice_new(cfqq));
 
@@ -1833,6 +1841,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
        struct cfq_io_context *cic = RQ_CIC(rq);
 
+       cfqd->rq_queued++;
        if (rq_is_meta(rq))
                cfqq->meta_pending++;
 
@@ -1880,6 +1889,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
        cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+       if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
+               cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+
+       if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+           cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+               return;
+
+       if (cfqd->hw_tag_samples++ < 50)
+               return;
+
+       if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+               cfqd->hw_tag = 1;
+       else
+               cfqd->hw_tag = 0;
+
+       cfqd->hw_tag_samples = 0;
+       cfqd->rq_in_driver_peak = 0;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1890,6 +1924,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        now = jiffies;
        cfq_log_cfqq(cfqd, cfqq, "complete");
 
+       cfq_update_hw_tag(cfqd);
+
        WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
@@ -2200,6 +2236,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->hw_tag = 1;
 
        return cfqd;
 }
index 79c1499..e669aed 100644 (file)
@@ -211,14 +211,10 @@ int blk_register_filter(struct gendisk *disk)
 {
        int ret;
        struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-       struct kobject *parent = kobject_get(disk->holder_dir->parent);
 
-       if (!parent)
-               return -ENODEV;
-
-       ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
+       ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
+                                  &disk_to_dev(disk)->kobj,
                                   "%s", "cmd_filter");
-
        if (ret < 0)
                return ret;
 
@@ -231,7 +227,6 @@ void blk_unregister_filter(struct gendisk *disk)
        struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
 
        kobject_put(&filter->kobj);
-       kobject_put(disk->holder_dir->parent);
 }
 EXPORT_SYMBOL(blk_unregister_filter);
 #endif
index c23177e..1e559fb 100644 (file)
@@ -788,6 +788,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
        case BLKFLSBUF:
        case BLKROSET:
+       case BLKDISCARD:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index 342448c..fd31117 100644 (file)
@@ -33,7 +33,7 @@ struct deadline_data {
         */
        struct rb_root sort_list[2];    
        struct list_head fifo_list[2];
-       
+
        /*
         * next in sort order. read, write or both are NULL
         */
@@ -53,7 +53,11 @@ struct deadline_data {
 
 static void deadline_move_request(struct deadline_data *, struct request *);
 
-#define RQ_RB_ROOT(dd, rq)     (&(dd)->sort_list[rq_data_dir((rq))])
+static inline struct rb_root *
+deadline_rb_root(struct deadline_data *dd, struct request *rq)
+{
+       return &dd->sort_list[rq_data_dir(rq)];
+}
 
 /*
  * get the request after `rq' in sector-sorted order
@@ -72,15 +76,11 @@ deadline_latter_request(struct request *rq)
 static void
 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
 {
-       struct rb_root *root = RQ_RB_ROOT(dd, rq);
+       struct rb_root *root = deadline_rb_root(dd, rq);
        struct request *__alias;
 
-retry:
-       __alias = elv_rb_add(root, rq);
-       if (unlikely(__alias)) {
+       while (unlikely(__alias = elv_rb_add(root, rq)))
                deadline_move_request(dd, __alias);
-               goto retry;
-       }
 }
 
 static inline void
@@ -91,7 +91,7 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);
 
-       elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
+       elv_rb_del(deadline_rb_root(dd, rq), rq);
 }
 
 /*
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
        deadline_add_rq_rb(dd, rq);
 
        /*
-        * set expire time (only used for reads) and add to fifo list
+        * set expire time and add to fifo list
         */
        rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
        list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
@@ -162,7 +162,7 @@ static void deadline_merged_request(struct request_queue *q,
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
-               elv_rb_del(RQ_RB_ROOT(dd, req), req);
+               elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
 }
@@ -212,7 +212,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);
 
-       dd->last_sector = rq->sector + rq->nr_sectors;
+       dd->last_sector = rq_end_sector(rq);
 
        /*
         * take it off the sort and fifo list, move
@@ -222,7 +222,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
 }
 
 /*
- * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
  */
 static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
@@ -258,17 +258,9 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
        else
                rq = dd->next_rq[READ];
 
-       if (rq) {
-               /* we have a "next request" */
-               
-               if (dd->last_sector != rq->sector)
-                       /* end the batch on a non sequential request */
-                       dd->batching += dd->fifo_batch;
-               
-               if (dd->batching < dd->fifo_batch)
-                       /* we are still entitled to batch */
-                       goto dispatch_request;
-       }
+       if (rq && dd->batching < dd->fifo_batch)
+               /* we have a next request and are still entitled to batch */
+               goto dispatch_request;
 
        /*
         * at this point we are not running a batch. select the appropriate
index ed6f8f3..0451892 100644 (file)
@@ -34,8 +34,9 @@
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
+#include "blk.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
@@ -75,6 +76,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
                return 0;
 
        /*
+        * Don't merge file system requests and discard requests
+        */
+       if (bio_discard(bio) != bio_discard(rq->bio))
+               return 0;
+
+       /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -438,6 +445,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
+               if (blk_discard_rq(rq) != blk_discard_rq(pos))
+                       break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
@@ -607,7 +616,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                break;
 
        case ELEVATOR_INSERT_SORT:
-               BUG_ON(!blk_fs_request(rq));
+               BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
@@ -692,7 +701,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                 * this request is scheduling boundary, update
                 * end_sector
                 */
-               if (blk_fs_request(rq)) {
+               if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
@@ -745,7 +754,7 @@ struct request *elv_next_request(struct request_queue *q)
                 * not ever see it.
                 */
                if (blk_empty_barrier(rq)) {
-                       end_queued_request(rq, 1);
+                       __blk_end_request(rq, 0, blk_rq_bytes(rq));
                        continue;
                }
                if (!(rq->cmd_flags & REQ_STARTED)) {
@@ -764,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q)
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
+                       /*
+                        * We are now handing the request to the hardware,
+                        * add the timeout handler
+                        */
+                       blk_add_timer(rq);
                }
 
                if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -782,7 +797,6 @@ struct request *elv_next_request(struct request_queue *q)
                         * device can handle
                         */
                        rq->nr_phys_segments++;
-                       rq->nr_hw_segments++;
                }
 
                if (!q->prep_rq_fn)
@@ -805,14 +819,13 @@ struct request *elv_next_request(struct request_queue *q)
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
-                               --rq->nr_hw_segments;
                        }
 
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
-                       end_queued_request(rq, 0);
+                       __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
@@ -901,6 +914,19 @@ int elv_may_queue(struct request_queue *q, int rw)
        return ELV_MQUEUE_MAY;
 }
 
+void elv_abort_queue(struct request_queue *q)
+{
+       struct request *rq;
+
+       while (!list_empty(&q->queue_head)) {
+               rq = list_entry_rq(q->queue_head.next);
+               rq->cmd_flags |= REQ_QUIET;
+               blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+               __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+       }
+}
+EXPORT_SYMBOL(elv_abort_queue);
+
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
        elevator_t *e = q->elevator;
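
A hedged sketch of how a driver-side error path could use the new
elv_abort_queue() helper above.  The function name is invented, and the
queue-lock requirement is an assumption inferred from the fact that the
helper walks q->queue_head directly:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/spinlock.h>

/*
 * Hypothetical sketch only: fail everything still queued once the
 * device is known to be dead.  Assumes q->queue_lock must be held,
 * like other manipulations of q->queue_head.
 */
static void exdrv_fail_pending(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        elv_abort_queue(q);     /* ends each queued request with -EIO */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
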
index e0ce23a..4cd3433 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kobj_map.h>
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
+#include <linux/idr.h>
 
 #include "blk.h"
 
@@ -24,8 +25,194 @@ static DEFINE_MUTEX(block_class_lock);
 struct kobject *block_depr;
 #endif
 
+/* for extended dynamic devt allocation, currently only one major is used */
+#define MAX_EXT_DEVT           (1 << MINORBITS)
+
+/* For extended devt allocation.  ext_devt_mutex prevents lookup
+ * results from going away underneath its user.
+ */
+static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_IDR(ext_devt_idr);
+
 static struct device_type disk_type;
 
+/**
+ * disk_get_part - get partition
+ * @disk: disk to look the partition up in
+ * @partno: partition number
+ *
+ * Look up partition @partno on @disk.  If found, increment its
+ * reference count and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Pointer to the found partition on success, NULL if not found.
+ */
+struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
+{
+       struct hd_struct *part = NULL;
+       struct disk_part_tbl *ptbl;
+
+       if (unlikely(partno < 0))
+               return NULL;
+
+       rcu_read_lock();
+
+       ptbl = rcu_dereference(disk->part_tbl);
+       if (likely(partno < ptbl->len)) {
+               part = rcu_dereference(ptbl->part[partno]);
+               if (part)
+                       get_device(part_to_dev(part));
+       }
+
+       rcu_read_unlock();
+
+       return part;
+}
+EXPORT_SYMBOL_GPL(disk_get_part);
+
+/**
+ * disk_part_iter_init - initialize partition iterator
+ * @piter: iterator to initialize
+ * @disk: disk to iterate over
+ * @flags: DISK_PITER_* flags
+ *
+ * Initialize @piter so that it iterates over partitions of @disk.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
+                         unsigned int flags)
+{
+       struct disk_part_tbl *ptbl;
+
+       rcu_read_lock();
+       ptbl = rcu_dereference(disk->part_tbl);
+
+       piter->disk = disk;
+       piter->part = NULL;
+
+       if (flags & DISK_PITER_REVERSE)
+               piter->idx = ptbl->len - 1;
+       else if (flags & DISK_PITER_INCL_PART0)
+               piter->idx = 0;
+       else
+               piter->idx = 1;
+
+       piter->flags = flags;
+
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_init);
+
+/**
+ * disk_part_iter_next - proceed iterator to the next partition and return it
+ * @piter: iterator of interest
+ *
+ * Proceed @piter to the next partition and return it.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+{
+       struct disk_part_tbl *ptbl;
+       int inc, end;
+
+       /* put the last partition */
+       disk_put_part(piter->part);
+       piter->part = NULL;
+
+       /* get part_tbl */
+       rcu_read_lock();
+       ptbl = rcu_dereference(piter->disk->part_tbl);
+
+       /* determine iteration parameters */
+       if (piter->flags & DISK_PITER_REVERSE) {
+               inc = -1;
+               if (piter->flags & DISK_PITER_INCL_PART0)
+                       end = -1;
+               else
+                       end = 0;
+       } else {
+               inc = 1;
+               end = ptbl->len;
+       }
+
+       /* iterate to the next partition */
+       for (; piter->idx != end; piter->idx += inc) {
+               struct hd_struct *part;
+
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
+               if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
+                       continue;
+
+               get_device(part_to_dev(part));
+               piter->part = part;
+               piter->idx += inc;
+               break;
+       }
+
+       rcu_read_unlock();
+
+       return piter->part;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_next);
+
+/**
+ * disk_part_iter_exit - finish up partition iteration
+ * @piter: iter of interest
+ *
+ * Called when iteration is over.  Cleans up @piter.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void disk_part_iter_exit(struct disk_part_iter *piter)
+{
+       disk_put_part(piter->part);
+       piter->part = NULL;
+}
+EXPORT_SYMBOL_GPL(disk_part_iter_exit);
+
+/**
+ * disk_map_sector_rcu - map sector to partition
+ * @disk: gendisk of interest
+ * @sector: sector to map
+ *
+ * Find out which partition @sector maps to on @disk.  This is
+ * primarily used for stats accounting.
+ *
+ * CONTEXT:
+ * RCU read locked.  The returned partition pointer is valid only
+ * while preemption is disabled.
+ *
+ * RETURNS:
+ * Found partition on success, part0 if no partition matches.
+ */
+struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+{
+       struct disk_part_tbl *ptbl;
+       int i;
+
+       ptbl = rcu_dereference(disk->part_tbl);
+
+       for (i = 1; i < ptbl->len; i++) {
+               struct hd_struct *part = rcu_dereference(ptbl->part[i]);
+
+               if (part && part->start_sect <= sector &&
+                   sector < part->start_sect + part->nr_sects)
+                       return part;
+       }
+       return &disk->part0;
+}
+EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
+
 /*
  * Can be deleted altogether. Later.
  *
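
A rough usage sketch (not part of the patch) of the disk_part_iter_*
helpers added above; it mirrors the loop printk_all_partitions() adopts
further down, and the printk is purely illustrative:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/genhd.h>

/* Illustrative only: log every non-empty partition of @disk. */
static void example_log_partitions(struct gendisk *disk)
{
        struct disk_part_iter piter;
        struct hd_struct *part;
        char buf[BDEVNAME_SIZE];

        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
        while ((part = disk_part_iter_next(&piter)))
                printk(KERN_DEBUG "%s: %llu sectors\n",
                       disk_name(disk, part->partno, buf),
                       (unsigned long long)part->nr_sects);
        disk_part_iter_exit(&piter);    /* drops the last reference */
}
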
@@ -43,14 +230,14 @@ static inline int major_to_index(int major)
 }
 
 #ifdef CONFIG_PROC_FS
-void blkdev_show(struct seq_file *f, off_t offset)
+void blkdev_show(struct seq_file *seqf, off_t offset)
 {
        struct blk_major_name *dp;
 
        if (offset < BLKDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&block_class_lock);
                for (dp = major_names[offset]; dp; dp = dp->next)
-                       seq_printf(f, "%3d %s\n", dp->major, dp->name);
+                       seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
                mutex_unlock(&block_class_lock);
        }
 }
@@ -136,6 +323,118 @@ EXPORT_SYMBOL(unregister_blkdev);
 
 static struct kobj_map *bdev_map;
 
+/**
+ * blk_mangle_minor - scatter minor numbers apart
+ * @minor: minor number to mangle
+ *
+ * Scatter consecutively allocated @minor numbers apart if
+ * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled.  Mangling twice gives the
+ * original value.
+ *
+ * RETURNS:
+ * Mangled value.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+static int blk_mangle_minor(int minor)
+{
+#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
+       int i;
+
+       for (i = 0; i < MINORBITS / 2; i++) {
+               int low = minor & (1 << i);
+               int high = minor & (1 << (MINORBITS - 1 - i));
+               int distance = MINORBITS - 1 - 2 * i;
+
+               minor ^= low | high;    /* clear both bits */
+               low <<= distance;       /* swap the positions */
+               high >>= distance;
+               minor |= low | high;    /* and set */
+       }
+#endif
+       return minor;
+}
+
+/**
+ * blk_alloc_devt - allocate a dev_t for a partition
+ * @part: partition to allocate dev_t for
+ * @devt: out parameter for resulting dev_t
+ *
+ * Allocate a dev_t for the block device partition @part.
+ *
+ * RETURNS:
+ * 0 on success, allocated dev_t is returned in *@devt.  -errno on
+ * failure.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+{
+       struct gendisk *disk = part_to_disk(part);
+       int idx, rc;
+
+       /* in consecutive minor range? */
+       if (part->partno < disk->minors) {
+               *devt = MKDEV(disk->major, disk->first_minor + part->partno);
+               return 0;
+       }
+
+       /* allocate ext devt */
+       do {
+               if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+                       return -ENOMEM;
+               rc = idr_get_new(&ext_devt_idr, part, &idx);
+       } while (rc == -EAGAIN);
+
+       if (rc)
+               return rc;
+
+       if (idx > MAX_EXT_DEVT) {
+               idr_remove(&ext_devt_idr, idx);
+               return -EBUSY;
+       }
+
+       *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+       return 0;
+}
+
+/**
+ * blk_free_devt - free a dev_t
+ * @devt: dev_t to free
+ *
+ * Free @devt which was allocated using blk_alloc_devt().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void blk_free_devt(dev_t devt)
+{
+       might_sleep();
+
+       if (devt == MKDEV(0, 0))
+               return;
+
+       if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+               mutex_lock(&ext_devt_mutex);
+               idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+               mutex_unlock(&ext_devt_mutex);
+       }
+}
+
+static char *bdevt_str(dev_t devt, char *buf)
+{
+       if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
+               char tbuf[BDEVT_SIZE];
+               snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
+               snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
+       } else
+               snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
+
+       return buf;
+}
+
 /*
  * Register device numbers dev..(dev+range-1)
  * range must be nonzero
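
The "mangling twice gives the original value" property of
blk_mangle_minor() can be sanity-checked with a stand-alone, user-space
re-implementation of the same bit swap; MINORBITS = 20 matches the
kernel's definition, and nothing below is kernel code:

#include <assert.h>

#define MINORBITS 20

static int mangle(int minor)
{
        int i;

        for (i = 0; i < MINORBITS / 2; i++) {
                int low = minor & (1 << i);
                int high = minor & (1 << (MINORBITS - 1 - i));
                int distance = MINORBITS - 1 - 2 * i;

                minor ^= low | high;    /* clear bit i and its mirror */
                low <<= distance;       /* swap the two positions */
                high >>= distance;
                minor |= low | high;
        }
        return minor;
}

int main(void)
{
        int m;

        for (m = 0; m < (1 << MINORBITS); m++)
                assert(mangle(mangle(m)) == m); /* involution holds */
        return 0;
}
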
@@ -157,11 +456,11 @@ void blk_unregister_region(dev_t devt, unsigned long range)
 
 EXPORT_SYMBOL(blk_unregister_region);
 
-static struct kobject *exact_match(dev_t devt, int *part, void *data)
+static struct kobject *exact_match(dev_t devt, int *partno, void *data)
 {
        struct gendisk *p = data;
 
-       return &p->dev.kobj;
+       return &disk_to_dev(p)->kobj;
 }
 
 static int exact_lock(dev_t devt, void *data)
@@ -179,21 +478,46 @@ static int exact_lock(dev_t devt, void *data)
  *
  * This function registers the partitioning information in @disk
  * with the kernel.
+ *
+ * FIXME: error handling
  */
 void add_disk(struct gendisk *disk)
 {
        struct backing_dev_info *bdi;
+       dev_t devt;
        int retval;
 
+       /* minors == 0 indicates the use of ext devt from part0 and must
+        * be accompanied by the GENHD_FL_EXT_DEVT flag.  Make sure all
+        * parameters make sense.
+        */
+       WARN_ON(disk->minors && !(disk->major || disk->first_minor));
+       WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
+
        disk->flags |= GENHD_FL_UP;
-       blk_register_region(MKDEV(disk->major, disk->first_minor),
-                           disk->minors, NULL, exact_match, exact_lock, disk);
+
+       retval = blk_alloc_devt(&disk->part0, &devt);
+       if (retval) {
+               WARN_ON(1);
+               return;
+       }
+       disk_to_dev(disk)->devt = devt;
+
+       /* ->major and ->first_minor aren't supposed to be
+        * dereferenced from here on, but set them just in case.
+        */
+       disk->major = MAJOR(devt);
+       disk->first_minor = MINOR(devt);
+
+       blk_register_region(disk_devt(disk), disk->minors, NULL,
+                           exact_match, exact_lock, disk);
        register_disk(disk);
        blk_register_queue(disk);
 
        bdi = &disk->queue->backing_dev_info;
-       bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
-       retval = sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
+       bdi_register_dev(bdi, disk_devt(disk));
+       retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
+                                  "bdi");
        WARN_ON(retval);
 }
 
@@ -202,78 +526,71 @@ EXPORT_SYMBOL(del_gendisk);       /* in partitions/check.c */
 
 void unlink_gendisk(struct gendisk *disk)
 {
-       sysfs_remove_link(&disk->dev.kobj, "bdi");
+       sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
        bdi_unregister(&disk->queue->backing_dev_info);
        blk_unregister_queue(disk);
-       blk_unregister_region(MKDEV(disk->major, disk->first_minor),
-                             disk->minors);
+       blk_unregister_region(disk_devt(disk), disk->minors);
 }
 
 /**
  * get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
+ * @devt: device to get partitioning information for
+ * @partno: returned partition index
  *
  * This function gets the structure containing partitioning
- * information for the given device @dev.
+ * information for the given device @devt.
  */
-struct gendisk *get_gendisk(dev_t devt, int *part)
+struct gendisk *get_gendisk(dev_t devt, int *partno)
 {
-       struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
-       struct device *dev = kobj_to_dev(kobj);
+       struct gendisk *disk = NULL;
+
+       if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
+               struct kobject *kobj;
+
+               kobj = kobj_lookup(bdev_map, devt, partno);
+               if (kobj)
+                       disk = dev_to_disk(kobj_to_dev(kobj));
+       } else {
+               struct hd_struct *part;
 
-       return  kobj ? dev_to_disk(dev) : NULL;
+               mutex_lock(&ext_devt_mutex);
+               part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+               if (part && get_disk(part_to_disk(part))) {
+                       *partno = part->partno;
+                       disk = part_to_disk(part);
+               }
+               mutex_unlock(&ext_devt_mutex);
+       }
+
+       return disk;
 }
 
-/*
- * print a partitions - intended for places where the root filesystem can't be
- * mounted and thus to give the victim some idea of what went wrong
+/**
+ * bdget_disk - do bdget() by gendisk and partition number
+ * @disk: gendisk of interest
+ * @partno: partition number
+ *
+ * Find partition @partno from @disk, do bdget() on it.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * Resulting block_device on success, NULL on failure.
  */
-static int printk_partition(struct device *dev, void *data)
+struct block_device *bdget_disk(struct gendisk *disk, int partno)
 {
-       struct gendisk *sgp;
-       char buf[BDEVNAME_SIZE];
-       int n;
-
-       if (dev->type != &disk_type)
-               goto exit;
+       struct hd_struct *part;
+       struct block_device *bdev = NULL;
 
-       sgp = dev_to_disk(dev);
-       /*
-        * Don't show empty devices or things that have been surpressed
-        */
-       if (get_capacity(sgp) == 0 ||
-           (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
-               goto exit;
+       part = disk_get_part(disk, partno);
+       if (part)
+               bdev = bdget(part_devt(part));
+       disk_put_part(part);
 
-       /*
-        * Note, unlike /proc/partitions, I am showing the numbers in
-        * hex - the same format as the root= option takes.
-        */
-       printk("%02x%02x %10llu %s",
-               sgp->major, sgp->first_minor,
-               (unsigned long long)get_capacity(sgp) >> 1,
-               disk_name(sgp, 0, buf));
-       if (sgp->driverfs_dev != NULL &&
-           sgp->driverfs_dev->driver != NULL)
-               printk(" driver: %s\n",
-                       sgp->driverfs_dev->driver->name);
-       else
-               printk(" (driver?)\n");
-
-       /* now show the partitions */
-       for (n = 0; n < sgp->minors - 1; ++n) {
-               if (sgp->part[n] == NULL)
-                       goto exit;
-               if (sgp->part[n]->nr_sects == 0)
-                       goto exit;
-               printk("  %02x%02x %10llu %s\n",
-                       sgp->major, n + 1 + sgp->first_minor,
-                       (unsigned long long)sgp->part[n]->nr_sects >> 1,
-                       disk_name(sgp, n + 1, buf));
-       }
-exit:
-       return 0;
+       return bdev;
 }
+EXPORT_SYMBOL(bdget_disk);
 
 /*
  * print a full list of all partitions - intended for places where the root
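
A minimal sketch (illustration only, error handling simplified) of the
bdget_disk() helper exported above: it maps a (gendisk, partition
number) pair to its block_device, which is dropped again with bdput():

#include <linux/fs.h>
#include <linux/genhd.h>

/* Illustrative: sync one partition of @disk, if it exists. */
static int example_sync_partition(struct gendisk *disk, int partno)
{
        struct block_device *bdev;

        bdev = bdget_disk(disk, partno);        /* NULL if no such partition */
        if (!bdev)
                return -ENXIO;

        fsync_bdev(bdev);
        bdput(bdev);
        return 0;
}
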
@@ -282,120 +599,145 @@ exit:
  */
 void __init printk_all_partitions(void)
 {
-       mutex_lock(&block_class_lock);
-       class_for_each_device(&block_class, NULL, NULL, printk_partition);
-       mutex_unlock(&block_class_lock);
+       struct class_dev_iter iter;
+       struct device *dev;
+
+       class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+       while ((dev = class_dev_iter_next(&iter))) {
+               struct gendisk *disk = dev_to_disk(dev);
+               struct disk_part_iter piter;
+               struct hd_struct *part;
+               char name_buf[BDEVNAME_SIZE];
+               char devt_buf[BDEVT_SIZE];
+
+               /*
+                * Don't show empty devices or things that have been
+                * suppressed
+                */
+               if (get_capacity(disk) == 0 ||
+                   (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+                       continue;
+
+               /*
+                * Note, unlike /proc/partitions, I am showing the
+                * numbers in hex - the same format as the root=
+                * option takes.
+                */
+               disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+               while ((part = disk_part_iter_next(&piter))) {
+                       bool is_part0 = part == &disk->part0;
+
+                       printk("%s%s %10llu %s", is_part0 ? "" : "  ",
+                              bdevt_str(part_devt(part), devt_buf),
+                              (unsigned long long)part->nr_sects >> 1,
+                              disk_name(disk, part->partno, name_buf));
+                       if (is_part0) {
+                               if (disk->driverfs_dev != NULL &&
+                                   disk->driverfs_dev->driver != NULL)
+                                       printk(" driver: %s\n",
+                                             disk->driverfs_dev->driver->name);
+                               else
+                                       printk(" (driver?)\n");
+                       } else
+                               printk("\n");
+               }
+               disk_part_iter_exit(&piter);
+       }
+       class_dev_iter_exit(&iter);
 }
 
 #ifdef CONFIG_PROC_FS
 /* iterator */
-static int find_start(struct device *dev, void *data)
+static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
 {
-       loff_t *k = data;
+       loff_t skip = *pos;
+       struct class_dev_iter *iter;
+       struct device *dev;
 
-       if (dev->type != &disk_type)
-               return 0;
-       if (!*k)
-               return 1;
-       (*k)--;
-       return 0;
+       iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return ERR_PTR(-ENOMEM);
+
+       seqf->private = iter;
+       class_dev_iter_init(iter, &block_class, NULL, &disk_type);
+       do {
+               dev = class_dev_iter_next(iter);
+               if (!dev)
+                       return NULL;
+       } while (skip--);
+
+       return dev_to_disk(dev);
 }
 
-static void *part_start(struct seq_file *part, loff_t *pos)
+static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
 {
        struct device *dev;
-       loff_t k = *pos;
-
-       if (!k)
-               part->private = (void *)1LU;    /* tell show to print header */
 
-       mutex_lock(&block_class_lock);
-       dev = class_find_device(&block_class, NULL, &k, find_start);
-       if (dev) {
-               put_device(dev);
+       (*pos)++;
+       dev = class_dev_iter_next(seqf->private);
+       if (dev)
                return dev_to_disk(dev);
-       }
+
        return NULL;
 }
 
-static int find_next(struct device *dev, void *data)
+static void disk_seqf_stop(struct seq_file *seqf, void *v)
 {
-       if (dev->type == &disk_type)
-               return 1;
-       return 0;
-}
+       struct class_dev_iter *iter = seqf->private;
 
-static void *part_next(struct seq_file *part, void *v, loff_t *pos)
-{
-       struct gendisk *gp = v;
-       struct device *dev;
-       ++*pos;
-       dev = class_find_device(&block_class, &gp->dev, NULL, find_next);
-       if (dev) {
-               put_device(dev);
-               return dev_to_disk(dev);
+       /* stop is called even after start failed :-( */
+       if (iter) {
+               class_dev_iter_exit(iter);
+               kfree(iter);
        }
-       return NULL;
 }
 
-static void part_stop(struct seq_file *part, void *v)
+static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 {
-       mutex_unlock(&block_class_lock);
+       static void *p;
+
+       p = disk_seqf_start(seqf, pos);
+       if (!IS_ERR(p) && p && !*pos)
+               seq_puts(seqf, "major minor  #blocks  name\n\n");
+       return p;
 }
 
-static int show_partition(struct seq_file *part, void *v)
+static int show_partition(struct seq_file *seqf, void *v)
 {
        struct gendisk *sgp = v;
-       int n;
+       struct disk_part_iter piter;
+       struct hd_struct *part;
        char buf[BDEVNAME_SIZE];
 
-       /*
-        * Print header if start told us to do.  This is to preserve
-        * the original behavior of not printing header if no
-        * partition exists.  This hackery will be removed later with
-        * class iteration clean up.
-        */
-       if (part->private) {
-               seq_puts(part, "major minor  #blocks  name\n\n");
-               part->private = NULL;
-       }
-
        /* Don't show non-partitionable removeable devices or empty devices */
-       if (!get_capacity(sgp) ||
-                       (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
+       if (!get_capacity(sgp) || (!disk_partitionable(sgp) &&
+                                  (sgp->flags & GENHD_FL_REMOVABLE)))
                return 0;
        if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
                return 0;
 
        /* show the full disk and all non-0 size partitions of it */
-       seq_printf(part, "%4d  %4d %10llu %s\n",
-               sgp->major, sgp->first_minor,
-               (unsigned long long)get_capacity(sgp) >> 1,
-               disk_name(sgp, 0, buf));
-       for (n = 0; n < sgp->minors - 1; n++) {
-               if (!sgp->part[n])
-                       continue;
-               if (sgp->part[n]->nr_sects == 0)
-                       continue;
-               seq_printf(part, "%4d  %4d %10llu %s\n",
-                       sgp->major, n + 1 + sgp->first_minor,
-                       (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
-                       disk_name(sgp, n + 1, buf));
-       }
+       disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
+       while ((part = disk_part_iter_next(&piter)))
+               seq_printf(seqf, "%4d  %7d %10llu %s\n",
+                          MAJOR(part_devt(part)), MINOR(part_devt(part)),
+                          (unsigned long long)part->nr_sects >> 1,
+                          disk_name(sgp, part->partno, buf));
+       disk_part_iter_exit(&piter);
 
        return 0;
 }
 
 const struct seq_operations partitions_op = {
-       .start  = part_start,
-       .next   = part_next,
-       .stop   = part_stop,
+       .start  = show_partition_start,
+       .next   = disk_seqf_next,
+       .stop   = disk_seqf_stop,
        .show   = show_partition
 };
 #endif
 
 
-static struct kobject *base_probe(dev_t devt, int *part, void *data)
+static struct kobject *base_probe(dev_t devt, int *partno, void *data)
 {
        if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
                /* Make old-style 2.4 aliases work */
@@ -431,29 +773,29 @@ static ssize_t disk_range_show(struct device *dev,
        return sprintf(buf, "%d\n", disk->minors);
 }
 
-static ssize_t disk_removable_show(struct device *dev,
+static ssize_t disk_ext_range_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       return sprintf(buf, "%d\n",
-                      (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+       return sprintf(buf, "%d\n", disk_max_parts(disk));
 }
 
-static ssize_t disk_ro_show(struct device *dev,
+static ssize_t disk_removable_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       return sprintf(buf, "%d\n", disk->policy ? 1 : 0);
+       return sprintf(buf, "%d\n",
+                      (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
 }
 
-static ssize_t disk_size_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t disk_ro_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
+       return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
 }
 
 static ssize_t disk_capability_show(struct device *dev,
@@ -464,73 +806,26 @@ static ssize_t disk_capability_show(struct device *dev,
        return sprintf(buf, "%x\n", disk->flags);
 }
 
-static ssize_t disk_stat_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct gendisk *disk = dev_to_disk(dev);
-
-       preempt_disable();
-       disk_round_stats(disk);
-       preempt_enable();
-       return sprintf(buf,
-               "%8lu %8lu %8llu %8u "
-               "%8lu %8lu %8llu %8u "
-               "%8u %8u %8u"
-               "\n",
-               disk_stat_read(disk, ios[READ]),
-               disk_stat_read(disk, merges[READ]),
-               (unsigned long long)disk_stat_read(disk, sectors[READ]),
-               jiffies_to_msecs(disk_stat_read(disk, ticks[READ])),
-               disk_stat_read(disk, ios[WRITE]),
-               disk_stat_read(disk, merges[WRITE]),
-               (unsigned long long)disk_stat_read(disk, sectors[WRITE]),
-               jiffies_to_msecs(disk_stat_read(disk, ticks[WRITE])),
-               disk->in_flight,
-               jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
-               jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
-}
-
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-static ssize_t disk_fail_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct gendisk *disk = dev_to_disk(dev);
-
-       return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
-}
-
-static ssize_t disk_fail_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t count)
-{
-       struct gendisk *disk = dev_to_disk(dev);
-       int i;
-
-       if (count > 0 && sscanf(buf, "%d", &i) > 0) {
-               if (i == 0)
-                       disk->flags &= ~GENHD_FL_FAIL;
-               else
-                       disk->flags |= GENHD_FL_FAIL;
-       }
-
-       return count;
-}
-
-#endif
-
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
+static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
-static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
-static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
+static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
-       __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
+       __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
+#endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+static struct device_attribute dev_attr_fail_timeout =
+       __ATTR(io-timeout-fail,  S_IRUGO|S_IWUSR, part_timeout_show,
+               part_timeout_store);
 #endif
 
 static struct attribute *disk_attrs[] = {
        &dev_attr_range.attr,
+       &dev_attr_ext_range.attr,
        &dev_attr_removable.attr,
        &dev_attr_ro.attr,
        &dev_attr_size.attr,
@@ -539,6 +834,9 @@ static struct attribute *disk_attrs[] = {
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+       &dev_attr_fail_timeout.attr,
+#endif
        NULL
 };
 
@@ -551,13 +849,87 @@ static struct attribute_group *disk_attr_groups[] = {
        NULL
 };
 
+static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
+{
+       struct disk_part_tbl *ptbl =
+               container_of(head, struct disk_part_tbl, rcu_head);
+
+       kfree(ptbl);
+}
+
+/**
+ * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
+ * @disk: disk to replace part_tbl for
+ * @new_ptbl: new part_tbl to install
+ *
+ * Replace disk->part_tbl with @new_ptbl in RCU-safe way.  The
+ * original ptbl is freed using RCU callback.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked.
+ */
+static void disk_replace_part_tbl(struct gendisk *disk,
+                                 struct disk_part_tbl *new_ptbl)
+{
+       struct disk_part_tbl *old_ptbl = disk->part_tbl;
+
+       rcu_assign_pointer(disk->part_tbl, new_ptbl);
+       if (old_ptbl)
+               call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+}
+
+/**
+ * disk_expand_part_tbl - expand disk->part_tbl
+ * @disk: disk to expand part_tbl for
+ * @partno: partition number the table must be able to hold
+ *
+ * Expand disk->part_tbl so that @partno can be indexed.  disk->part_tbl
+ * uses RCU to allow unlocked dereferencing for stats and other stuff.
+ *
+ * LOCKING:
+ * Matching bd_mutex locked, might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int disk_expand_part_tbl(struct gendisk *disk, int partno)
+{
+       struct disk_part_tbl *old_ptbl = disk->part_tbl;
+       struct disk_part_tbl *new_ptbl;
+       int len = old_ptbl ? old_ptbl->len : 0;
+       int target = partno + 1;
+       size_t size;
+       int i;
+
+       /* disk_max_parts() is zero during initialization, ignore if so */
+       if (disk_max_parts(disk) && target > disk_max_parts(disk))
+               return -EINVAL;
+
+       if (target <= len)
+               return 0;
+
+       size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
+       new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
+       if (!new_ptbl)
+               return -ENOMEM;
+
+       INIT_RCU_HEAD(&new_ptbl->rcu_head);
+       new_ptbl->len = target;
+
+       for (i = 0; i < len; i++)
+               rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
+
+       disk_replace_part_tbl(disk, new_ptbl);
+       return 0;
+}
+
 static void disk_release(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
        kfree(disk->random);
-       kfree(disk->part);
-       free_disk_stats(disk);
+       disk_replace_part_tbl(disk, NULL);
+       free_part_stats(&disk->part0);
        kfree(disk);
 }
 struct class block_class = {
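
A hedged sketch of the calling pattern the two helpers above imply; the
function is hypothetical (the real caller is the partition scanning
code), and the bd_mutex/RCU rules are taken from the LOCKING notes:

#include <linux/genhd.h>
#include <linux/rcupdate.h>

/* Hypothetical: grow the table, then publish a new hd_struct to readers. */
static int example_install_part(struct gendisk *disk, int partno,
                                struct hd_struct *p)
{
        int err;

        /* assumed: caller holds the matching bd_mutex */
        err = disk_expand_part_tbl(disk, partno);
        if (err)
                return err;

        rcu_assign_pointer(disk->part_tbl->part[partno], p);
        return 0;
}
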
@@ -578,83 +950,31 @@ static struct device_type disk_type = {
  * The output looks suspiciously like /proc/partitions with a bunch of
  * extra fields.
  */
-
-static void *diskstats_start(struct seq_file *part, loff_t *pos)
-{
-       struct device *dev;
-       loff_t k = *pos;
-
-       mutex_lock(&block_class_lock);
-       dev = class_find_device(&block_class, NULL, &k, find_start);
-       if (dev) {
-               put_device(dev);
-               return dev_to_disk(dev);
-       }
-       return NULL;
-}
-
-static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
-{
-       struct gendisk *gp = v;
-       struct device *dev;
-
-       ++*pos;
-       dev = class_find_device(&block_class, &gp->dev, NULL, find_next);
-       if (dev) {
-               put_device(dev);
-               return dev_to_disk(dev);
-       }
-       return NULL;
-}
-
-static void diskstats_stop(struct seq_file *part, void *v)
-{
-       mutex_unlock(&block_class_lock);
-}
-
-static int diskstats_show(struct seq_file *s, void *v)
+static int diskstats_show(struct seq_file *seqf, void *v)
 {
        struct gendisk *gp = v;
+       struct disk_part_iter piter;
+       struct hd_struct *hd;
        char buf[BDEVNAME_SIZE];
-       int n = 0;
+       int cpu;
 
        /*
-       if (&gp->dev.kobj.entry == block_class.devices.next)
-               seq_puts(s,     "major minor name"
+       if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
+               seq_puts(seqf,  "major minor name"
                                "     rio rmerge rsect ruse wio wmerge "
                                "wsect wuse running use aveq"
                                "\n\n");
        */
  
-       preempt_disable();
-       disk_round_stats(gp);
-       preempt_enable();
-       seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n",
-               gp->major, n + gp->first_minor, disk_name(gp, n, buf),
-               disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
-               (unsigned long long)disk_stat_read(gp, sectors[0]),
-               jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
-               disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
-               (unsigned long long)disk_stat_read(gp, sectors[1]),
-               jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
-               gp->in_flight,
-               jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
-               jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
-
-       /* now show all non-0 size partitions of it */
-       for (n = 0; n < gp->minors - 1; n++) {
-               struct hd_struct *hd = gp->part[n];
-
-               if (!hd || !hd->nr_sects)
-                       continue;
-
-               preempt_disable();
-               part_round_stats(hd);
-               preempt_enable();
-               seq_printf(s, "%4d %4d %s %lu %lu %llu "
+       disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
+       while ((hd = disk_part_iter_next(&piter))) {
+               cpu = part_stat_lock();
+               part_round_stats(cpu, hd);
+               part_stat_unlock();
+               seq_printf(seqf, "%4d %7d %s %lu %lu %llu "
                           "%u %lu %lu %llu %u %u %u %u\n",
-                          gp->major, n + gp->first_minor + 1,
-                          disk_name(gp, n + 1, buf),
+                          MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
+                          disk_name(gp, hd->partno, buf),
                           part_stat_read(hd, ios[0]),
                           part_stat_read(hd, merges[0]),
                           (unsigned long long)part_stat_read(hd, sectors[0]),
@@ -668,14 +988,15 @@ static int diskstats_show(struct seq_file *s, void *v)
                           jiffies_to_msecs(part_stat_read(hd, time_in_queue))
                        );
        }
+       disk_part_iter_exit(&piter);
  
        return 0;
 }
 
 const struct seq_operations diskstats_op = {
-       .start  = diskstats_start,
-       .next   = diskstats_next,
-       .stop   = diskstats_stop,
+       .start  = disk_seqf_start,
+       .next   = disk_seqf_next,
+       .stop   = disk_seqf_stop,
        .show   = diskstats_show
 };
 #endif /* CONFIG_PROC_FS */
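
A rough sketch of the per-cpu statistics pattern the diskstats
conversion above relies on; only interfaces visible in the hunk are
used, and the helper itself is invented for illustration:

#include <linux/genhd.h>

/* Illustrative: total completed I/Os (reads + writes) on @part. */
static unsigned long example_total_ios(struct hd_struct *part)
{
        int cpu;

        cpu = part_stat_lock();         /* pin a CPU for the per-cpu counters */
        part_round_stats(cpu, part);    /* fold time-based counters up to now */
        part_stat_unlock();

        return part_stat_read(part, ios[0]) + part_stat_read(part, ios[1]);
}
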
@@ -690,7 +1011,7 @@ static void media_change_notify_thread(struct work_struct *work)
         * set enviroment vars to indicate which event this is for
         * so that user space will know to go check the media status.
         */
-       kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
+       kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
        put_device(gd->driverfs_dev);
 }
 
@@ -703,42 +1024,29 @@ void genhd_media_change_notify(struct gendisk *disk)
 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
 #endif  /*  0  */
 
-struct find_block {
-       const char *name;
-       int part;
-};
-
-static int match_id(struct device *dev, void *data)
+dev_t blk_lookup_devt(const char *name, int partno)
 {
-       struct find_block *find = data;
+       dev_t devt = MKDEV(0, 0);
+       struct class_dev_iter iter;
+       struct device *dev;
 
-       if (dev->type != &disk_type)
-               return 0;
-       if (strcmp(dev->bus_id, find->name) == 0) {
+       class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+       while ((dev = class_dev_iter_next(&iter))) {
                struct gendisk *disk = dev_to_disk(dev);
-               if (find->part < disk->minors)
-                       return 1;
-       }
-       return 0;
-}
+               struct hd_struct *part;
 
-dev_t blk_lookup_devt(const char *name, int part)
-{
-       struct device *dev;
-       dev_t devt = MKDEV(0, 0);
-       struct find_block find;
+               if (strcmp(dev->bus_id, name))
+                       continue;
 
-       mutex_lock(&block_class_lock);
-       find.name = name;
-       find.part = part;
-       dev = class_find_device(&block_class, NULL, &find, match_id);
-       if (dev) {
-               put_device(dev);
-               devt = MKDEV(MAJOR(dev->devt),
-                            MINOR(dev->devt) + part);
+               part = disk_get_part(disk, partno);
+               if (part) {
+                       devt = part_devt(part);
+                       disk_put_part(part);
+                       break;
+               }
+               disk_put_part(part);
        }
-       mutex_unlock(&block_class_lock);
-
+       class_dev_iter_exit(&iter);
        return devt;
 }
 EXPORT_SYMBOL(blk_lookup_devt);
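
blk_lookup_devt(), reworked above on top of the class iterator, maps a
disk name plus partition number to a dev_t; a tiny hedged sketch ("sda"
is only an example name):

#include <linux/kernel.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>

/* Illustrative: report the dev_t of sda's first partition, if registered. */
static void example_report_sda1(void)
{
        dev_t devt = blk_lookup_devt("sda", 1);

        if (devt == MKDEV(0, 0))
                printk(KERN_INFO "sda1 not registered\n");
        else
                printk(KERN_INFO "sda1 is %u:%u\n", MAJOR(devt), MINOR(devt));
}
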
@@ -747,6 +1055,7 @@ struct gendisk *alloc_disk(int minors)
 {
        return alloc_disk_node(minors, -1);
 }
+EXPORT_SYMBOL(alloc_disk);
 
 struct gendisk *alloc_disk_node(int minors, int node_id)
 {
@@ -755,32 +1064,28 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
        disk = kmalloc_node(sizeof(struct gendisk),
                                GFP_KERNEL | __GFP_ZERO, node_id);
        if (disk) {
-               if (!init_disk_stats(disk)) {
+               if (!init_part_stats(&disk->part0)) {
                        kfree(disk);
                        return NULL;
                }
-               if (minors > 1) {
-                       int size = (minors - 1) * sizeof(struct hd_struct *);
-                       disk->part = kmalloc_node(size,
-                               GFP_KERNEL | __GFP_ZERO, node_id);
-                       if (!disk->part) {
-                               free_disk_stats(disk);
-                               kfree(disk);
-                               return NULL;
-                       }
+               if (disk_expand_part_tbl(disk, 0)) {
+                       free_part_stats(&disk->part0);
+                       kfree(disk);
+                       return NULL;
                }
+               disk->part_tbl->part[0] = &disk->part0;
+
                disk->minors = minors;
                rand_initialize_disk(disk);
-               disk->dev.class = &block_class;
-               disk->dev.type = &disk_type;
-               device_initialize(&disk->dev);
+               disk_to_dev(disk)->class = &block_class;
+               disk_to_dev(disk)->type = &disk_type;
+               device_initialize(disk_to_dev(disk));
                INIT_WORK(&disk->async_notify,
                        media_change_notify_thread);
+               disk->node_id = node_id;
        }
        return disk;
 }
-
-EXPORT_SYMBOL(alloc_disk);
 EXPORT_SYMBOL(alloc_disk_node);
 
 struct kobject *get_disk(struct gendisk *disk)
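
For context around the alloc_disk_node() rework above, a hedged sketch
of the usual driver-side sequence; the "exdrv" name and the 16-minor
layout are invented, and per the new WARN_ONs in add_disk(), a driver
wanting dynamically allocated extended dev_ts would instead pass
minors == 0 together with GENHD_FL_EXT_DEVT:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative only: allocate, fill in and register a 16-minor disk. */
static struct gendisk *exdrv_create_disk(int major, struct request_queue *q,
                                         struct block_device_operations *fops)
{
        struct gendisk *disk = alloc_disk(16);

        if (!disk)
                return NULL;

        disk->major = major;
        disk->first_minor = 0;
        disk->fops = fops;
        disk->queue = q;
        sprintf(disk->disk_name, "exdrv0");
        set_capacity(disk, 0);          /* real capacity set once known */
        add_disk(disk);

        return disk;
}
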
@@ -793,7 +1098,7 @@ struct kobject *get_disk(struct gendisk *disk)
        owner = disk->fops->owner;
        if (owner && !try_module_get(owner))
                return NULL;
-       kobj = kobject_get(&disk->dev.kobj);
+       kobj = kobject_get(&disk_to_dev(disk)->kobj);
        if (kobj == NULL) {
                module_put(owner);
                return NULL;
@@ -807,27 +1112,28 @@ EXPORT_SYMBOL(get_disk);
 void put_disk(struct gendisk *disk)
 {
        if (disk)
-               kobject_put(&disk->dev.kobj);
+               kobject_put(&disk_to_dev(disk)->kobj);
 }
 
 EXPORT_SYMBOL(put_disk);
 
 void set_device_ro(struct block_device *bdev, int flag)
 {
-       if (bdev->bd_contains != bdev)
-               bdev->bd_part->policy = flag;
-       else
-               bdev->bd_disk->policy = flag;
+       bdev->bd_part->policy = flag;
 }
 
 EXPORT_SYMBOL(set_device_ro);
 
 void set_disk_ro(struct gendisk *disk, int flag)
 {
-       int i;
-       disk->policy = flag;
-       for (i = 0; i < disk->minors - 1; i++)
-               if (disk->part[i]) disk->part[i]->policy = flag;
+       struct disk_part_iter piter;
+       struct hd_struct *part;
+
+       disk_part_iter_init(&piter, disk,
+                           DISK_PITER_INCL_EMPTY | DISK_PITER_INCL_PART0);
+       while ((part = disk_part_iter_next(&piter)))
+               part->policy = flag;
+       disk_part_iter_exit(&piter);
 }
 
 EXPORT_SYMBOL(set_disk_ro);
@@ -836,18 +1142,15 @@ int bdev_read_only(struct block_device *bdev)
 {
        if (!bdev)
                return 0;
-       else if (bdev->bd_contains != bdev)
-               return bdev->bd_part->policy;
-       else
-               return bdev->bd_disk->policy;
+       return bdev->bd_part->policy;
 }
 
 EXPORT_SYMBOL(bdev_read_only);
 
-int invalidate_partition(struct gendisk *disk, int index)
+int invalidate_partition(struct gendisk *disk, int partno)
 {
        int res = 0;
-       struct block_device *bdev = bdget_disk(disk, index);
+       struct block_device *bdev = bdget_disk(disk, partno);
        if (bdev) {
                fsync_bdev(bdev);
                res = __invalidate_device(bdev);
index 77185e5..38bee32 100644 (file)
@@ -12,11 +12,12 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 {
        struct block_device *bdevp;
        struct gendisk *disk;
+       struct hd_struct *part;
        struct blkpg_ioctl_arg a;
        struct blkpg_partition p;
+       struct disk_part_iter piter;
        long long start, length;
-       int part;
-       int i;
+       int partno;
        int err;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -28,8 +29,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
        disk = bdev->bd_disk;
        if (bdev != bdev->bd_contains)
                return -EINVAL;
-       part = p.pno;
-       if (part <= 0 || part >= disk->minors)
+       partno = p.pno;
+       if (partno <= 0)
                return -EINVAL;
        switch (a.op) {
                case BLKPG_ADD_PARTITION:
@@ -43,36 +44,37 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                                    || pstart < 0 || plength < 0)
                                        return -EINVAL;
                        }
-                       /* partition number in use? */
+
                        mutex_lock(&bdev->bd_mutex);
-                       if (disk->part[part - 1]) {
-                               mutex_unlock(&bdev->bd_mutex);
-                               return -EBUSY;
-                       }
-                       /* overlap? */
-                       for (i = 0; i < disk->minors - 1; i++) {
-                               struct hd_struct *s = disk->part[i];
 
-                               if (!s)
-                                       continue;
-                               if (!(start+length <= s->start_sect ||
-                                     start >= s->start_sect + s->nr_sects)) {
+                       /* overlap? */
+                       disk_part_iter_init(&piter, disk,
+                                           DISK_PITER_INCL_EMPTY);
+                       while ((part = disk_part_iter_next(&piter))) {
+                               if (!(start + length <= part->start_sect ||
+                                     start >= part->start_sect + part->nr_sects)) {
+                                       disk_part_iter_exit(&piter);
                                        mutex_unlock(&bdev->bd_mutex);
                                        return -EBUSY;
                                }
                        }
+                       disk_part_iter_exit(&piter);
+
                        /* all seems OK */
-                       err = add_partition(disk, part, start, length, ADDPART_FLAG_NONE);
+                       err = add_partition(disk, partno, start, length,
+                                           ADDPART_FLAG_NONE);
                        mutex_unlock(&bdev->bd_mutex);
                        return err;
                case BLKPG_DEL_PARTITION:
-                       if (!disk->part[part-1])
-                               return -ENXIO;
-                       if (disk->part[part - 1]->nr_sects == 0)
+                       part = disk_get_part(disk, partno);
+                       if (!part)
                                return -ENXIO;
-                       bdevp = bdget_disk(disk, part);
+
+                       bdevp = bdget(part_devt(part));
+                       disk_put_part(part);
                        if (!bdevp)
                                return -ENOMEM;
+
                        mutex_lock(&bdevp->bd_mutex);
                        if (bdevp->bd_openers) {
                                mutex_unlock(&bdevp->bd_mutex);
@@ -84,7 +86,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                        invalidate_bdev(bdevp);
 
                        mutex_lock_nested(&bdev->bd_mutex, 1);
-                       delete_partition(disk, part);
+                       delete_partition(disk, partno);
                        mutex_unlock(&bdev->bd_mutex);
                        mutex_unlock(&bdevp->bd_mutex);
                        bdput(bdevp);
@@ -100,7 +102,7 @@ static int blkdev_reread_part(struct block_device *bdev)
        struct gendisk *disk = bdev->bd_disk;
        int res;
 
-       if (disk->minors == 1 || bdev != bdev->bd_contains)
+       if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
@@ -111,6 +113,69 @@ static int blkdev_reread_part(struct block_device *bdev)
        return res;
 }
 
+static void blk_ioc_discard_endio(struct bio *bio, int err)
+{
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       }
+       complete(bio->bi_private);
+}
+
+static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
+                            uint64_t len)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+       int ret = 0;
+
+       if (start & 511)
+               return -EINVAL;
+       if (len & 511)
+               return -EINVAL;
+       start >>= 9;
+       len >>= 9;
+
+       if (start + len > (bdev->bd_inode->i_size >> 9))
+               return -EINVAL;
+
+       if (!q->prepare_discard_fn)
+               return -EOPNOTSUPP;
+
+       while (len && !ret) {
+               DECLARE_COMPLETION_ONSTACK(wait);
+               struct bio *bio;
+
+               bio = bio_alloc(GFP_KERNEL, 0);
+               if (!bio)
+                       return -ENOMEM;
+
+               bio->bi_end_io = blk_ioc_discard_endio;
+               bio->bi_bdev = bdev;
+               bio->bi_private = &wait;
+               bio->bi_sector = start;
+
+               if (len > q->max_hw_sectors) {
+                       bio->bi_size = q->max_hw_sectors << 9;
+                       len -= q->max_hw_sectors;
+                       start += q->max_hw_sectors;
+               } else {
+                       bio->bi_size = len << 9;
+                       len = 0;
+               }
+               submit_bio(DISCARD_NOBARRIER, bio);
+
+               wait_for_completion(&wait);
+
+               if (bio_flagged(bio, BIO_EOPNOTSUPP))
+                       ret = -EOPNOTSUPP;
+               else if (!bio_flagged(bio, BIO_UPTODATE))
+                       ret = -EIO;
+               bio_put(bio);
+       }
+       return ret;
+}
+
 static int put_ushort(unsigned long arg, unsigned short val)
 {
        return put_user(val, (unsigned short __user *)arg);
@@ -258,6 +323,19 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
                set_device_ro(bdev, n);
                unlock_kernel();
                return 0;
+
+       case BLKDISCARD: {
+               uint64_t range[2];
+
+               if (!(file->f_mode & FMODE_WRITE))
+                       return -EBADF;
+
+               if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+                       return -EFAULT;
+
+               return blk_ioctl_discard(bdev, range[0], range[1]);
+       }
+
        case HDIO_GETGEO: {
                struct hd_geometry geo;
 
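
From user space, the BLKDISCARD ioctl added above takes a {start,
length} pair in bytes, both 512-byte aligned, on a descriptor opened
for writing.  A minimal sketch, assuming the BLKDISCARD definition from
this kernel's updated <linux/fs.h> is visible to user space:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKDISCARD */

/* Illustrative (and destructive): discard the first megabyte of @dev. */
int example_discard(const char *dev)
{
        uint64_t range[2] = { 0, 1024 * 1024 };        /* start, length */
        int fd, ret;

        fd = open(dev, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = ioctl(fd, BLKDISCARD, range);     /* EOPNOTSUPP if unsupported */
        close(fd);
        return ret;
}
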
index ec4b7f2..c34272a 100644 (file)
@@ -185,6 +185,7 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
        __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
        __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
        __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
+       __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
 }
 EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
 
@@ -313,11 +314,12 @@ static int sg_io(struct file *file, struct request_queue *q,
                        goto out;
                }
 
-               ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
-                                         hdr->dxfer_len);
+               ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+                                         hdr->dxfer_len, GFP_KERNEL);
                kfree(iov);
        } else if (hdr->dxfer_len)
-               ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+               ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
+                                     GFP_KERNEL);
 
        if (ret)
                goto out;
index f2dd991..a93247c 100644 (file)
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -459,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
  *     RETURNS:
  *     EH_HANDLED or EH_NOT_HANDLED
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
-       enum scsi_eh_timer_return ret;
+       enum blk_eh_timer_return ret;
 
        DPRINTK("ENTER\n");
 
        if (ap->ops->error_handler) {
-               ret = EH_NOT_HANDLED;
+               ret = BLK_EH_NOT_HANDLED;
                goto out;
        }
 
-       ret = EH_HANDLED;
+       ret = BLK_EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
-               ret = EH_NOT_HANDLED;
+               ret = BLK_EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);
 
@@ -833,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
-       scsi_req_abort_cmd(qc->scsicmd);
+       blk_abort_request(qc->scsicmd->request);
 }
 
 /**
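
The libata conversion above moves the SCSI timeout hook over to the
block layer's enum blk_eh_timer_return; a hedged sketch of what a
low-level driver's handler looks like under the new convention (the
driver name is invented, and only the two return values visible in this
hunk are shown):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative only. */
static enum blk_eh_timer_return exdrv_timed_out(struct scsi_cmnd *cmd)
{
        /*
         * BLK_EH_HANDLED:     the driver completed/aborted the command itself.
         * BLK_EH_NOT_HANDLED: hand the command to the normal SCSI error
         *                     handler instead.
         */
        return BLK_EH_NOT_HANDLED;
}
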
index fccd5e4..59fe051 100644 (file)
@@ -1085,6 +1085,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 
                blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
        } else {
+               if (ata_id_is_ssd(dev->id))
+                       queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
+                                               sdev->request_queue);
+
                /* ATA devices must be sector aligned */
                blk_queue_update_dma_alignment(sdev->request_queue,
                                               ATA_SECT_SIZE - 1);
index e96de96..fe2839e 100644 (file)
@@ -155,7 +155,7 @@ extern int ata_bus_probe(struct ata_port *ap);
 /* libata-eh.c */
 extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
 extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
-extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
index 31dc0cd..0a5f055 100644 (file)
@@ -54,7 +54,7 @@ struct driver_private {
  */
 struct class_private {
        struct kset class_subsys;
-       struct list_head class_devices;
+       struct klist class_devices;
        struct list_head class_interfaces;
        struct kset class_dirs;
        struct mutex class_mutex;
index cc5e28c..eb85e43 100644 (file)
@@ -135,6 +135,20 @@ static void remove_class_attrs(struct class *cls)
        }
 }
 
+static void klist_class_dev_get(struct klist_node *n)
+{
+       struct device *dev = container_of(n, struct device, knode_class);
+
+       get_device(dev);
+}
+
+static void klist_class_dev_put(struct klist_node *n)
+{
+       struct device *dev = container_of(n, struct device, knode_class);
+
+       put_device(dev);
+}
+
 int __class_register(struct class *cls, struct lock_class_key *key)
 {
        struct class_private *cp;
@@ -145,7 +159,7 @@ int __class_register(struct class *cls, struct lock_class_key *key)
        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;
-       INIT_LIST_HEAD(&cp->class_devices);
+       klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
        INIT_LIST_HEAD(&cp->class_interfaces);
        kset_init(&cp->class_dirs);
        __mutex_init(&cp->class_mutex, "struct class mutex", key);
@@ -269,6 +283,71 @@ char *make_class_name(const char *name, struct kobject *kobj)
 #endif
 
 /**
+ * class_dev_iter_init - initialize class device iterator
+ * @iter: class iterator to initialize
+ * @class: the class we wanna iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize class iterator @iter such that it iterates over devices
+ * of @class.  If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
+                        struct device *start, const struct device_type *type)
+{
+       struct klist_node *start_knode = NULL;
+
+       if (start)
+               start_knode = &start->knode_class;
+       klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
+       iter->type = type;
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_init);
+
+/**
+ * class_dev_iter_next - iterate to the next device
+ * @iter: class iterator to proceed
+ *
+ * Proceed @iter to the next device and return it.  Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator advances to the next device or is exited.  The caller is
+ * free to do whatever it wants with the device, including calling
+ * back into class code.
+ */
+struct device *class_dev_iter_next(struct class_dev_iter *iter)
+{
+       struct klist_node *knode;
+       struct device *dev;
+
+       while (1) {
+               knode = klist_next(&iter->ki);
+               if (!knode)
+                       return NULL;
+               dev = container_of(knode, struct device, knode_class);
+               if (!iter->type || iter->type == dev->type)
+                       return dev;
+       }
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_next);
+
+/**
+ * class_dev_iter_exit - finish iteration
+ * @iter: class iterator to finish
+ *
+ * Finish an iteration.  Always call this function after iteration is
+ * complete, whether or not the iteration ran to the end.
+ */
+void class_dev_iter_exit(struct class_dev_iter *iter)
+{
+       klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_exit);
+
+/**
  * class_for_each_device - device iterator
  * @class: the class we're iterating
  * @start: the device to start with in the list, if any.
@@ -283,13 +362,13 @@ char *make_class_name(const char *name, struct kobject *kobj)
  * We check the return of @fn each time. If it returns anything
  * other than 0, we break out and return that value.
  *
- * Note, we hold class->class_mutex in this function, so it can not be
- * re-acquired in @fn, otherwise it will self-deadlocking. For
- * example, calls to add or remove class members would be verboten.
+ * @fn is allowed to do anything, including calling back into class
+ * code.  There's no locking restriction.
  */
 int class_for_each_device(struct class *class, struct device *start,
                          void *data, int (*fn)(struct device *, void *))
 {
+       struct class_dev_iter iter;
        struct device *dev;
        int error = 0;
 
@@ -301,20 +380,13 @@ int class_for_each_device(struct class *class, struct device *start,
                return -EINVAL;
        }
 
-       mutex_lock(&class->p->class_mutex);
-       list_for_each_entry(dev, &class->p->class_devices, node) {
-               if (start) {
-                       if (start == dev)
-                               start = NULL;
-                       continue;
-               }
-               dev = get_device(dev);
+       class_dev_iter_init(&iter, class, start, NULL);
+       while ((dev = class_dev_iter_next(&iter))) {
                error = fn(dev, data);
-               put_device(dev);
                if (error)
                        break;
        }
-       mutex_unlock(&class->p->class_mutex);
+       class_dev_iter_exit(&iter);
 
        return error;
 }
@@ -337,16 +409,15 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
  *
  * Note, you will need to drop the reference with put_device() after use.
  *
- * We hold class->class_mutex in this function, so it can not be
- * re-acquired in @match, otherwise it will self-deadlocking. For
- * example, calls to add or remove class members would be verboten.
+ * @match is allowed to do anything, including calling back into class
+ * code.  There's no locking restriction.
  */
 struct device *class_find_device(struct class *class, struct device *start,
                                 void *data,
                                 int (*match)(struct device *, void *))
 {
+       struct class_dev_iter iter;
        struct device *dev;
-       int found = 0;
 
        if (!class)
                return NULL;
@@ -356,29 +427,23 @@ struct device *class_find_device(struct class *class, struct device *start,
                return NULL;
        }
 
-       mutex_lock(&class->p->class_mutex);
-       list_for_each_entry(dev, &class->p->class_devices, node) {
-               if (start) {
-                       if (start == dev)
-                               start = NULL;
-                       continue;
-               }
-               dev = get_device(dev);
+       class_dev_iter_init(&iter, class, start, NULL);
+       while ((dev = class_dev_iter_next(&iter))) {
                if (match(dev, data)) {
-                       found = 1;
+                       get_device(dev);
                        break;
-               } else
-                       put_device(dev);
+               }
        }
-       mutex_unlock(&class->p->class_mutex);
+       class_dev_iter_exit(&iter);
 
-       return found ? dev : NULL;
+       return dev;
 }
 EXPORT_SYMBOL_GPL(class_find_device);
 
 int class_interface_register(struct class_interface *class_intf)
 {
        struct class *parent;
+       struct class_dev_iter iter;
        struct device *dev;
 
        if (!class_intf || !class_intf->class)
@@ -391,8 +456,10 @@ int class_interface_register(struct class_interface *class_intf)
        mutex_lock(&parent->p->class_mutex);
        list_add_tail(&class_intf->node, &parent->p->class_interfaces);
        if (class_intf->add_dev) {
-               list_for_each_entry(dev, &parent->p->class_devices, node)
+               class_dev_iter_init(&iter, parent, NULL, NULL);
+               while ((dev = class_dev_iter_next(&iter)))
                        class_intf->add_dev(dev, class_intf);
+               class_dev_iter_exit(&iter);
        }
        mutex_unlock(&parent->p->class_mutex);
 
@@ -402,6 +469,7 @@ int class_interface_register(struct class_interface *class_intf)
 void class_interface_unregister(struct class_interface *class_intf)
 {
        struct class *parent = class_intf->class;
+       struct class_dev_iter iter;
        struct device *dev;
 
        if (!parent)
@@ -410,8 +478,10 @@ void class_interface_unregister(struct class_interface *class_intf)
        mutex_lock(&parent->p->class_mutex);
        list_del_init(&class_intf->node);
        if (class_intf->remove_dev) {
-               list_for_each_entry(dev, &parent->p->class_devices, node)
+               class_dev_iter_init(&iter, parent, NULL, NULL);
+               while ((dev = class_dev_iter_next(&iter)))
                        class_intf->remove_dev(dev, class_intf);
+               class_dev_iter_exit(&iter);
        }
        mutex_unlock(&parent->p->class_mutex);
 
index d021c98..b98cb14 100644 (file)
@@ -536,7 +536,6 @@ void device_initialize(struct device *dev)
        klist_init(&dev->klist_children, klist_children_get,
                   klist_children_put);
        INIT_LIST_HEAD(&dev->dma_pools);
-       INIT_LIST_HEAD(&dev->node);
        init_MUTEX(&dev->sem);
        spin_lock_init(&dev->devres_lock);
        INIT_LIST_HEAD(&dev->devres_head);
@@ -916,7 +915,8 @@ int device_add(struct device *dev)
        if (dev->class) {
                mutex_lock(&dev->class->p->class_mutex);
                /* tie the class to the device */
-               list_add_tail(&dev->node, &dev->class->p->class_devices);
+               klist_add_tail(&dev->knode_class,
+                              &dev->class->p->class_devices);
 
                /* notify any interfaces that the device is here */
                list_for_each_entry(class_intf,
@@ -1032,7 +1032,7 @@ void device_del(struct device *dev)
                        if (class_intf->remove_dev)
                                class_intf->remove_dev(dev, class_intf);
                /* remove the device from the class list */
-               list_del_init(&dev->node);
+               klist_del(&dev->knode_class);
                mutex_unlock(&dev->class->p->class_mutex);
        }
        device_remove_file(dev, &uevent_attr);
index 0c39782..aa69556 100644 (file)
@@ -109,12 +109,12 @@ static const struct attribute_group attr_group = {
 static int
 aoedisk_add_sysfs(struct aoedev *d)
 {
-       return sysfs_create_group(&d->gd->dev.kobj, &attr_group);
+       return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
 }
 void
 aoedisk_rm_sysfs(struct aoedev *d)
 {
-       sysfs_remove_group(&d->gd->dev.kobj, &attr_group);
+       sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
 }
 
 static int
@@ -276,7 +276,7 @@ aoeblk_gdalloc(void *vp)
        gd->first_minor = d->sysminor * AOE_PARTITIONS;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
-       gd->capacity = d->ssize;
+       set_capacity(gd, d->ssize);
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);
 
index 2f17462..961d29a 100644 (file)
@@ -645,7 +645,7 @@ aoecmd_sleepwork(struct work_struct *work)
                unsigned long flags;
                u64 ssize;
 
-               ssize = d->gd->capacity;
+               ssize = get_capacity(d->gd);
                bd = bdget_disk(d->gd, 0);
 
                if (bd) {
@@ -707,7 +707,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
        if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
                return;
        if (d->gd != NULL) {
-               d->gd->capacity = ssize;
+               set_capacity(d->gd, ssize);
                d->flags |= DEVFL_NEWSIZE;
        } else
                d->flags |= DEVFL_GDALLOC;
@@ -756,12 +756,17 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
        unsigned long n_sect = bio->bi_size >> 9;
        const int rw = bio_data_dir(bio);
        struct hd_struct *part;
+       int cpu;
 
-       part = get_part(disk, sector);
-       all_stat_inc(disk, part, ios[rw], sector);
-       all_stat_add(disk, part, ticks[rw], duration, sector);
-       all_stat_add(disk, part, sectors[rw], n_sect, sector);
-       all_stat_add(disk, part, io_ticks, duration, sector);
+       cpu = part_stat_lock();
+       part = disk_map_sector_rcu(disk, sector);
+
+       part_stat_inc(cpu, part, ios[rw]);
+       part_stat_add(cpu, part, ticks[rw], duration);
+       part_stat_add(cpu, part, sectors[rw], n_sect);
+       part_stat_add(cpu, part, io_ticks, duration);
+
+       part_stat_unlock();
 }
 
 void
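[Editor's note, illustration only: the diskstats() change above follows the new per-CPU partition accounting pattern used throughout this merge. A minimal sketch for a generic bio-based driver; example_account_bio() is an assumed name, the accessors are the ones used in the hunk above.]

#include <linux/genhd.h>
#include <linux/bio.h>

/* Sketch of the per-CPU stat pattern used in diskstats() above. */
static void example_account_bio(struct gendisk *disk, struct bio *bio,
				unsigned long duration)
{
	const int rw = bio_data_dir(bio);
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();		/* disables preemption, returns this CPU */
	part = disk_map_sector_rcu(disk, bio->bi_sector);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio->bi_size >> 9);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_unlock();
}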
index a1d813a..6a8038d 100644 (file)
@@ -91,7 +91,7 @@ aoedev_downdev(struct aoedev *d)
        }
 
        if (d->gd)
-               d->gd->capacity = 0;
+               set_capacity(d->gd, 0);
 
        d->flags &= ~DEVFL_UP;
 }
index b73116e..1e1f915 100644 (file)
@@ -3460,8 +3460,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
               hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
 
        hba[i]->cmd_pool_bits =
-           kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
-                     1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
+           kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+                       * sizeof(unsigned long), GFP_KERNEL);
        hba[i]->cmd_pool = (CommandList_struct *)
            pci_alloc_consistent(hba[i]->pdev,
                    hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -3493,8 +3493,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        /* command and error info recs zeroed out before
           they are used */
        memset(hba[i]->cmd_pool_bits, 0,
-              ((hba[i]->nr_cmds + BITS_PER_LONG -
-                1) / BITS_PER_LONG) * sizeof(unsigned long));
+              DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+                       * sizeof(unsigned long));
 
        hba[i]->num_luns = 0;
        hba[i]->highest_lun = -1;
index e1233aa..a3fd87b 100644 (file)
@@ -365,7 +365,7 @@ struct scsi2map {
 
 static int 
 cciss_scsi_add_entry(int ctlr, int hostno, 
-               unsigned char *scsi3addr, int devtype,
+               struct cciss_scsi_dev_t *device,
                struct scsi2map *added, int *nadded)
 {
        /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
@@ -384,12 +384,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
        lun = 0;
        /* Is this device a non-zero lun of a multi-lun device */
        /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
-       if (scsi3addr[4] != 0) {
+       if (device->scsi3addr[4] != 0) {
                /* Search through our list and find the device which */
                /* has the same 8 byte LUN address, excepting byte 4. */
                /* Assign the same bus and target for this new LUN. */
                /* Use the logical unit number from the firmware. */
-               memcpy(addr1, scsi3addr, 8);
+               memcpy(addr1, device->scsi3addr, 8);
                addr1[4] = 0;
                for (i = 0; i < n; i++) {
                        sd = &ccissscsi[ctlr].dev[i];
@@ -399,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
                        if (memcmp(addr1, addr2, 8) == 0) {
                                bus = sd->bus;
                                target = sd->target;
-                               lun = scsi3addr[4];
+                               lun = device->scsi3addr[4];
                                break;
                        }
                }
@@ -420,8 +420,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
        added[*nadded].lun = sd->lun;
        (*nadded)++;
 
-       memcpy(&sd->scsi3addr[0], scsi3addr, 8);
-       sd->devtype = devtype;
+       memcpy(sd->scsi3addr, device->scsi3addr, 8);
+       memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
+       memcpy(sd->revision, device->revision, sizeof(sd->revision));
+       memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
+       sd->devtype = device->devtype;
+
        ccissscsi[ctlr].ndevices++;
 
        /* initially, (before registering with scsi layer) we don't 
@@ -487,6 +491,22 @@ static void fixup_botched_add(int ctlr, char *scsi3addr)
        CPQ_TAPE_UNLOCK(ctlr, flags);
 }
 
+static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+       struct cciss_scsi_dev_t *dev2)
+{
+       return dev1->devtype == dev2->devtype &&
+               memcmp(dev1->scsi3addr, dev2->scsi3addr,
+                       sizeof(dev1->scsi3addr)) == 0 &&
+               memcmp(dev1->device_id, dev2->device_id,
+                       sizeof(dev1->device_id)) == 0 &&
+               memcmp(dev1->vendor, dev2->vendor,
+                       sizeof(dev1->vendor)) == 0 &&
+               memcmp(dev1->model, dev2->model,
+                       sizeof(dev1->model)) == 0 &&
+               memcmp(dev1->revision, dev2->revision,
+                       sizeof(dev1->revision)) == 0;
+}
+
 static int
 adjust_cciss_scsi_table(int ctlr, int hostno,
        struct cciss_scsi_dev_t sd[], int nsds)
@@ -532,7 +552,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
                for (j=0;j<nsds;j++) {
                        if (SCSI3ADDR_EQ(sd[j].scsi3addr,
                                csd->scsi3addr)) {
-                               if (sd[j].devtype == csd->devtype)
+                               if (device_is_the_same(&sd[j], csd))
                                        found=2;
                                else
                                        found=1;
@@ -548,22 +568,26 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
                        cciss_scsi_remove_entry(ctlr, hostno, i,
                                removed, &nremoved);
                        /* remove ^^^, hence i not incremented */
-               } 
-               else if (found == 1) { /* device is different kind */
+               } else if (found == 1) { /* device is different in some way */
                        changes++;
-                       printk("cciss%d: device c%db%dt%dl%d type changed "
-                               "(device type now %s).\n",
-                               ctlr, hostno, csd->bus, csd->target, csd->lun,
-                                       scsi_device_type(csd->devtype));
+                       printk("cciss%d: device c%db%dt%dl%d has changed.\n",
+                               ctlr, hostno, csd->bus, csd->target, csd->lun);
                        cciss_scsi_remove_entry(ctlr, hostno, i,
                                removed, &nremoved);
                        /* remove ^^^, hence i not incremented */
-                       if (cciss_scsi_add_entry(ctlr, hostno,
-                               &sd[j].scsi3addr[0], sd[j].devtype,
+                       if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
                                added, &nadded) != 0)
                                /* we just removed one, so add can't fail. */
                                        BUG();
                        csd->devtype = sd[j].devtype;
+                       memcpy(csd->device_id, sd[j].device_id,
+                               sizeof(csd->device_id));
+                       memcpy(csd->vendor, sd[j].vendor,
+                               sizeof(csd->vendor));
+                       memcpy(csd->model, sd[j].model,
+                               sizeof(csd->model));
+                       memcpy(csd->revision, sd[j].revision,
+                               sizeof(csd->revision));
                } else          /* device is same as it ever was, */
                        i++;    /* so just move along. */
        }
@@ -577,7 +601,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
                        csd = &ccissscsi[ctlr].dev[j];
                        if (SCSI3ADDR_EQ(sd[i].scsi3addr,
                                csd->scsi3addr)) {
-                               if (sd[i].devtype == csd->devtype)
+                               if (device_is_the_same(&sd[i], csd))
                                        found=2;        /* found device */
                                else
                                        found=1;        /* found a bug. */
@@ -586,16 +610,14 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
                }
                if (!found) {
                        changes++;
-                       if (cciss_scsi_add_entry(ctlr, hostno, 
-
-                               &sd[i].scsi3addr[0], sd[i].devtype,
+                       if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
                                added, &nadded) != 0)
                                break;
                } else if (found == 1) {
                        /* should never happen... */
                        changes++;
-                       printk("cciss%d: device unexpectedly changed type\n",
-                               ctlr);
+                       printk(KERN_WARNING "cciss%d: device "
+                               "unexpectedly changed\n", ctlr);
                        /* but if it does happen, we just ignore that device */
                }
        }
@@ -1012,7 +1034,8 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
 
 static int
 cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 
-                unsigned char *buf, unsigned char bufsize)
+       unsigned char page, unsigned char *buf,
+       unsigned char bufsize)
 {
        int rc;
        CommandList_struct *cp;
@@ -1032,8 +1055,8 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
        ei = cp->err_info; 
 
        cdb[0] = CISS_INQUIRY;
-       cdb[1] = 0;
-       cdb[2] = 0;
+       cdb[1] = (page != 0);
+       cdb[2] = page;
        cdb[3] = 0;
        cdb[4] = bufsize;
        cdb[5] = 0;
@@ -1053,6 +1076,25 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
        return rc;      
 }
 
+/* Get the device id from inquiry page 0x83 */
+static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+       unsigned char *device_id, int buflen)
+{
+       int rc;
+       unsigned char *buf;
+
+       if (buflen > 16)
+               buflen = 16;
+       buf = kzalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -1;
+       rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
+       if (rc == 0)
+               memcpy(device_id, &buf[8], buflen);
+       kfree(buf);
+       return rc != 0;
+}
+
 static int
 cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 
                ReportLunData_struct *buf, int bufsize)
@@ -1142,25 +1184,21 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
        ctlr_info_t *c;
        __u32 num_luns=0;
        unsigned char *ch;
-       /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
-       struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+       struct cciss_scsi_dev_t *currentsd, *this_device;
        int ncurrent=0;
        int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
        int i;
 
        c = (ctlr_info_t *) hba[cntl_num];      
        ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
-       if (ld_buff == NULL) {
-               printk(KERN_ERR "cciss: out of memory\n");
-               return;
-       }
        inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
-        if (inq_buff == NULL) {
-                printk(KERN_ERR "cciss: out of memory\n");
-                kfree(ld_buff);
-                return;
+       currentsd = kzalloc(sizeof(*currentsd) *
+                       (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
+       if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
+               printk(KERN_ERR "cciss: out of memory\n");
+               goto out;
        }
-
+       this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
        if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
                ch = &ld_buff->LUNListLength[0];
                num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
@@ -1179,23 +1217,34 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 
 
        /* adjust our table of devices */       
-       for(i=0; i<num_luns; i++)
-       {
-               int devtype;
-
+       for (i = 0; i < num_luns; i++) {
                /* for each physical lun, do an inquiry */
                if (ld_buff->LUN[i][3] & 0xC0) continue;
                memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
                memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
 
-               if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff,
-                       (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+               if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
+                       (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
                        /* Inquiry failed (msg printed already) */
-                       devtype = 0; /* so we will skip this device. */
-               } else /* what kind of device is this? */
-                       devtype = (inq_buff[0] & 0x1f);
-
-               switch (devtype)
+                       continue; /* so we will skip this device. */
+
+               this_device->devtype = (inq_buff[0] & 0x1f);
+               this_device->bus = -1;
+               this_device->target = -1;
+               this_device->lun = -1;
+               memcpy(this_device->scsi3addr, scsi3addr, 8);
+               memcpy(this_device->vendor, &inq_buff[8],
+                       sizeof(this_device->vendor));
+               memcpy(this_device->model, &inq_buff[16],
+                       sizeof(this_device->model));
+               memcpy(this_device->revision, &inq_buff[32],
+                       sizeof(this_device->revision));
+               memset(this_device->device_id, 0,
+                       sizeof(this_device->device_id));
+               cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
+                       this_device->device_id, sizeof(this_device->device_id));
+
+               switch (this_device->devtype)
                {
                  case 0x05: /* CD-ROM */ {
 
@@ -1220,15 +1269,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
                        if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
                                printk(KERN_INFO "cciss%d: %s ignored, "
                                        "too many devices.\n", cntl_num,
-                                       scsi_device_type(devtype));
+                                       scsi_device_type(this_device->devtype));
                                break;
                        }
-                       memcpy(&currentsd[ncurrent].scsi3addr[0], 
-                               &scsi3addr[0], 8);
-                       currentsd[ncurrent].devtype = devtype;
-                       currentsd[ncurrent].bus = -1;
-                       currentsd[ncurrent].target = -1;
-                       currentsd[ncurrent].lun = -1;
+                       currentsd[ncurrent] = *this_device;
                        ncurrent++;
                        break;
                  default: 
@@ -1240,6 +1284,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 out:
        kfree(inq_buff);
        kfree(ld_buff);
+       kfree(currentsd);
        return;
 }
 
index d9c2c58..7b75024 100644 (file)
@@ -66,6 +66,10 @@ struct cciss_scsi_dev_t {
        int devtype;
        int bus, target, lun;           /* as presented to the OS */
        unsigned char scsi3addr[8];     /* as presented to the HW */
+       unsigned char device_id[16];    /* from inquiry pg. 0x83 */
+       unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
+       unsigned char model[16];        /* bytes 16-31 of inquiry data */
+       unsigned char revision[4];      /* bytes 32-35 of inquiry data */
 };
 
 struct cciss_scsi_hba_t {
index 09c1434..3d96752 100644 (file)
@@ -424,7 +424,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
                hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
                &(hba[i]->cmd_pool_dhandle));
        hba[i]->cmd_pool_bits = kcalloc(
-               (NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long),
+               DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
                GFP_KERNEL);
 
        if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
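[Editor's illustration: the open-coded round-up divisions replaced here and in the cciss and floppy hunks are equivalent to DIV_ROUND_UP() from <linux/kernel.h>. A small userspace sketch of the identity, with example values:]

#include <stdio.h>

/* Same definition as DIV_ROUND_UP in <linux/kernel.h>. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nr_cmds = 100, bits_per_long = 64;	/* example values */

	/* both forms yield 2: the bitmap needs two longs for 100 commands */
	printf("%u\n", DIV_ROUND_UP(nr_cmds, bits_per_long));
	printf("%u\n", (nr_cmds + bits_per_long - 1) / bits_per_long);
	return 0;
}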
index 395f8ea..cf64ddf 100644 (file)
@@ -423,8 +423,15 @@ static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
  * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
  * side 0 is on physical side 0 (but with the misnamed sector IDs).
  * 'stretch' should probably be renamed to something more general, like
- * 'options'.  Other parameters should be self-explanatory (see also
- * setfdprm(8)).
+ * 'options'.
+ *
+ * Bits 2 through 9 of 'stretch' tell the number of the first sector.
+ * The LSB (bit 2) is flipped. For most disks, the first sector
+ * is 1 (represented by 0x00<<2).  For some CP/M and music sampler
+ * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
+ * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
+ *
+ * Other parameters should be self-explanatory (see also setfdprm(8)).
  */
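[Editor's illustration, assumed names rather than the real macros from <linux/fd.h>: decoding the first-sector number described in the comment above, i.e. bits 2..9 of 'stretch' with the low bit of that field flipped.]

#include <stdio.h>

#define EX_SECTBASEMASK	0x3FC	/* bits 2..9 of 'stretch' */

static unsigned int ex_sectbase(unsigned int stretch)
{
	return ((stretch & EX_SECTBASEMASK) >> 2) ^ 1;
}

int main(void)
{
	printf("%u\n", ex_sectbase(0x000));	/* 1: common case        */
	printf("%u\n", ex_sectbase(0x004));	/* 0: zero-based disks   */
	printf("%u\n", ex_sectbase(0xC0 << 2));	/* 193 (0xC1): Amstrad CPC */
	return 0;
}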
 /*
            Size
@@ -1355,20 +1362,20 @@ static void fdc_specify(void)
        }
 
        /* Convert step rate from microseconds to milliseconds and 4 bits */
-       srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+       srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
        if (slow_floppy) {
                srt = srt / 4;
        }
        SUPBOUND(srt, 0xf);
        INFBOUND(srt, 0);
 
-       hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+       hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
        if (hlt < 0x01)
                hlt = 0x01;
        else if (hlt > 0x7f)
                hlt = hlt_max_code;
 
-       hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+       hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
        if (hut < 0x1)
                hut = 0x1;
        else if (hut > 0xf)
@@ -2236,9 +2243,9 @@ static void setup_format_params(int track)
                        }
                }
        }
-       if (_floppy->stretch & FD_ZEROBASED) {
+       if (_floppy->stretch & FD_SECTBASEMASK) {
                for (count = 0; count < F_SECT_PER_TRACK; count++)
-                       here[count].sect--;
+                       here[count].sect += FD_SECTBASE(_floppy) - 1;
        }
 }
 
@@ -2385,7 +2392,7 @@ static void rw_interrupt(void)
 
 #ifdef FLOPPY_SANITY_CHECK
        if (nr_sectors / ssize >
-           (in_sector_offset + current_count_sectors + ssize - 1) / ssize) {
+           DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
                DPRINT("long rw: %x instead of %lx\n",
                       nr_sectors, current_count_sectors);
                printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
@@ -2649,7 +2656,7 @@ static int make_raw_rw_request(void)
        }
        HEAD = fsector_t / _floppy->sect;
 
-       if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) ||
+       if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
             TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
                max_sector = _floppy->sect;
 
@@ -2679,7 +2686,7 @@ static int make_raw_rw_request(void)
        CODE2SIZE;
        SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
        SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
-           ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1);
+           FD_SECTBASE(_floppy);
 
        /* tracksize describes the size which can be filled up with sectors
         * of size ssize.
@@ -3311,7 +3318,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
            g->head <= 0 ||
            g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
            /* check if reserved bits are set */
-           (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0)
+           (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
                return -EINVAL;
        if (type) {
                if (!capable(CAP_SYS_ADMIN))
@@ -3356,7 +3363,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
                if (DRS->maxblock > user_params[drive].sect ||
                    DRS->maxtrack ||
                    ((user_params[drive].sect ^ oldStretch) &
-                    (FD_SWAPSIDES | FD_ZEROBASED)))
+                    (FD_SWAPSIDES | FD_SECTBASEMASK)))
                        invalidate_drive(bdev);
                else
                        process_fd_request();
index 1778e4a..7b33512 100644 (file)
@@ -403,7 +403,7 @@ static int nbd_do_it(struct nbd_device *lo)
        BUG_ON(lo->magic != LO_MAGIC);
 
        lo->pid = current->pid;
-       ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
+       ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
        if (ret) {
                printk(KERN_ERR "nbd: sysfs_create_file failed!");
                return ret;
@@ -412,7 +412,7 @@ static int nbd_do_it(struct nbd_device *lo)
        while ((req = nbd_read_stat(lo)) != NULL)
                nbd_end_request(req);
 
-       sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
+       sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
        return 0;
 }
 
index 29b7a64..0e07715 100644 (file)
@@ -2544,7 +2544,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
                if (last_zone != zone) {
                        BUG_ON(last_zone != zone + pd->settings.size);
                        first_sectors = last_zone - bio->bi_sector;
-                       bp = bio_split(bio, bio_split_pool, first_sectors);
+                       bp = bio_split(bio, first_sectors);
                        BUG_ON(!bp);
                        pkt_make_request(q, &bp->bio1);
                        pkt_make_request(q, &bp->bio2);
@@ -2911,7 +2911,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
        if (!disk->queue)
                goto out_mem2;
 
-       pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
+       pd->pkt_dev = MKDEV(pktdev_major, idx);
        ret = pkt_new_dev(pd, dev);
        if (ret)
                goto out_new_dev;
index d797e20..936466f 100644 (file)
@@ -199,7 +199,8 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
                if (blk_fs_request(req)) {
                        if (ps3disk_submit_request_sg(dev, req))
                                break;
-               } else if (req->cmd_type == REQ_TYPE_FLUSH) {
+               } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+                          req->cmd[0] == REQ_LB_OP_FLUSH) {
                        if (ps3disk_submit_flush_request(dev, req))
                                break;
                } else {
@@ -257,7 +258,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
                return IRQ_HANDLED;
        }
 
-       if (req->cmd_type == REQ_TYPE_FLUSH) {
+       if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+           req->cmd[0] == REQ_LB_OP_FLUSH) {
                read = 0;
                num_sectors = req->hard_cur_sectors;
                op = "flush";
@@ -405,7 +407,8 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 
        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
-       req->cmd_type = REQ_TYPE_FLUSH;
+       req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+       req->cmd[0] = REQ_LB_OP_FLUSH;
 }
 
 static unsigned long ps3disk_mask;
@@ -538,7 +541,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
        struct ps3disk_private *priv = dev->sbd.core.driver_data;
 
        mutex_lock(&ps3disk_mask_mutex);
-       __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
+       __clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
                    &ps3disk_mask);
        mutex_unlock(&ps3disk_mask_mutex);
        del_gendisk(priv->gendisk);
index 4225109..6ec5fc0 100644 (file)
@@ -47,20 +47,20 @@ static void blk_done(struct virtqueue *vq)
 
        spin_lock_irqsave(&vblk->lock, flags);
        while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
-               int uptodate;
+               int error;
                switch (vbr->status) {
                case VIRTIO_BLK_S_OK:
-                       uptodate = 1;
+                       error = 0;
                        break;
                case VIRTIO_BLK_S_UNSUPP:
-                       uptodate = -ENOTTY;
+                       error = -ENOTTY;
                        break;
                default:
-                       uptodate = 0;
+                       error = -EIO;
                        break;
                }
 
-               end_dequeued_request(vbr->req, uptodate);
+               __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
                list_del(&vbr->list);
                mempool_free(vbr, vblk->pool);
        }
@@ -84,11 +84,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
        if (blk_fs_request(vbr->req)) {
                vbr->out_hdr.type = 0;
                vbr->out_hdr.sector = vbr->req->sector;
-               vbr->out_hdr.ioprio = vbr->req->ioprio;
+               vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else if (blk_pc_request(vbr->req)) {
                vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
                vbr->out_hdr.sector = 0;
-               vbr->out_hdr.ioprio = vbr->req->ioprio;
+               vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else {
                /* We don't put anything else in the queue. */
                BUG();
index 3ca643c..bff602c 100644 (file)
@@ -105,15 +105,17 @@ static DEFINE_SPINLOCK(blkif_io_lock);
 #define GRANT_INVALID_REF      0
 
 #define PARTS_PER_DISK         16
+#define PARTS_PER_EXT_DISK      256
 
 #define BLKIF_MAJOR(dev) ((dev)>>8)
 #define BLKIF_MINOR(dev) ((dev) & 0xff)
 
-#define DEV_NAME       "xvd"   /* name in /dev */
+#define EXT_SHIFT 28
+#define EXTENDED (1<<EXT_SHIFT)
+#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
+#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
 
-/* Information about our VBDs. */
-#define MAX_VBDS 64
-static LIST_HEAD(vbds_list);
+#define DEV_NAME       "xvd"   /* name in /dev */
 
 static int get_id_from_freelist(struct blkfront_info *info)
 {
@@ -386,31 +388,60 @@ static int xlvbd_barrier(struct blkfront_info *info)
 }
 
 
-static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
-                              int vdevice, u16 vdisk_info, u16 sector_size,
-                              struct blkfront_info *info)
+static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
+                              struct blkfront_info *info,
+                              u16 vdisk_info, u16 sector_size)
 {
        struct gendisk *gd;
        int nr_minors = 1;
        int err = -ENODEV;
+       unsigned int offset;
+       int minor;
+       int nr_parts;
 
        BUG_ON(info->gd != NULL);
        BUG_ON(info->rq != NULL);
 
-       if ((minor % PARTS_PER_DISK) == 0)
-               nr_minors = PARTS_PER_DISK;
+       if ((info->vdevice>>EXT_SHIFT) > 1) {
+               /* this is above the extended range; something is wrong */
+               printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
+               return -ENODEV;
+       }
+
+       if (!VDEV_IS_EXTENDED(info->vdevice)) {
+               minor = BLKIF_MINOR(info->vdevice);
+               nr_parts = PARTS_PER_DISK;
+       } else {
+               minor = BLKIF_MINOR_EXT(info->vdevice);
+               nr_parts = PARTS_PER_EXT_DISK;
+       }
+
+       if ((minor % nr_parts) == 0)
+               nr_minors = nr_parts;
 
        gd = alloc_disk(nr_minors);
        if (gd == NULL)
                goto out;
 
-       if (nr_minors > 1)
-               sprintf(gd->disk_name, "%s%c", DEV_NAME,
-                       'a' + minor / PARTS_PER_DISK);
-       else
-               sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
-                       'a' + minor / PARTS_PER_DISK,
-                       minor % PARTS_PER_DISK);
+       offset = minor / nr_parts;
+
+       if (nr_minors > 1) {
+               if (offset < 26)
+                       sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
+               else
+                       sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
+                               'a' + ((offset / 26)-1), 'a' + (offset % 26));
+       } else {
+               if (offset < 26)
+                       sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+                               'a' + offset,
+                               minor & (nr_parts - 1));
+               else
+                       sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
+                               'a' + ((offset / 26) - 1),
+                               'a' + (offset % 26),
+                               minor & (nr_parts - 1));
+       }
 
        gd->major = XENVBD_MAJOR;
        gd->first_minor = minor;
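[Editor's illustration, not part of the patch: the naming scheme added above maps offset = minor / nr_parts to xvda..xvdz and then to two-letter names xvdaa..xvdzz. A small userspace sketch with an assumed helper name:]

#include <stdio.h>

static void ex_disk_name(char *buf, unsigned int offset)
{
	if (offset < 26)
		sprintf(buf, "xvd%c", 'a' + offset);
	else
		sprintf(buf, "xvd%c%c",
			'a' + ((offset / 26) - 1), 'a' + (offset % 26));
}

int main(void)
{
	unsigned int offsets[] = { 0, 25, 26, 27, 701 };
	unsigned int i;
	char name[8];

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		ex_disk_name(name, offsets[i]);
		printf("offset %u -> %s\n", offsets[i], name);	/* xvda, xvdz, xvdaa, xvdab, xvdzz */
	}
	return 0;
}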
@@ -699,8 +730,13 @@ static int blkfront_probe(struct xenbus_device *dev,
        err = xenbus_scanf(XBT_NIL, dev->nodename,
                           "virtual-device", "%i", &vdevice);
        if (err != 1) {
-               xenbus_dev_fatal(dev, err, "reading virtual-device");
-               return err;
+               /* go looking in the extended area instead */
+               err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
+                                  "%i", &vdevice);
+               if (err != 1) {
+                       xenbus_dev_fatal(dev, err, "reading virtual-device");
+                       return err;
+               }
        }
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -861,9 +897,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err)
                info->feature_barrier = 0;
 
-       err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
-                                 sectors, info->vdevice,
-                                 binfo, sector_size, info);
+       err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
index 74031de..d47f2f8 100644 (file)
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
                len = nr * CD_FRAMESIZE_RAW;
 
-               ret = blk_rq_map_user(q, rq, ubuf, len);
+               ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
                if (ret)
                        break;
 
index 1231d95..d6ba77a 100644 (file)
@@ -624,14 +624,14 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                ctrl_outb(1, GDROM_DMA_STATUS_REG);
                wait_event_interruptible_timeout(request_queue,
                        gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-               err = gd.transfer;
+               err = gd.transfer ? -EIO : 0;
                gd.transfer = 0;
                gd.pending = 0;
                /* now seek to take the request spinlock
                * before handling ending the request */
                spin_lock(&gdrom_lock);
                list_del_init(&req->queuelist);
-               end_dequeued_request(req, 1 - err);
+               __blk_end_request(req, err, blk_rq_bytes(req));
        }
        spin_unlock(&gdrom_lock);
        kfree(read_command);
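[Editor's aside, illustration only: gdrom, virtio_blk and memstick are all switched in this series from the old uptodate-style completion to __blk_end_request() with an errno and a byte count. The minimal shape of the new call, with example_complete() an assumed helper name:]

#include <linux/blkdev.h>

/* Sketch: complete a dequeued request in one go under the new API.
 * The caller must hold the queue lock, since __blk_end_request()
 * does not take it itself. */
static void example_complete(struct request *req, int err)
{
	/* err is 0 on success or a negative errno, e.g. -EIO */
	__blk_end_request(req, err, blk_rq_bytes(req));
}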
index 7ce1ac4..6af435b 100644 (file)
@@ -661,10 +661,10 @@ void add_disk_randomness(struct gendisk *disk)
        if (!disk || !disk->random)
                return;
        /* first major is 1, so we get >= 0x200 here */
-       DEBUG_ENT("disk event %d:%d\n", disk->major, disk->first_minor);
+       DEBUG_ENT("disk event %d:%d\n",
+                 MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
 
-       add_timer_randomness(disk->random,
-                            0x100 + MKDEV(disk->major, disk->first_minor));
+       add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
 }
 #endif
 
index f16bb46..03c2cb6 100644 (file)
@@ -1113,7 +1113,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 
        if (write) {
                /* disk has become write protected */
-               if (cd->disk->policy) {
+               if (get_disk_ro(cd->disk)) {
                        cdrom_end_request(drive, 0);
                        return ide_stopped;
                }
index 07ef88b..33ea8c0 100644 (file)
 #include <asm/io.h>
 #include <asm/div64.h>
 
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define IDE_DISK_MINORS                (1 << PARTN_BITS)
+#else
+#define IDE_DISK_MINORS                0
+#endif
+
 struct ide_disk_obj {
        ide_drive_t     *drive;
        ide_driver_t    *driver;
@@ -1151,8 +1157,7 @@ static int ide_disk_probe(ide_drive_t *drive)
        if (!idkp)
                goto failed;
 
-       g = alloc_disk_node(1 << PARTN_BITS,
-                       hwif_to_node(drive->hwif));
+       g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
        if (!g)
                goto out_free_idkp;
 
@@ -1178,9 +1183,11 @@ static int ide_disk_probe(ide_drive_t *drive)
        } else
                drive->attach = 1;
 
-       g->minors = 1 << PARTN_BITS;
+       g->minors = IDE_DISK_MINORS;
        g->driverfs_dev = &drive->gendev;
-       g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
+       g->flags |= GENHD_FL_EXT_DEVT;
+       if (drive->removable)
+               g->flags |= GENHD_FL_REMOVABLE;
        set_capacity(g, idedisk_capacity(drive));
        g->fops = &idedisk_ops;
        add_disk(g);
index a51a30e..70aa86c 100644 (file)
@@ -1188,7 +1188,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
 {
        struct gendisk *p = data;
        *part &= (1 << PARTN_BITS) - 1;
-       return &p->dev.kobj;
+       return &disk_to_dev(p)->kobj;
 }
 
 static int exact_lock(dev_t dev, void *data)
index b262c00..5b91915 100644 (file)
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
                                old_nl->next = (uint32_t) ((void *) nl -
                                                           (void *) old_nl);
                        disk = dm_disk(hc->md);
-                       nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+                       nl->dev = huge_encode_dev(disk_devt(disk));
                        nl->next = 0;
                        strcpy(nl->name, hc->name);
 
@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
        if (dm_suspended(md))
                param->flags |= DM_SUSPEND_FLAG;
 
-       param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+       param->dev = huge_encode_dev(disk_devt(disk));
 
        /*
         * Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
         */
        param->open_count = dm_open_count(md);
 
-       if (disk->policy)
+       if (get_disk_ro(disk))
                param->flags |= DM_READONLY_FLAG;
 
        param->event_nr = dm_get_event_nr(md);
index c2fcf28..3d38481 100644 (file)
@@ -33,6 +33,7 @@ struct pgpath {
        unsigned fail_count;            /* Cumulative failure count */
 
        struct dm_path path;
+       struct work_struct deactivate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -112,6 +113,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -122,8 +124,10 @@ static struct pgpath *alloc_pgpath(void)
 {
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
-       if (pgpath)
+       if (pgpath) {
                pgpath->path.is_active = 1;
+               INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+       }
 
        return pgpath;
 }
@@ -133,6 +137,14 @@ static void free_pgpath(struct pgpath *pgpath)
        kfree(pgpath);
 }
 
+static void deactivate_path(struct work_struct *work)
+{
+       struct pgpath *pgpath =
+               container_of(work, struct pgpath, deactivate_path);
+
+       blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
 static struct priority_group *alloc_priority_group(void)
 {
        struct priority_group *pg;
@@ -870,6 +882,7 @@ static int fail_path(struct pgpath *pgpath)
                      pgpath->path.dev->name, m->nr_valid_paths);
 
        queue_work(kmultipathd, &m->trigger_event);
+       queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);
index 4de90ab..b745d8a 100644 (file)
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 
        memset(major_minor, 0, sizeof(major_minor));
        sprintf(major_minor, "%d:%d",
-               bio->bi_bdev->bd_disk->major,
-               bio->bi_bdev->bd_disk->first_minor);
+               MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
+               MINOR(disk_devt(bio->bi_bdev->bd_disk)));
 
        /*
         * Test to see which stripe drive triggered the event
index ace998c..327de03 100644 (file)
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static void start_io_acct(struct dm_io *io)
 {
        struct mapped_device *md = io->md;
+       int cpu;
 
        io->start_time = jiffies;
 
-       preempt_disable();
-       disk_round_stats(dm_disk(md));
-       preempt_enable();
-       dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+       cpu = part_stat_lock();
+       part_round_stats(cpu, &dm_disk(md)->part0);
+       part_stat_unlock();
+       dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static int end_io_acct(struct dm_io *io)
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
-       int pending;
+       int pending, cpu;
        int rw = bio_data_dir(bio);
 
-       preempt_disable();
-       disk_round_stats(dm_disk(md));
-       preempt_enable();
-       dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+       cpu = part_stat_lock();
+       part_round_stats(cpu, &dm_disk(md)->part0);
+       part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+       part_stat_unlock();
 
-       disk_stat_add(dm_disk(md), ticks[rw], duration);
+       dm_disk(md)->part0.in_flight = pending =
+               atomic_dec_return(&md->pending);
 
        return !pending;
 }
@@ -885,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
        int r = -EIO;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
+       int cpu;
 
        /*
         * There is no use in forwarding any barrier request since we can't
@@ -897,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 
        down_read(&md->io_lock);
 
-       disk_stat_inc(dm_disk(md), ios[rw]);
-       disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+       part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+       part_stat_unlock();
 
        /*
         * If we're suspended we have to queue
@@ -1146,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
 
 static void free_dev(struct mapped_device *md)
 {
-       int minor = md->disk->first_minor;
+       int minor = MINOR(disk_devt(md->disk));
 
        if (md->suspended_bdev) {
                unlock_fs(md);
@@ -1182,7 +1187,7 @@ static void event_callback(void *context)
        list_splice_init(&md->uevent_list, &uevents);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
 
-       dm_send_uevents(&uevents, &md->disk->dev.kobj);
+       dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
@@ -1267,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
 
        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
-                  (dm_disk(md)->first_minor != minor) ||
+                  (MINOR(disk_devt(dm_disk(md))) != minor) ||
                   test_bit(DMF_FREEING, &md->flags))) {
                md = NULL;
                goto out;
@@ -1318,7 +1323,8 @@ void dm_put(struct mapped_device *md)
 
        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
-               idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+               idr_replace(&_minor_idr, MINOR_ALLOCED,
+                           MINOR(disk_devt(dm_disk(md))));
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
@@ -1638,7 +1644,7 @@ out:
  *---------------------------------------------------------------*/
 void dm_kobject_uevent(struct mapped_device *md)
 {
-       kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+       kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
index b1eebf8..b9cbee6 100644 (file)
@@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
        mddev_t *mddev = q->queuedata;
        dev_info_t *tmp_dev;
        sector_t block;
+       int cpu;
 
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
        tmp_dev = which_dev(mddev, bio->bi_sector);
        block = bio->bi_sector >> 1;
@@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                 * split it.
                 */
                struct bio_pair *bp;
-               bp = bio_split(bio, bio_split_pool,
+               bp = bio_split(bio,
                               ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
                if (linear_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
index deeac4b..0a3a4bd 100644 (file)
@@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
                goto fail;
 
-       if (rdev->bdev->bd_part)
-               ko = &rdev->bdev->bd_part->dev.kobj;
-       else
-               ko = &rdev->bdev->bd_disk->dev.kobj;
+       ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
        if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
                kobject_del(&rdev->kobj);
                goto fail;
@@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        disk->queue = mddev->queue;
        add_disk(disk);
        mddev->gendisk = disk;
-       error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
-                                    "%s", "md");
+       error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+                                    &disk_to_dev(disk)->kobj, "%s", "md");
        mutex_unlock(&disks_mutex);
        if (error)
                printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
        sysfs_notify(&mddev->kobj, NULL, "array_state");
        sysfs_notify(&mddev->kobj, NULL, "sync_action");
        sysfs_notify(&mddev->kobj, NULL, "degraded");
-       kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+       kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
        return 0;
 }
 
@@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-               curr_events = disk_stat_read(disk, sectors[0]) + 
-                               disk_stat_read(disk, sectors[1]) - 
+               curr_events = part_stat_read(&disk->part0, sectors[0]) +
+                               part_stat_read(&disk->part0, sectors[1]) -
                                atomic_read(&disk->sync_io);
                /* sync IO will cause sync_io to increase before the disk_stats
                 * as sync_io is counted when a request starts, and
index c4779cc..8bb8794 100644 (file)
@@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
        const int rw = bio_data_dir(bio);
+       int cpu;
 
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
        mp_bh->master_bio = bio;
        mp_bh->mddev = mddev;
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
        mp_bh->path = multipath_map(conf);
        if (mp_bh->path < 0) {
index 1836106..53508a8 100644 (file)
@@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
        sector_t chunk;
        sector_t block, rsect;
        const int rw = bio_data_dir(bio);
+       int cpu;
 
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
        chunk_size = mddev->chunk_size >> 10;
        chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
-               bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+               bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
index 03a5ab7..b976442 100644 (file)
@@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
        struct page **behind_pages = NULL;
        const int rw = bio_data_dir(bio);
        const int do_sync = bio_sync(bio);
-       int do_barriers;
+       int cpu, do_barriers;
        mdk_rdev_t *blocked_rdev;
 
        /*
@@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
        bitmap = mddev->bitmap;
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
        /*
         * make_request() can abort the operation when READA is being
@@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
                                        sbio->bi_size = r1_bio->sectors << 9;
                                        sbio->bi_idx = 0;
                                        sbio->bi_phys_segments = 0;
-                                       sbio->bi_hw_segments = 0;
-                                       sbio->bi_hw_front_size = 0;
-                                       sbio->bi_hw_back_size = 0;
                                        sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
                                        sbio->bi_flags |= 1 << BIO_UPTODATE;
                                        sbio->bi_next = NULL;
@@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                bio->bi_vcnt = 0;
                bio->bi_idx = 0;
                bio->bi_phys_segments = 0;
-               bio->bi_hw_segments = 0;
                bio->bi_size = 0;
                bio->bi_end_io = NULL;
                bio->bi_private = NULL;
index e34cd0e..8bdc9bf 100644 (file)
@@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
        mirror_info_t *mirror;
        r10bio_t *r10_bio;
        struct bio *read_bio;
+       int cpu;
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
@@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
-               bp = bio_split(bio, bio_split_pool,
+               bp = bio_split(bio,
                               chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
                if (make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
@@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
         */
        wait_barrier(conf);
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
@@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
                tbio->bi_size = r10_bio->sectors << 9;
                tbio->bi_idx = 0;
                tbio->bi_phys_segments = 0;
-               tbio->bi_hw_segments = 0;
-               tbio->bi_hw_front_size = 0;
-               tbio->bi_hw_back_size = 0;
                tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
                tbio->bi_flags |= 1 << BIO_UPTODATE;
                tbio->bi_next = NULL;
@@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                bio->bi_vcnt = 0;
                bio->bi_idx = 0;
                bio->bi_phys_segments = 0;
-               bio->bi_hw_segments = 0;
                bio->bi_size = 0;
        }
 
index 224de02..ae16794 100644 (file)
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
 #endif
 
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+       return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+       return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+       --bio->bi_phys_segments;
+       return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+       unsigned short val = raid5_bi_hw_segments(bio);
+
+       --val;
+       bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+       return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+       bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+}
+
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
        disk++;
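The helpers added above multiplex two 16-bit counters into the single bi_phys_segments word, replacing the removed bi_hw_segments field: the low half holds the biased count of active stripes, the high half the count of processed stripes. A minimal userspace model of the same packing (hypothetical names), which also shows why the set helper needs a bitwise OR rather than a logical one:

    #include <assert.h>
    #include <stdio.h>

    /* low 16 bits: active stripes (biased); high 16 bits: processed stripes */
    static unsigned int pack(unsigned int active, unsigned int processed)
    {
            return (active & 0xffff) | (processed << 16);
    }

    static unsigned int active_count(unsigned int v)    { return v & 0xffff; }
    static unsigned int processed_count(unsigned int v) { return (v >> 16) & 0xffff; }

    int main(void)
    {
            unsigned int v = pack(1, 0);    /* 1 active, 0 processed, as in remove_bio_from_retry() */

            v = pack(active_count(v), 5);   /* record 5 processed stripes */
            assert(active_count(v) == 1 && processed_count(v) == 5);

            /* a logical OR would collapse the packed word to 0 or 1 and lose both counts */
            printf("active=%u processed=%u\n", active_count(v), processed_count(v));
            return 0;
    }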
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
-                               if (--rbi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
-       bi->bi_phys_segments ++;
+       bi->bi_phys_segments++;
        spin_unlock_irq(&conf->device_lock);
        spin_unlock(&sh->lock);
 
@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (--bi->bi_phys_segments == 0) {
+                       if (!raid5_dec_bi_phys_segments(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (--bi->bi_phys_segments == 0) {
+                       if (!raid5_dec_bi_phys_segments(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                               if (--bi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
                                }
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
                                while (wbi && wbi->bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
-                                       if (--wbi->bi_phys_segments == 0) {
+                                       if (!raid5_dec_bi_phys_segments(wbi)) {
                                                md_write_end(conf->mddev);
                                                wbi->bi_next = *return_bi;
                                                *return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                                copy_data(0, rbi, dev->page, dev->sector);
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                spin_lock_irq(&conf->device_lock);
-                               if (--rbi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
        if(bi) {
                conf->retry_read_aligned_list = bi->bi_next;
                bi->bi_next = NULL;
+               /*
+                * this sets the active stripe count to 1 and the processed
+                * stripe count (upper 16 bits) to zero
+                */
                bi->bi_phys_segments = 1; /* biased count of active stripes */
-               bi->bi_hw_segments = 0; /* count of processed stripes */
        }
 
        return bi;
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
        if ((bi->bi_size>>9) > q->max_sectors)
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > q->max_phys_segments ||
-           bi->bi_hw_segments > q->max_hw_segments)
+       if (bi->bi_phys_segments > q->max_phys_segments)
                return 0;
 
        if (q->merge_bvec_fn)
@@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
        sector_t logical_sector, last_sector;
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
-       int remaining;
+       int cpu, remaining;
 
        if (unlikely(bio_barrier(bi))) {
                bio_endio(bi, -EOPNOTSUPP);
@@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
        md_write_start(mddev, bi);
 
-       disk_stat_inc(mddev->gendisk, ios[rw]);
-       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bi));
+       part_stat_unlock();
 
        if (rw == READ &&
             mddev->reshape_position == MaxSector &&
@@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
                        
        }
        spin_lock_irq(&conf->device_lock);
-       remaining = --bi->bi_phys_segments;
+       remaining = raid5_dec_bi_phys_segments(bi);
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0) {
 
@@ -3752,7 +3791,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                     sector += STRIPE_SECTORS,
                     scnt++) {
 
-               if (scnt < raid_bio->bi_hw_segments)
+               if (scnt < raid5_bi_hw_segments(raid_bio))
                        /* already done this stripe */
                        continue;
 
@@ -3760,7 +3799,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 
                if (!sh) {
                        /* failed to get a stripe - must wait */
-                       raid_bio->bi_hw_segments = scnt;
+                       raid5_set_bi_hw_segments(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
@@ -3768,7 +3807,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
                        release_stripe(sh);
-                       raid_bio->bi_hw_segments = scnt;
+                       raid5_set_bi_hw_segments(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
@@ -3778,7 +3817,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                handled++;
        }
        spin_lock_irq(&conf->device_lock);
-       remaining = --raid_bio->bi_phys_segments;
+       remaining = raid5_dec_bi_phys_segments(raid_bio);
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0)
                bio_endio(raid_bio, 0);
index d2d2318..6e291bf 100644 (file)
@@ -197,7 +197,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
 static int mspro_block_disk_release(struct gendisk *disk)
 {
        struct mspro_block_data *msb = disk->private_data;
-       int disk_id = disk->first_minor >> MSPRO_BLOCK_PART_SHIFT;
+       int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
 
        mutex_lock(&mspro_block_disk_lock);
 
@@ -828,7 +828,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
        if (msb->eject) {
                while ((req = elv_next_request(q)) != NULL)
-                       end_queued_request(req, -ENODEV);
+                       __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
 
                return;
        }
index ebc8b9d..97156b6 100644 (file)
@@ -83,7 +83,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
-               int devidx = md->disk->first_minor >> MMC_SHIFT;
+               int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
                __clear_bit(devidx, dev_use);
 
                put_disk(md->disk);
index f34f20c..9bf581c 100644 (file)
@@ -1005,6 +1005,29 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
        return ftl_write((void *)dev, buf, block, 1);
 }
 
+static int ftl_discardsect(struct mtd_blktrans_dev *dev,
+                          unsigned long sector, unsigned nr_sects)
+{
+       partition_t *part = (void *)dev;
+       uint32_t bsize = 1 << part->header.EraseUnitSize;
+
+       DEBUG(1, "FTL erase sector %ld for %d sectors\n",
+             sector, nr_sects);
+
+       while (nr_sects) {
+               uint32_t old_addr = part->VirtualBlockMap[sector];
+               if (old_addr != 0xffffffff) {
+                       part->VirtualBlockMap[sector] = 0xffffffff;
+                       part->EUNInfo[old_addr/bsize].Deleted++;
+                       if (set_bam_entry(part, old_addr, 0))
+                               return -EIO;
+               }
+               nr_sects--;
+               sector++;
+       }
+
+       return 0;
+}
 /*====================================================================*/
 
 static void ftl_freepart(partition_t *part)
@@ -1069,6 +1092,7 @@ static struct mtd_blktrans_ops ftl_tr = {
        .blksize        = SECTOR_SIZE,
        .readsect       = ftl_readsect,
        .writesect      = ftl_writesect,
+       .discard        = ftl_discardsect,