[linux-2.6.git] block/ll_rw_blk.c
index c525b5a2b59849d7af6fb3fe45e8a8699711e085..a4ff3271d4a88b984b3da89821cbc6a2fd65885a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@
 /*
  * This handles all read/write requests to block devices
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
  */
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
 
 /*
  * For io context allocations
  */
-static kmem_cache_t *iocontext_cachep;
-
-static wait_queue_head_t congestion_wqh[2] = {
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-       };
+static struct kmem_cache *iocontext_cachep;
 
 /*
  * Controlling structure to kblockd
  */
-static struct workqueue_struct *kblockd_workqueue; 
+static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME (HZ/50UL)
 
@@ -106,35 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
        q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exitted congestion.  Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       clear_bit(bit, &q->backing_dev_info.state);
-       smp_mb__after_clear_bit();
-       if (waitqueue_active(wqh))
-               wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion.  Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       set_bit(bit, &q->backing_dev_info.state);
-}
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev:      device
@@ -153,7 +125,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
                ret = &q->backing_dev_info;
        return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -161,7 +132,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
        q->activity_fn = fn;
        q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -205,6 +175,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+       q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
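
A minimal sketch of how a driver would hook into the new softirq completion path; the mydrv_* names are assumptions, not part of this patch, and the split mirrors what ide does in this era. The hard-irq handler hands the request to blk_complete_request(), and the accounting then runs from the block softirq via the registered softirq_done_fn:

static void mydrv_softirq_done(struct request *rq)
{
        int uptodate = rq->errors ? 0 : 1;

        /* softirq context, queue lock not held */
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);

        spin_lock_irq(rq->q->queue_lock);
        end_that_request_last(rq, uptodate);
        spin_unlock_irq(rq->q->queue_lock);
}

static void mydrv_setup_queue(request_queue_t *q)
{
        blk_queue_softirq_done(q, mydrv_softirq_done);
}

static void mydrv_irq_complete(struct request *rq)
{
        /* defer the heavy lifting to mydrv_softirq_done() */
        blk_complete_request(rq);
}
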
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -239,7 +216,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, MAX_SECTORS);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
@@ -250,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;
 
-       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
@@ -265,18 +242,19 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 
 EXPORT_SYMBOL(blk_queue_make_request);
 
-static inline void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(request_queue_t *q, struct request *rq)
 {
        INIT_LIST_HEAD(&rq->queuelist);
+       INIT_LIST_HEAD(&rq->donelist);
 
        rq->errors = 0;
-       rq->rq_status = RQ_ACTIVE;
        rq->bio = rq->biotail = NULL;
+       INIT_HLIST_NODE(&rq->hash);
+       RB_CLEAR_NODE(&rq->rb_node);
        rq->ioprio = 0;
        rq->buffer = NULL;
        rq->ref_count = 1;
        rq->q = q;
-       rq->waiting = NULL;
        rq->special = NULL;
        rq->data_len = 0;
        rq->data = NULL;
@@ -284,12 +262,14 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
        rq->sense = NULL;
        rq->end_io = NULL;
        rq->end_io_data = NULL;
+       rq->completion_data = NULL;
 }
 
 /**
  * blk_queue_ordered - does this queue support ordered writes
- * @q:     the request queue
- * @flag:  see below
+ * @q:        the request queue
+ * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -298,28 +278,31 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  *   feature should call this function and indicate so.
  *
  **/
-void blk_queue_ordered(request_queue_t *q, int flag)
-{
-       switch (flag) {
-               case QUEUE_ORDERED_NONE:
-                       if (q->flush_rq)
-                               kmem_cache_free(request_cachep, q->flush_rq);
-                       q->flush_rq = NULL;
-                       q->ordered = flag;
-                       break;
-               case QUEUE_ORDERED_TAG:
-                       q->ordered = flag;
-                       break;
-               case QUEUE_ORDERED_FLUSH:
-                       q->ordered = flag;
-                       if (!q->flush_rq)
-                               q->flush_rq = kmem_cache_alloc(request_cachep,
-                                                               GFP_KERNEL);
-                       break;
-               default:
-                       printk("blk_queue_ordered: bad value %d\n", flag);
-                       break;
+int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+                     prepare_flush_fn *prepare_flush_fn)
+{
+       if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+           prepare_flush_fn == NULL) {
+               printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+               return -EINVAL;
+       }
+
+       if (ordered != QUEUE_ORDERED_NONE &&
+           ordered != QUEUE_ORDERED_DRAIN &&
+           ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+           ordered != QUEUE_ORDERED_DRAIN_FUA &&
+           ordered != QUEUE_ORDERED_TAG &&
+           ordered != QUEUE_ORDERED_TAG_FLUSH &&
+           ordered != QUEUE_ORDERED_TAG_FUA) {
+               printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+               return -EINVAL;
        }
+
+       q->ordered = ordered;
+       q->next_ordered = ordered;
+       q->prepare_flush_fn = prepare_flush_fn;
+
+       return 0;
 }
 
 EXPORT_SYMBOL(blk_queue_ordered);
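
For reference, a sketch of how a driver with a volatile write cache might use the reworked interface; the mydrv_* names and the MYDRV_* constants are assumptions, loosely modelled on what sd does, not block-layer API:

static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = MYDRV_FLUSH_TIMEOUT;      /* assumed driver constant */
        rq->cmd[0] = MYDRV_FLUSH_OPCODE;        /* assumed device opcode */
        rq->cmd_len = 10;
}

static int mydrv_enable_barriers(request_queue_t *q, int write_cache)
{
        /* drain plus pre/post cache flush if the write cache is on,
         * plain drain otherwise; the return value is now meaningful */
        unsigned ordered = write_cache ? QUEUE_ORDERED_DRAIN_FLUSH
                                       : QUEUE_ORDERED_DRAIN;

        return blk_queue_ordered(q, ordered, mydrv_prepare_flush);
}
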
@@ -344,167 +327,261 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 /*
  * Cache flushing for ordered writes handling
  */
-static void blk_pre_flush_end_io(struct request *flush_rq)
+inline unsigned blk_ordered_cur_seq(request_queue_t *q)
 {
-       struct request *rq = flush_rq->end_io_data;
-       request_queue_t *q = rq->q;
-
-       elv_completed_request(q, flush_rq);
-
-       rq->flags |= REQ_BAR_PREFLUSH;
-
-       if (!flush_rq->errors)
-               elv_requeue_request(q, rq);
-       else {
-               q->end_flush_fn(q, flush_rq);
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               q->request_fn(q);
-       }
+       if (!q->ordseq)
+               return 0;
+       return 1 << ffz(q->ordseq);
 }
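
A worked example of the ffz() trick, as a sketch; it assumes the QUEUE_ORDSEQ_* bits are defined in submission order as STARTED=0x01, DRAIN=0x02, PREFLUSH=0x04, BAR=0x08, POSTFLUSH=0x10, DONE=0x20:

/*
 *      ordseq = 0x01 (STARTED)       -> 1 << ffz(0x01) = 0x02 (DRAIN)
 *      ordseq = 0x01|0x02|0x04       -> 1 << ffz(0x07) = 0x08 (BAR)
 *      ordseq = 0x1f                 -> 1 << ffz(0x1f) = 0x20 (DONE)
 *
 * i.e. the current stage is always the lowest bit not yet completed.
 */
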
 
-static void blk_post_flush_end_io(struct request *flush_rq)
+unsigned blk_ordered_req_seq(struct request *rq)
 {
-       struct request *rq = flush_rq->end_io_data;
        request_queue_t *q = rq->q;
 
-       elv_completed_request(q, flush_rq);
+       BUG_ON(q->ordseq == 0);
 
-       rq->flags |= REQ_BAR_POSTFLUSH;
+       if (rq == &q->pre_flush_rq)
+               return QUEUE_ORDSEQ_PREFLUSH;
+       if (rq == &q->bar_rq)
+               return QUEUE_ORDSEQ_BAR;
+       if (rq == &q->post_flush_rq)
+               return QUEUE_ORDSEQ_POSTFLUSH;
 
-       q->end_flush_fn(q, flush_rq);
-       clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-       q->request_fn(q);
+       if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+           (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
+               return QUEUE_ORDSEQ_DRAIN;
+       else
+               return QUEUE_ORDSEQ_DONE;
 }
 
-struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
+void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
 {
-       struct request *flush_rq = q->flush_rq;
-
-       BUG_ON(!blk_barrier_rq(rq));
+       struct request *rq;
+       int uptodate;
 
-       if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
-               return NULL;
+       if (error && !q->orderr)
+               q->orderr = error;
 
-       rq_init(q, flush_rq);
-       flush_rq->elevator_private = NULL;
-       flush_rq->flags = REQ_BAR_FLUSH;
-       flush_rq->rq_disk = rq->rq_disk;
-       flush_rq->rl = NULL;
+       BUG_ON(q->ordseq & seq);
+       q->ordseq |= seq;
 
-       /*
-        * prepare_flush returns 0 if no flush is needed, just mark both
-        * pre and post flush as done in that case
-        */
-       if (!q->prepare_flush_fn(q, flush_rq)) {
-               rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               return rq;
-       }
+       if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+               return;
 
        /*
-        * some drivers dequeue requests right away, some only after io
-        * completion. make sure the request is dequeued.
+        * Okay, sequence complete.
         */
-       if (!list_empty(&rq->queuelist))
-               blkdev_dequeue_request(rq);
+       rq = q->orig_bar_rq;
+       uptodate = q->orderr ? q->orderr : 1;
 
-       flush_rq->end_io_data = rq;
-       flush_rq->end_io = blk_pre_flush_end_io;
+       q->ordseq = 0;
 
-       __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-       return flush_rq;
+       end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+       end_that_request_last(rq, uptodate);
 }
 
-static void blk_start_post_flush(request_queue_t *q, struct request *rq)
+static void pre_flush_end_io(struct request *rq, int error)
 {
-       struct request *flush_rq = q->flush_rq;
+       elv_completed_request(rq->q, rq);
+       blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+}
 
-       BUG_ON(!blk_barrier_rq(rq));
+static void bar_end_io(struct request *rq, int error)
+{
+       elv_completed_request(rq->q, rq);
+       blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+}
 
-       rq_init(q, flush_rq);
-       flush_rq->elevator_private = NULL;
-       flush_rq->flags = REQ_BAR_FLUSH;
-       flush_rq->rq_disk = rq->rq_disk;
-       flush_rq->rl = NULL;
+static void post_flush_end_io(struct request *rq, int error)
+{
+       elv_completed_request(rq->q, rq);
+       blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+}
 
-       if (q->prepare_flush_fn(q, flush_rq)) {
-               flush_rq->end_io_data = rq;
-               flush_rq->end_io = blk_post_flush_end_io;
+static void queue_flush(request_queue_t *q, unsigned which)
+{
+       struct request *rq;
+       rq_end_io_fn *end_io;
 
-               __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-               q->request_fn(q);
+       if (which == QUEUE_ORDERED_PREFLUSH) {
+               rq = &q->pre_flush_rq;
+               end_io = pre_flush_end_io;
+       } else {
+               rq = &q->post_flush_rq;
+               end_io = post_flush_end_io;
        }
+
+       rq->cmd_flags = REQ_HARDBARRIER;
+       rq_init(q, rq);
+       rq->elevator_private = NULL;
+       rq->elevator_private2 = NULL;
+       rq->rq_disk = q->bar_rq.rq_disk;
+       rq->end_io = end_io;
+       q->prepare_flush_fn(q, rq);
+
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
-                                       int sectors)
+static inline struct request *start_ordered(request_queue_t *q,
+                                           struct request *rq)
 {
-       if (sectors > rq->nr_sectors)
-               sectors = rq->nr_sectors;
+       q->bi_size = 0;
+       q->orderr = 0;
+       q->ordered = q->next_ordered;
+       q->ordseq |= QUEUE_ORDSEQ_STARTED;
+
+       /*
+        * Prep proxy barrier request.
+        */
+       blkdev_dequeue_request(rq);
+       q->orig_bar_rq = rq;
+       rq = &q->bar_rq;
+       rq->cmd_flags = 0;
+       rq_init(q, rq);
+       if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+               rq->cmd_flags |= REQ_RW;
+       rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+       rq->elevator_private = NULL;
+       rq->elevator_private2 = NULL;
+       init_request_from_bio(rq, q->orig_bar_rq->bio);
+       rq->end_io = bar_end_io;
+
+       /*
+        * Queue ordered sequence.  As we stack them at the head, we
+        * need to queue in reverse order.  Note that we rely on that
+        * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+        * request gets in between the ordered sequence.
+        */
+       if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+               queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+       else
+               q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       rq->nr_sectors -= sectors;
-       return rq->nr_sectors;
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+
+       if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+               queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+               rq = &q->pre_flush_rq;
+       } else
+               q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+
+       if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+               q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+       else
+               rq = NULL;
+
+       return rq;
 }
 
-static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
-                                    int sectors, int queue_locked)
+int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-       if (q->ordered != QUEUE_ORDERED_FLUSH)
-               return 0;
-       if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
-               return 0;
-       if (blk_barrier_postflush(rq))
-               return 0;
+       struct request *rq = *rqp;
+       int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+
+       if (!q->ordseq) {
+               if (!is_barrier)
+                       return 1;
 
-       if (!blk_check_end_barrier(q, rq, sectors)) {
-               unsigned long flags = 0;
+               if (q->next_ordered != QUEUE_ORDERED_NONE) {
+                       *rqp = start_ordered(q, rq);
+                       return 1;
+               } else {
+                       /*
+                        * This can happen when the queue switches to
+                        * ORDERED_NONE while this request is on it.
+                        */
+                       blkdev_dequeue_request(rq);
+                       end_that_request_first(rq, -EOPNOTSUPP,
+                                              rq->hard_nr_sectors);
+                       end_that_request_last(rq, -EOPNOTSUPP);
+                       *rqp = NULL;
+                       return 0;
+               }
+       }
 
-               if (!queue_locked)
-                       spin_lock_irqsave(q->queue_lock, flags);
+       /*
+        * Ordered sequence in progress
+        */
 
-               blk_start_post_flush(q, rq);
+       /* Special requests are not subject to ordering rules. */
+       if (!blk_fs_request(rq) &&
+           rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+               return 1;
 
-               if (!queue_locked)
-                       spin_unlock_irqrestore(q->queue_lock, flags);
+       if (q->ordered & QUEUE_ORDERED_TAG) {
+               /* Ordered by tag.  Blocking the next barrier is enough. */
+               if (is_barrier && rq != &q->bar_rq)
+                       *rqp = NULL;
+       } else {
+               /* Ordered by draining.  Wait for turn. */
+               WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+               if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                       *rqp = NULL;
        }
 
        return 1;
 }
 
-/**
- * blk_complete_barrier_rq - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   Used in driver end_io handling to determine whether to postpone
- *   completion of a barrier request until a post flush has been done. This
- *   is the unlocked variant, used if the caller doesn't already hold the
- *   queue lock.
- **/
-int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
+static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 {
-       return __blk_complete_barrier_rq(q, rq, sectors, 0);
+       request_queue_t *q = bio->bi_private;
+       struct bio_vec *bvec;
+       int i;
+
+       /*
+        * This is dry run, restore bio_sector and size.  We'll finish
+        * this request again with the original bi_end_io after an
+        * error occurs or post flush is complete.
+        */
+       q->bi_size += bytes;
+
+       if (bio->bi_size)
+               return 1;
+
+       /* Rewind bvec's */
+       bio->bi_idx = 0;
+       bio_for_each_segment(bvec, bio, i) {
+               bvec->bv_len += bvec->bv_offset;
+               bvec->bv_offset = 0;
+       }
+
+       /* Reset bio */
+       set_bit(BIO_UPTODATE, &bio->bi_flags);
+       bio->bi_size = q->bi_size;
+       bio->bi_sector -= (q->bi_size >> 9);
+       q->bi_size = 0;
+
+       return 0;
 }
-EXPORT_SYMBOL(blk_complete_barrier_rq);
 
-/**
- * blk_complete_barrier_rq_locked - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   See blk_complete_barrier_rq(). This variant must be used if the caller
- *   holds the queue lock.
- **/
-int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
-                                  int sectors)
+static int ordered_bio_endio(struct request *rq, struct bio *bio,
+                            unsigned int nbytes, int error)
 {
-       return __blk_complete_barrier_rq(q, rq, sectors, 1);
+       request_queue_t *q = rq->q;
+       bio_end_io_t *endio;
+       void *private;
+
+       if (&q->bar_rq != rq)
+               return 0;
+
+       /*
+        * Okay, this is the barrier request in progress, dry finish it.
+        */
+       if (error && !q->orderr)
+               q->orderr = error;
+
+       endio = bio->bi_end_io;
+       private = bio->bi_private;
+       bio->bi_end_io = flush_dry_bio_endio;
+       bio->bi_private = q;
+
+       bio_endio(bio, nbytes, error);
+
+       bio->bi_end_io = endio;
+       bio->bi_private = private;
+
+       return 1;
 }
-EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
 
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
@@ -515,26 +592,31 @@ EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page. By default
- *    the block layer sets this to the highest numbered "low" memory page.
+ *    buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-       /*
-        * set appropriate bounce gfp mask -- unfortunately we don't have a
-        * full 4GB zone, so we have to resort to low memory for any bounces.
-        * ISA has its own < 16MB zone.
-        */
-       if (bounce_pfn < blk_max_low_pfn) {
-               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+       int dma = 0;
+
+       q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+       /* Assume anything <= 4GB can be handled by IOMMU.
+          Actually some IOMMUs can handle everything, but I don't
+          know of a way to test this here. */
+       if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+               dma = 1;
+       q->bounce_pfn = max_low_pfn;
+#else
+       if (bounce_pfn < blk_max_low_pfn)
+               dma = 1;
+       q->bounce_pfn = bounce_pfn;
+#endif
+       if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
-       } else
-               q->bounce_gfp = GFP_NOIO;
-
-       q->bounce_pfn = bounce_pfn;
+               q->bounce_pfn = bounce_pfn;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
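
A short sketch of typical driver usage under the new 64-bit handling (mydrv_* is an assumed name); the driver simply states how far its DMA engine reaches and the core decides whether ISA bouncing is needed:

static void mydrv_set_dma_limits(request_queue_t *q, int isa_only)
{
        if (isa_only)
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);      /* < 16MB DMA only */
        else
                blk_queue_bounce_limit(q, 0xffffffffULL);       /* 32-bit DMA mask */
}
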
@@ -548,14 +630,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
        }
 
-       q->max_sectors = q->max_hw_sectors = max_sectors;
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = q->max_sectors = max_sectors;
+       else {
+               q->max_sectors = BLK_DEF_MAX_SECTORS;
+               q->max_hw_sectors = max_sectors;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
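
The split between max_sectors and max_hw_sectors is easiest to see with numbers; a sketch, assuming the default BLK_DEF_MAX_SECTORS of 1024:

/*
 *      blk_queue_max_sectors(q, 128);   -> q->max_sectors = q->max_hw_sectors = 128
 *      blk_queue_max_sectors(q, 8192);  -> q->max_sectors = 1024 (default cap),
 *                                          q->max_hw_sectors = 8192
 *
 * fs requests are sized against max_sectors, while BLOCK_PC requests
 * (see ll_back_merge_fn and blk_rq_map_user below) may grow up to
 * max_hw_sectors.
 */
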
@@ -657,13 +744,15 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
        /* zero is "infinity" */
-       t->max_sectors = t->max_hw_sectors =
-               min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
        t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+       if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+               clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
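
A sketch of the intended caller, a stacking driver in the dm/md mould (stackdrv_* is an assumed name):

static void stackdrv_add_member(request_queue_t *top, struct block_device *bdev)
{
        request_queue_t *bottom = bdev_get_queue(bdev);

        /* shrink the stacked queue's limits to the most restrictive member;
         * with this patch the clustering flag is propagated as well */
        blk_queue_stack_limits(top, bottom);
}
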
@@ -715,32 +804,24 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  **/
 struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
-
-       if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
-               return NULL;
-
-       return bqt->tag_index[tag];
+       return blk_map_queue_find_tag(q->queue_tags, tag);
 }
 
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt:       the tag map to free
  *
- *  Notes:
- *    blk_cleanup_queue() will take care of calling this function, if tagging
- *    has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
+ * Tries to free the specified @bqt@.  Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
+       int retval;
 
-       if (!bqt)
-               return;
-
-       if (atomic_dec_and_test(&bqt->refcnt)) {
+       retval = atomic_dec_and_test(&bqt->refcnt);
+       if (retval) {
                BUG_ON(bqt->busy);
                BUG_ON(!list_empty(&bqt->busy_list));
 
@@ -751,12 +832,49 @@ static void __blk_queue_free_tags(request_queue_t *q)
                bqt->tag_map = NULL;
 
                kfree(bqt);
+
        }
 
+       return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+
+       if (!bqt)
+               return;
+
+       __blk_free_tags(bqt);
+
        q->queue_tags = NULL;
        q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
 
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt:       the tag map to free
+ *
+ * For an externally managed @bqt@, frees the map.  Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+       if (unlikely(!__blk_free_tags(bqt)))
+               BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
@@ -779,23 +897,21 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
        unsigned long *tag_map;
        int nr_ulongs;
 
-       if (depth > q->nr_requests * 2) {
+       if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                                __FUNCTION__, depth);
        }
 
-       tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+       tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;
 
        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+       tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
 
-       memset(tag_index, 0, depth * sizeof(struct request *));
-       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
@@ -807,6 +923,38 @@ fail:
        return -ENOMEM;
 }
 
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+                                                  int depth)
+{
+       struct blk_queue_tag *tags;
+
+       tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+       if (!tags)
+               goto fail;
+
+       if (init_tag_map(q, tags, depth))
+               goto fail;
+
+       INIT_LIST_HEAD(&tags->busy_list);
+       tags->busy = 0;
+       atomic_set(&tags->refcnt, 1);
+       return tags;
+fail:
+       kfree(tags);
+       return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth:     the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+       return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
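
A sketch of the shared-map usage this enables (the hostdrv_* names are assumptions): the tag map is created independently of any queue, attached to several of them, and blk_free_tags() is only legal once every queue using it has been torn down:

static struct blk_queue_tag *hostdrv_shared_tags;

static int hostdrv_attach_queues(request_queue_t *q0, request_queue_t *q1)
{
        hostdrv_shared_tags = blk_init_tags(64);
        if (!hostdrv_shared_tags)
                return -ENOMEM;

        if (blk_queue_init_tags(q0, 64, hostdrv_shared_tags) ||
            blk_queue_init_tags(q1, 64, hostdrv_shared_tags))
                return -ENOMEM;

        return 0;
}

static void hostdrv_release_tags(void)
{
        /* after blk_cleanup_queue() has run for q0 and q1 */
        blk_free_tags(hostdrv_shared_tags);
}
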
 /**
  * blk_queue_init_tags - initialize the queue tag info
  * @q:  the request queue for the device
@@ -821,16 +969,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
        if (!tags && !q->queue_tags) {
-               tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-               if (!tags)
-                       goto fail;
+               tags = __blk_queue_init_tags(q, depth);
 
-               if (init_tag_map(q, tags, depth))
+               if (!tags)
                        goto fail;
-
-               INIT_LIST_HEAD(&tags->busy_list);
-               tags->busy = 0;
-               atomic_set(&tags->refcnt, 1);
        } else if (q->queue_tags) {
                if ((rc = blk_queue_resize_tags(q, depth)))
                        return rc;
@@ -881,6 +1023,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
                return 0;
        }
 
+       /*
+        * Currently cannot replace a shared tag map with a new
+        * one, so error out if this is the case
+        */
+       if (atomic_read(&bqt->refcnt) != 1)
+               return -EBUSY;
+
        /*
         * save the old state info, so we can copy it back
         */
@@ -937,7 +1086,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
        }
 
        list_del_init(&rq->queuelist);
-       rq->flags &= ~REQ_QUEUED;
+       rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;
 
        if (unlikely(bqt->tag_index[tag] == NULL))
@@ -973,7 +1122,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag;
 
-       if (unlikely((rq->flags & REQ_QUEUED))) {
+       if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR 
                       "%s: request %p for device [%s] already tagged %d",
                       __FUNCTION__, rq,
@@ -981,13 +1130,18 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
                BUG();
        }
 
-       tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-       if (tag >= bqt->max_depth)
-               return 1;
+       /*
+        * Protect against shared tag maps, as we may not have exclusive
+        * access to the tag map.
+        */
+       do {
+               tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+               if (tag >= bqt->max_depth)
+                       return 1;
 
-       __set_bit(tag, bqt->tag_map);
+       } while (test_and_set_bit(tag, bqt->tag_map));
 
-       rq->flags |= REQ_QUEUED;
+       rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
@@ -1023,63 +1177,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
                        printk(KERN_ERR
                               "%s: bad tag found on list\n", __FUNCTION__);
                        list_del_init(&rq->queuelist);
-                       rq->flags &= ~REQ_QUEUED;
+                       rq->cmd_flags &= ~REQ_QUEUED;
                } else
                        blk_queue_end_tag(q, rq);
 
-               rq->flags &= ~REQ_STARTED;
+               rq->cmd_flags &= ~REQ_STARTED;
                __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
        }
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static char *rq_flags[] = {
-       "REQ_RW",
-       "REQ_FAILFAST",
-       "REQ_SORTED",
-       "REQ_SOFTBARRIER",
-       "REQ_HARDBARRIER",
-       "REQ_CMD",
-       "REQ_NOMERGE",
-       "REQ_STARTED",
-       "REQ_DONTPREP",
-       "REQ_QUEUED",
-       "REQ_ELVPRIV",
-       "REQ_PC",
-       "REQ_BLOCK_PC",
-       "REQ_SENSE",
-       "REQ_FAILED",
-       "REQ_QUIET",
-       "REQ_SPECIAL",
-       "REQ_DRIVE_CMD",
-       "REQ_DRIVE_TASK",
-       "REQ_DRIVE_TASKFILE",
-       "REQ_PREEMPT",
-       "REQ_PM_SUSPEND",
-       "REQ_PM_RESUME",
-       "REQ_PM_SHUTDOWN",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
        int bit;
 
-       printk("%s: dev %s: flags = ", msg,
-               rq->rq_disk ? rq->rq_disk->disk_name : "?");
-       bit = 0;
-       do {
-               if (rq->flags & (1 << bit))
-                       printk("%s ", rq_flags[bit]);
-               bit++;
-       } while (bit < __REQ_NR_BITS);
+       printk("%s: dev %s: type=%x, flags=%x\n", msg,
+               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+               rq->cmd_flags);
 
        printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
                                                       rq->nr_sectors,
                                                       rq->current_nr_sectors);
        printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-       if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+       if (blk_pc_request(rq)) {
                printk("cdb: ");
                for (bit = 0; bit < sizeof(rq->cmd); bit++)
                        printk("%02x ", rq->cmd[bit]);
@@ -1252,7 +1374,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
        int nr_phys_segs = bio_phys_segments(q, bio);
 
        if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
@@ -1275,7 +1397,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 
        if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
@@ -1293,10 +1415,16 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
                            struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
@@ -1325,10 +1453,17 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
                             struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+               req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
@@ -1418,11 +1553,13 @@ void blk_plug_device(request_queue_t *q)
         * don't plug a stopped queue, it must be paired with blk_start_queue()
         * which will restart the queueing
         */
-       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+       if (blk_queue_stopped(q))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+       }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1449,7 +1586,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+       if (unlikely(blk_queue_stopped(q)))
                return;
 
        if (!blk_remove_plug(q))
@@ -1486,13 +1623,20 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
-       if (q->unplug_fn)
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
                q->unplug_fn(q);
+       }
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-       request_queue_t *q = data;
+       request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
 
        q->unplug_fn(q);
 }
@@ -1501,6 +1645,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
        request_queue_t *q = (request_queue_t *)data;
 
+       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+                               q->rq.count[READ] + q->rq.count[WRITE]);
+
        kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1515,6 +1662,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+       WARN_ON(!irqs_disabled());
+
        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
        /*
@@ -1584,15 +1733,28 @@ void blk_run_queue(struct request_queue *q)
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);
-       if (!elv_queue_empty(q))
-               q->request_fn(q);
+
+       /*
+        * Only recurse once to avoid overrunning the stack, let the unplug
+        * handling reinvoke the handler shortly if we already got there.
+        */
+       if (!elv_queue_empty(q)) {
+               if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+                       q->request_fn(q);
+                       clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+               } else {
+                       blk_plug_device(q);
+                       kblockd_schedule_work(&q->unplug_work);
+               }
+       }
+
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q:    the request queue to be released
+ * @kobj:    the kobj belonging to the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or
@@ -1605,16 +1767,11 @@ EXPORT_SYMBOL(blk_run_queue);
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;
 
-       if (!atomic_dec_and_test(&q->refcnt))
-               return;
-
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
        blk_sync_queue(q);
 
        if (rl->rq_pool)
@@ -1623,11 +1780,29 @@ void blk_cleanup_queue(request_queue_t * q)
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
-       blk_queue_ordered(q, QUEUE_ORDERED_NONE);
+       blk_trace_shutdown(q);
 
        kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+       kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+       mutex_lock(&q->sysfs_lock);
+       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+       mutex_unlock(&q->sysfs_lock);
+
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
+       blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(request_queue_t *q)
@@ -1649,14 +1824,14 @@ static int blk_init_free_list(request_queue_t *q)
        return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
        return blk_alloc_queue_node(gfp_mask, -1);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        request_queue_t *q;
@@ -1667,11 +1842,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        memset(q, 0, sizeof(*q));
        init_timer(&q->unplug_timer);
-       atomic_set(&q->refcnt, 1);
+
+       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       q->kobj.ktype = &queue_ktype;
+       kobject_init(&q->kobj);
 
        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
 
+       mutex_init(&q->sysfs_lock);
+
        return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1698,7 +1878,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  *    get dealt with eventually.
  *
  *    The queue spin lock must be held while manipulating the requests on the
- *    request queue.
+ *    request queue; this lock will be taken also from interrupt context, so irq
+ *    disabling is needed for it.
  *
  *    Function returns a pointer to the initialized request queue, or NULL if
  *    it didn't succeed.
@@ -1723,8 +1904,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q))
-               goto out_init;
+       if (blk_init_free_list(q)) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
 
        /*
         * if caller didn't supply a lock, they get per-queue locking with
@@ -1760,9 +1943,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_cleanup_queue(q);
-out_init:
-       kmem_cache_free(requestq_cachep, q);
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1770,7 +1951,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               atomic_inc(&q->refcnt);
+               kobject_get(&q->kobj);
                return 0;
        }
 
@@ -1781,14 +1962,13 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-       if (rq->flags & REQ_ELVPRIV)
+       if (rq->cmd_flags & REQ_ELVPRIV)
                elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);
 }
 
-static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
-                 int priv, gfp_t gfp_mask)
+static struct request *
+blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
 {
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -1796,17 +1976,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
                return NULL;
 
        /*
-        * first three bits are identical in rq->flags and bio->bi_rw,
+        * first three bits are identical in rq->cmd_flags and bio->bi_rw,
         * see bio.h and blkdev.h
         */
-       rq->flags = rw;
+       rq->cmd_flags = rw | REQ_ALLOCED;
 
        if (priv) {
-               if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+               if (unlikely(elv_set_request(q, rq, gfp_mask))) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
-               rq->flags |= REQ_ELVPRIV;
+               rq->cmd_flags |= REQ_ELVPRIV;
        }
 
        return rq;
@@ -1851,7 +2031,7 @@ static void __freed_request(request_queue_t *q, int rw)
        struct request_list *rl = &q->rq;
 
        if (rl->count[rw] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, rw);
+               blk_clear_queue_congested(q, rw);
 
        if (rl->count[rw] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[rw]))
@@ -1890,40 +2070,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
-       struct io_context *ioc = current_io_context(GFP_ATOMIC);
-       int priv;
+       struct io_context *ioc = NULL;
+       int may_queue, priv;
 
-       if (rl->count[rw]+1 >= q->nr_requests) {
-               /*
-                * The queue will fill after this allocation, so set it as
-                * full, and mark this process as "batching". This process
-                * will be allowed to complete a batch of requests, others
-                * will be blocked.
-                */
-               if (!blk_queue_full(q, rw)) {
-                       ioc_set_batching(q, ioc);
-                       blk_set_queue_full(q, rw);
-               }
-       }
+       may_queue = elv_may_queue(q, rw);
+       if (may_queue == ELV_MQUEUE_NO)
+               goto rq_starved;
 
-       switch (elv_may_queue(q, rw, bio)) {
-               case ELV_MQUEUE_NO:
-                       goto rq_starved;
-               case ELV_MQUEUE_MAY:
-                       break;
-               case ELV_MQUEUE_MUST:
-                       goto get_rq;
-       }
-
-       if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-               /*
-                * The queue is full and the allocating process is not a
-                * "batcher", and not exempted by the IO scheduler
-                */
-               goto out;
+       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[rw]+1 >= q->nr_requests) {
+                       ioc = current_io_context(GFP_ATOMIC, q->node);
+                       /*
+                        * The queue will fill after this allocation, so set
+                        * it as full, and mark this process as "batching".
+                        * This process will be allowed to complete a batch of
+                        * requests, others will be blocked.
+                        */
+                       if (!blk_queue_full(q, rw)) {
+                               ioc_set_batching(q, ioc);
+                               blk_set_queue_full(q, rw);
+                       } else {
+                               if (may_queue != ELV_MQUEUE_MUST
+                                               && !ioc_batching(q, ioc)) {
+                                       /*
+                                        * The queue is full and the allocating
+                                        * process is not a "batcher", and not
+                                        * exempted by the IO scheduler
+                                        */
+                                       goto out;
+                               }
+                       }
+               }
+               blk_set_queue_congested(q, rw);
        }
 
-get_rq:
        /*
         * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
@@ -1934,8 +2114,6 @@ get_rq:
 
        rl->count[rw]++;
        rl->starved[rw] = 0;
-       if (rl->count[rw] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, rw);
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
@@ -1943,8 +2121,8 @@ get_rq:
 
        spin_unlock_irq(q->queue_lock);
 
-       rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-       if (!rq) {
+       rq = blk_alloc_request(q, rw, priv, gfp_mask);
+       if (unlikely(!rq)) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
                 * we might have messed up.
@@ -1969,11 +2147,18 @@ rq_starved:
                goto out;
        }
 
+       /*
+        * ioc may be NULL here, and ioc_batching will be false. That's
+        * OK, if the queue is under the request limit then requests need
+        * not count toward the nr_batch_requests limit. There will always
+        * be some limit enforced by BLK_BATCH_TIME.
+        */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
        
        rq_init(q, rq);
-       rq->rl = rl;
+
+       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
 }
@@ -2002,6 +2187,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                if (!rq) {
                        struct io_context *ioc;
 
+                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();
@@ -2012,7 +2199,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                         * up to a big batch of them for a small period time.
                         * See ioc_batching, ioc_set_batching
                         */
-                       ioc = current_io_context(GFP_NOIO);
+                       ioc = current_io_context(GFP_NOIO, q->node);
                        ioc_set_batching(q, ioc);
 
                        spin_lock_irq(q->queue_lock);
@@ -2043,6 +2230,25 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
+/**
+ * blk_start_queueing - initiate dispatch of requests to device
+ * @q:         request queue to kick into gear
+ *
+ * This is basically a helper to remove the need to know whether a queue
+ * is plugged or not if someone just wants to initiate dispatch of requests
+ * for this queue.
+ *
+ * The queue lock must be held with interrupts disabled.
+ */
+void blk_start_queueing(request_queue_t *q)
+{
+       if (!blk_queue_plugged(q))
+               q->request_fn(q);
+       else
+               __generic_unplug_device(q);
+}
+EXPORT_SYMBOL(blk_start_queueing);
+
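
For review, the typical call pattern, mirroring what blk_insert_request() now does below: with the queue lock held and interrupts disabled, queue the request and let the helper choose between calling ->request_fn directly and unplugging (mydrv_* is an assumed name):

static void mydrv_kick_queue(request_queue_t *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
        blk_start_queueing(q);          /* replaces the open-coded plug check */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
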
 /**
  * blk_requeue_request - put a request back on queue
  * @q:         request queue where request should be inserted
@@ -2055,6 +2261,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
@@ -2093,7 +2301,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
         * must not attempt merges on this) and that it acts as a soft
         * barrier
         */
-       rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
+       rq->cmd_flags |= REQ_SOFTBARRIER;
 
        rq->special = data;
 
@@ -2107,16 +2316,90 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
        drive_stat_acct(rq, rq->nr_sectors, 1);
        __elv_add_request(q, rq, where, 0);
-
-       if (blk_queue_plugged(q))
-               __generic_unplug_device(q);
-       else
-               q->request_fn(q);
+       blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+       int ret = 0;
+
+       if (bio) {
+               if (bio_flagged(bio, BIO_USER_MAPPED))
+                       bio_unmap_user(bio);
+               else
+                       ret = bio_uncopy_user(bio);
+       }
+
+       return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+                            void __user *ubuf, unsigned int len)
+{
+       unsigned long uaddr;
+       struct bio *bio, *orig_bio;
+       int reading, ret;
+
+       reading = rq_data_dir(rq) == READ;
+
+       /*
+        * if alignment requirement is satisfied, map in user pages for
+        * direct dma. else, set up kernel bounce buffers
+        */
+       uaddr = (unsigned long) ubuf;
+       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+               bio = bio_map_user(q, NULL, uaddr, len, reading);
+       else
+               bio = bio_copy_user(q, uaddr, len, reading);
+
+       if (IS_ERR(bio)) {
+               return PTR_ERR(bio);
+       }
+
+       orig_bio = bio;
+       blk_queue_bounce(q, &bio);
+       /*
+        * We link the bounce buffer in and could have to traverse it
+        * later so we have to get a ref to prevent it from being freed
+        */
+       bio_get(bio);
+
+       /*
+        * for most (all? don't know of any) queues we could
+        * skip grabbing the queue lock here. only drivers with
+        * funky private ->back_merge_fn() function could be
+        * problematic.
+        */
+       spin_lock_irq(q->queue_lock);
+       if (!rq->bio)
+               blk_rq_bio_prep(q, rq, bio);
+       else if (!q->back_merge_fn(q, rq, bio)) {
+               ret = -EINVAL;
+               spin_unlock_irq(q->queue_lock);
+               goto unmap_bio;
+       } else {
+               rq->biotail->bi_next = bio;
+               rq->biotail = bio;
+
+               rq->nr_sectors += bio_sectors(bio);
+               rq->hard_nr_sectors = rq->nr_sectors;
+               rq->data_len += bio->bi_size;
+       }
+       spin_unlock_irq(q->queue_lock);
+
+       return bio->bi_size;
+
+unmap_bio:
+       /* if it was bounced we must call the end io function */
+       bio_endio(bio, bio->bi_size, 0);
+       __blk_rq_unmap_user(orig_bio);
+       bio_put(bio);
+       return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:         request queue where request should be inserted
@@ -2138,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  *    unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                   unsigned int len)
+                   unsigned long len)
 {
-       unsigned long uaddr;
-       struct bio *bio;
-       int reading;
+       unsigned long bytes_read = 0;
+       int ret;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
 
-       reading = rq_data_dir(rq) == READ;
+       while (bytes_read != len) {
+               unsigned long map_len, end, start;
 
-       /*
-        * if alignment requirement is satisfied, map in user pages for
-        * direct dma. else, set up kernel bounce buffers
-        */
-       uaddr = (unsigned long) ubuf;
-       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-               bio = bio_map_user(q, NULL, uaddr, len, reading);
-       else
-               bio = bio_copy_user(q, uaddr, len, reading);
+               map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+               end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+                                                               >> PAGE_SHIFT;
+               start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-       if (!IS_ERR(bio)) {
-               rq->bio = rq->biotail = bio;
-               blk_rq_bio_prep(q, rq, bio);
+               /*
+                * A bad offset could cause us to require BIO_MAX_PAGES + 1
+                * pages. If this happens we just lower the requested
+                * mapping len by a page so that it fits.
+                */
+               if (end - start > BIO_MAX_PAGES)
+                       map_len -= PAGE_SIZE;
 
-               rq->buffer = rq->data = NULL;
-               rq->data_len = len;
-               return 0;
+               ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+               if (ret < 0)
+                       goto unmap_rq;
+               bytes_read += ret;
+               ubuf += ret;
        }
 
-       /*
-        * bio is the err-ptr
-        */
-       return PTR_ERR(bio);
+       rq->buffer = rq->data = NULL;
+       return 0;
+unmap_rq:
+       blk_rq_unmap_user(rq);
+       return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
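For context, a typical REQ_BLOCK_PC caller of this interface allocates a request, maps the user buffer, executes the request synchronously and unmaps it afterwards. The sketch below is illustrative only (example_pc_io() is a hypothetical helper and the command setup is elided); it assumes the signatures shown in this patch, where blk_rq_map_user() takes an unsigned long length and blk_rq_unmap_user() takes the request rather than a bio.

/*
 * Minimal passthrough round trip, in the style of a 2.6-era SCSI
 * ioctl path.  Hypothetical helper; command setup is elided.
 */
static int example_pc_io(request_queue_t *q, struct gendisk *disk,
                         void __user *ubuf, unsigned long len, int write)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        /*
         * Map the user buffer: this bounces and/or splits into several
         * bios as needed; rq->bio ends up at the head of the chain.
         */
        err = blk_rq_map_user(q, rq, ubuf, len);
        if (err)
                goto out;

        /* ... mark rq as a packet command and fill in rq->cmd[] etc. ... */

        err = blk_execute_rq(q, disk, rq, 0);

        /* Always unmap; this walks rq->bio back through the mapped bios. */
        blk_rq_unmap_user(rq);
out:
        blk_put_request(rq);
        return err;
}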
@@ -2199,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                       struct sg_iovec *iov, int iov_count)
+                       struct sg_iovec *iov, int iov_count, unsigned int len)
 {
        struct bio *bio;
 
@@ -2213,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       rq->bio = rq->biotail = bio;
+       if (bio->bi_size != len) {
+               bio_endio(bio, bio->bi_size, 0);
+               bio_unmap_user(bio);
+               return -EINVAL;
+       }
+
+       bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
-       rq->data_len = bio->bi_size;
        return 0;
 }
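The iovec variant maps several user segments in one call; the new len argument states the expected total so a short mapping can be rejected with -EINVAL, as the hunk above shows. A minimal sketch, assuming hypothetical user pointers ubuf0/ubuf1 of lengths len0/len1 and a request rq already allocated:

        struct sg_iovec iov[2];

        iov[0].iov_base = ubuf0;        /* user-space pointers, not kernel memory */
        iov[0].iov_len  = len0;
        iov[1].iov_base = ubuf1;
        iov[1].iov_len  = len1;

        if (blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1))
                return -EIO;            /* mapping failed or came up short */

        /* ... execute rq, then blk_rq_unmap_user(rq) as above ... */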
 
@@ -2224,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:       bio to be unmapped
- * @ulen:      length of user buffer
+ * @rq:                rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-       int ret = 0;
+       struct bio *bio, *mapped_bio;
 
-       if (bio) {
-               if (bio_flagged(bio, BIO_USER_MAPPED))
-                       bio_unmap_user(bio);
+       while ((bio = rq->bio)) {
+               if (bio_flagged(bio, BIO_BOUNCED))
+                       mapped_bio = bio->bi_private;
                else
-                       ret = bio_uncopy_user(bio);
-       }
+                       mapped_bio = bio;
 
+               __blk_rq_unmap_user(mapped_bio);
+               rq->bio = bio->bi_next;
+               bio_put(bio);
+       }
        return 0;
 }
 
@@ -2259,7 +2552,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
        struct bio *bio;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;
@@ -2271,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);
 
-       rq->bio = rq->biotail = bio;
        blk_rq_bio_prep(q, rq, bio);
-
        rq->buffer = rq->data = NULL;
-       rq->data_len = len;
        return 0;
 }
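For kernel-resident buffers the flow is the same but without the user-copy and alignment decisions. A rough fragment (rq, q, disk, kbuf and len assumed in scope; the trailing gfp argument is recalled from this era's prototype rather than shown in the hunk above):

        err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!err) {
                /* ... fill in rq->cmd[] and friends ... */
                err = blk_execute_rq(q, disk, rq, 0);
        }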
 
@@ -2295,17 +2585,19 @@ EXPORT_SYMBOL(blk_rq_map_kern);
  */
 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
                           struct request *rq, int at_head,
-                          void (*done)(struct request *))
+                          rq_end_io_fn *done)
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
        rq->rq_disk = bd_disk;
-       rq->flags |= REQ_NOMERGE;
+       rq->cmd_flags |= REQ_NOMERGE;
        rq->end_io = done;
-       elv_add_request(q, rq, where, 1);
-       generic_unplug_device(q);
+       WARN_ON(irqs_disabled());
+       spin_lock_irq(q->queue_lock);
+       __elv_add_request(q, rq, where, 1);
+       __generic_unplug_device(q);
+       spin_unlock_irq(q->queue_lock);
 }
-
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
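Asynchronous callers supply their own rq_end_io_fn. With this patch the callback receives the error status as a second argument, runs with the queue lock held (end_that_request_last() below invokes it under the lock) and is responsible for the final put of the request, as blk_end_sync_rq() demonstrates. A hedged sketch with hypothetical my_* names:

static void my_rq_done(struct request *rq, int error)
{
        /* runs under rq->q->queue_lock, from end_that_request_last() */
        struct my_ctx *ctx = rq->end_io_data;   /* private cookie */

        complete_my_work(ctx, error);           /* hypothetical helper */
        __blk_put_request(rq->q, rq);           /* callback owns the last put */
}

static void my_submit(request_queue_t *q, struct gendisk *disk,
                      struct request *rq, struct my_ctx *ctx)
{
        rq->end_io_data = ctx;
        blk_execute_rq_nowait(q, disk, rq, 1, my_rq_done);
}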
 
 /**
@@ -2322,7 +2614,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
 {
-       DECLARE_COMPLETION(wait);
+       DECLARE_COMPLETION_ONSTACK(wait);
        char sense[SCSI_SENSE_BUFFERSIZE];
        int err = 0;
 
@@ -2338,10 +2630,9 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                rq->sense_len = 0;
        }
 
-       rq->waiting = &wait;
+       rq->end_io_data = &wait;
        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
        wait_for_completion(&wait);
-       rq->waiting = NULL;
 
        if (rq->errors)
                err = -EIO;
@@ -2443,13 +2734,13 @@ void disk_round_stats(struct gendisk *disk)
        disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
 void __blk_put_request(request_queue_t *q, struct request *req)
 {
-       struct request_list *rl = req->rl;
-
        if (unlikely(!q))
                return;
        if (unlikely(--req->ref_count))
@@ -2457,18 +2748,16 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 
        elv_completed_request(q, req);
 
-       req->rq_status = RQ_INACTIVE;
-       req->rl = NULL;
-
        /*
         * Request may not have originated from ll_rw_blk. If not,
         * it didn't come out of our reserved rq pools.
         */
-       if (rl) {
+       if (req->cmd_flags & REQ_ALLOCED) {
                int rw = rq_data_dir(req);
-               int priv = req->flags & REQ_ELVPRIV;
+               int priv = req->cmd_flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
+               BUG_ON(!hlist_unhashed(&req->hash));
 
                blk_free_request(q, req);
                freed_request(q, rw, priv);
@@ -2498,12 +2787,13 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
 {
-       struct completion *waiting = rq->waiting;
+       struct completion *waiting = rq->end_io_data;
 
-       rq->waiting = NULL;
+       rq->end_io_data = NULL;
        __blk_put_request(rq->q, rq);
 
        /*
@@ -2514,29 +2804,6 @@ void blk_end_sync_rq(struct request *rq)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-       long ret;
-       DEFINE_WAIT(wait);
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-       ret = io_schedule_timeout(timeout);
-       finish_wait(wqh, &wait);
-       return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -2547,14 +2814,14 @@ static int attempt_merge(request_queue_t *q, struct request *req,
                return 0;
 
        /*
-        * not contigious
+        * not contiguous
         */
        if (req->sector + req->nr_sectors != next->sector)
                return 0;
 
        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
-           || next->waiting || next->special)
+           || next->special)
                return 0;
 
        /*
@@ -2613,44 +2880,48 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
        return 0;
 }
 
-/**
- * blk_attempt_remerge  - attempt to remerge active head with next request
- * @q:    The &request_queue_t belonging to the device
- * @rq:   The head request (usually)
- *
- * Description:
- *    For head-active devices, the queue can easily be unplugged so quickly
- *    that proper merging is not done on the front request. This may hurt
- *    performance greatly for some devices. The block layer cannot safely
- *    do merging on that first request for these queues, but the driver can
- *    call this function and make it happen any way. Only the driver knows
- *    when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
+static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-       unsigned long flags;
+       req->cmd_type = REQ_TYPE_FS;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       attempt_back_merge(q, rq);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
+       /*
+        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+        */
+       if (bio_rw_ahead(bio) || bio_failfast(bio))
+               req->cmd_flags |= REQ_FAILFAST;
 
-EXPORT_SYMBOL(blk_attempt_remerge);
+       /*
+        * REQ_BARRIER implies no merging, but let's make it explicit
+        */
+       if (unlikely(bio_barrier(bio)))
+               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+       if (bio_sync(bio))
+               req->cmd_flags |= REQ_RW_SYNC;
+       if (bio_rw_meta(bio))
+               req->cmd_flags |= REQ_RW_META;
+
+       req->errors = 0;
+       req->hard_sector = req->sector = bio->bi_sector;
+       req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+       req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+       req->nr_phys_segments = bio_phys_segments(req->q, bio);
+       req->nr_hw_segments = bio_hw_segments(req->q, bio);
+       req->buffer = bio_data(bio);    /* see ->buffer comment above */
+       req->bio = req->biotail = bio;
+       req->ioprio = bio_prio(bio);
+       req->rq_disk = bio->bi_bdev->bd_disk;
+       req->start_time = jiffies;
+}
 
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
-       unsigned short prio;
-       sector_t sector;
+       int el_ret, nr_sectors, barrier, err;
+       const unsigned short prio = bio_prio(bio);
+       const int sync = bio_sync(bio);
 
-       sector = bio->bi_sector;
        nr_sectors = bio_sectors(bio);
-       cur_nr_sectors = bio_cur_sectors(bio);
-       prio = bio_prio(bio);
-
-       rw = bio_data_dir(bio);
-       sync = bio_sync(bio);
 
        /*
         * low level driver can indicate that it wants pages above a
@@ -2659,10 +2930,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       spin_lock_prefetch(q->queue_lock);
-
        barrier = bio_barrier(bio);
-       if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
+       if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
@@ -2680,13 +2949,15 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->back_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                        req->ioprio = ioprio_best(req->ioprio, prio);
                        drive_stat_acct(req, nr_sectors, 0);
                        if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req);
+                               elv_merged_request(q, req, el_ret);
                        goto out;
 
                case ELEVATOR_FRONT_MERGE:
@@ -2695,6 +2966,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->front_merge_fn(q, req, bio))
                                break;
 
+                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
                        bio->bi_next = req->bio;
                        req->bio = bio;
 
@@ -2704,14 +2977,14 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                         * not touch req->buffer either...
                         */
                        req->buffer = bio_data(bio);
-                       req->current_nr_sectors = cur_nr_sectors;
-                       req->hard_cur_sectors = cur_nr_sectors;
-                       req->sector = req->hard_sector = sector;
+                       req->current_nr_sectors = bio_cur_sectors(bio);
+                       req->hard_cur_sectors = req->current_nr_sectors;
+                       req->sector = req->hard_sector = bio->bi_sector;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                        req->ioprio = ioprio_best(req->ioprio, prio);
                        drive_stat_acct(req, nr_sectors, 0);
                        if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req);
+                               elv_merged_request(q, req, el_ret);
                        goto out;
 
                /* ELV_NO_MERGE: elevator says don't/can't merge. */
@@ -2724,7 +2997,7 @@ get_rq:
         * Grab a free request. This might sleep but cannot fail.
         * Returns with the queue unlocked.
         */
-       req = get_request_wait(q, rw, bio);
+       req = get_request_wait(q, bio_data_dir(bio), bio);
 
        /*
         * After dropping the lock and possibly sleeping here, our request
@@ -2732,33 +3005,7 @@ get_rq:
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
-
-       req->flags |= REQ_CMD;
-
-       /*
-        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-        */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->flags |= REQ_FAILFAST;
-
-       /*
-        * REQ_BARRIER implies no merging, but lets make it explicit
-        */
-       if (unlikely(barrier))
-               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-       req->errors = 0;
-       req->hard_sector = req->sector = sector;
-       req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-       req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-       req->nr_phys_segments = bio_phys_segments(q, bio);
-       req->nr_hw_segments = bio_hw_segments(q, bio);
-       req->buffer = bio_data(bio);    /* see ->buffer comment above */
-       req->waiting = NULL;
-       req->bio = req->biotail = bio;
-       req->ioprio = prio;
-       req->rq_disk = bio->bi_bdev->bd_disk;
-       req->start_time = jiffies;
+       init_request_from_bio(req, bio);
 
        spin_lock_irq(q->queue_lock);
        if (elv_queue_empty(q))
@@ -2837,7 +3084,9 @@ void generic_make_request(struct bio *bio)
 {
        request_queue_t *q;
        sector_t maxsector;
+       sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
+       dev_t old_dev;
 
        might_sleep();
        /* Test device or partition size, when known. */
@@ -2864,6 +3113,8 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
+       old_sector = -1;
+       old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
 
@@ -2896,6 +3147,31 @@ end_io:
                 */
                blk_partition_remap(bio);
 
+               if (old_sector != -1)
+                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+                                           old_sector);
+
+               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+               old_sector = bio->bi_sector;
+               old_dev = bio->bi_bdev->bd_dev;
+
+               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+               if (maxsector) {
+                       sector_t sector = bio->bi_sector;
+
+                       if (maxsector < nr_sectors ||
+                                       maxsector - nr_sectors < sector) {
+                               /*
+                                * This may well happen - partitions are not
+                                * checked to make sure they are within the size
+                                * of the whole device.
+                                */
+                               handle_bad_sector(bio);
+                               goto end_io;
+                       }
+               }
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
@@ -2920,9 +3196,9 @@ void submit_bio(int rw, struct bio *bio)
        BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
        if (rw & WRITE)
-               mod_page_state(pgpgout, count);
+               count_vm_events(PGPGOUT, count);
        else
-               mod_page_state(pgpgin, count);
+               count_vm_events(PGPGIN, count);
 
        if (unlikely(block_dump)) {
                char b[BDEVNAME_SIZE];
@@ -3015,6 +3291,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
        int total_bytes, bio_nbytes, error, next_idx = 0;
        struct bio *bio;
 
+       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
        /*
         * extend uptodate bool to allow < 0 value to be direct io error
         */
@@ -3030,7 +3308,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
                req->errors = 0;
 
        if (!uptodate) {
-               if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+               if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
                        printk("end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
                                (unsigned long long)req->sector);
@@ -3039,7 +3317,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
        if (blk_fs_request(req) && req->rq_disk) {
                const int rw = rq_data_dir(req);
 
-               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+               disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
        }
 
        total_bytes = bio_nbytes = 0;
@@ -3049,7 +3327,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
-                       bio_endio(bio, nbytes, error);
+                       if (!ordered_bio_endio(req, bio, nbytes, error))
+                               bio_endio(bio, nbytes, error);
                        next_idx = 0;
                        bio_nbytes = 0;
                } else {
@@ -3104,7 +3383,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
         * if the request wasn't completed, update state
         */
        if (bio_nbytes) {
-               bio_endio(bio, bio_nbytes, error);
+               if (!ordered_bio_endio(req, bio, bio_nbytes, error))
+                       bio_endio(bio, bio_nbytes, error);
                bio->bi_idx += next_idx;
                bio_iovec(bio)->bv_offset += nr_bytes;
                bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3158,17 +3438,110 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 
 EXPORT_SYMBOL(end_that_request_chunk);
 
+/*
+ * splice the per-cpu completion list to a local list and complete
+ * the requests via each queue's ->softirq_done_fn()
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+       struct list_head *cpu_list, local_list;
+
+       local_irq_disable();
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_replace_init(cpu_list, &local_list);
+       local_irq_enable();
+
+       while (!list_empty(&local_list)) {
+               struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+               list_del_init(&rq->donelist);
+               rq->q->softirq_done_fn(rq);
+       }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+                         void *hcpu)
+{
+       /*
+        * If a CPU goes away, splice its entries to the current CPU
+        * and trigger a run of the softirq
+        */
+       if (action == CPU_DEAD) {
+               int cpu = (unsigned long) hcpu;
+
+               local_irq_disable();
+               list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                                &__get_cpu_var(blk_cpu_done));
+               raise_softirq_irqoff(BLOCK_SOFTIRQ);
+               local_irq_enable();
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+       .notifier_call  = blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out of order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+       struct list_head *cpu_list;
+       unsigned long flags;
+
+       BUG_ON(!req->q->softirq_done_fn);
+
+       local_irq_save(flags);
+
+       cpu_list = &__get_cpu_var(blk_cpu_done);
+       list_add_tail(&req->donelist, cpu_list);
+       raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+       local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
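A driver opting into softirq completion splits its completion path in two: the hard interrupt handler only queues the request with blk_complete_request(), and the callback registered through blk_queue_softirq_done() finishes it later in BLOCK_SOFTIRQ context. A minimal sketch with a hypothetical mydrv_ name, assuming the request was dequeued when it was issued to the hardware:

static void mydrv_softirq_done(struct request *rq)
{
        int uptodate = !rq->errors;
        unsigned long flags;

        if (!end_that_request_chunk(rq, uptodate, rq->hard_nr_sectors << 9)) {
                /* the final step needs the queue lock, see below */
                spin_lock_irqsave(rq->q->queue_lock, flags);
                end_that_request_last(rq, uptodate);
                spin_unlock_irqrestore(rq->q->queue_lock, flags);
        }
}

At init time the driver would call blk_queue_softirq_done(q, mydrv_softirq_done), and its hard interrupt handler would then simply call blk_complete_request(rq) for each request the hardware has finished.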
+
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
 {
        struct gendisk *disk = req->rq_disk;
+       int error;
+
+       /*
+        * extend uptodate bool to allow < 0 value to be direct io error
+        */
+       error = 0;
+       if (end_io_error(uptodate))
+               error = !uptodate ? -EIO : uptodate;
 
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
-       if (disk && blk_fs_request(req)) {
+       /*
+        * Account IO completion.  bar_rq isn't accounted as a normal
+        * IO on queueing nor completion.  Accounting the containing
+        * request is enough.
+        */
+       if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
 
@@ -3178,7 +3551,7 @@ void end_that_request_last(struct request *req)
                disk->in_flight--;
        }
        if (req->end_io)
-               req->end_io(req);
+               req->end_io(req, error);
        else
                __blk_put_request(req->q, req);
 }
@@ -3190,7 +3563,7 @@ void end_request(struct request *req, int uptodate)
        if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
                add_disk_randomness(req->rq_disk);
                blkdev_dequeue_request(req);
-               end_that_request_last(req);
+               end_that_request_last(req, uptodate);
        }
 }
 
@@ -3198,8 +3571,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-       /* first three bits are identical in rq->flags and bio->bi_rw */
-       rq->flags |= (bio->bi_rw & 7);
+       /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+       rq->cmd_flags |= (bio->bi_rw & 3);
 
        rq->nr_phys_segments = bio_phys_segments(q, bio);
        rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -3207,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->buffer = bio_data(bio);
+       rq->data_len = bio->bi_size;
 
        rq->bio = rq->biotail = bio;
 }
@@ -3228,6 +3602,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+       int i;
+
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
@@ -3241,6 +3617,12 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+       for_each_possible_cpu(i)
+               INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+       open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+       register_hotcpu_notifier(&blk_cpu_notifier);
+
        blk_max_low_pfn = max_low_pfn;
        blk_max_pfn = max_pfn;
 
@@ -3258,10 +3640,18 @@ void put_io_context(struct io_context *ioc)
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
+               struct cfq_io_context *cic;
+
+               rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
-               if (ioc->cic && ioc->cic->dtor)
-                       ioc->cic->dtor(ioc->cic);
+               if (ioc->cic_root.rb_node != NULL) {
+                       struct rb_node *n = rb_first(&ioc->cic_root);
+
+                       cic = rb_entry(n, struct cfq_io_context, rb_node);
+                       cic->dtor(ioc);
+               }
+               rcu_read_unlock();
 
                kmem_cache_free(iocontext_cachep, ioc);
        }
@@ -3271,21 +3661,21 @@ EXPORT_SYMBOL(put_io_context);
 /* Called by the exiting task */
 void exit_io_context(void)
 {
-       unsigned long flags;
        struct io_context *ioc;
+       struct cfq_io_context *cic;
 
-       local_irq_save(flags);
        task_lock(current);
        ioc = current->io_context;
        current->io_context = NULL;
-       ioc->task = NULL;
        task_unlock(current);
-       local_irq_restore(flags);
 
+       ioc->task = NULL;
        if (ioc->aic && ioc->aic->exit)
                ioc->aic->exit(ioc->aic);
-       if (ioc->cic && ioc->cic->exit)
-               ioc->cic->exit(ioc->cic);
+       if (ioc->cic_root.rb_node != NULL) {
+               cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+               cic->exit(ioc);
+       }
 
        put_io_context(ioc);
 }
@@ -3298,7 +3688,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
        struct task_struct *tsk = current;
        struct io_context *ret;
@@ -3307,15 +3697,17 @@ struct io_context *current_io_context(gfp_t gfp_flags)
        if (likely(ret))
                return ret;
 
-       ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+       ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
        if (ret) {
                atomic_set(&ret->refcount, 1);
                ret->task = current;
-               ret->set_ioprio = NULL;
+               ret->ioprio_changed = 0;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
-               ret->cic = NULL;
+               ret->cic_root.rb_node = NULL;
+               /* make sure set_task_ioprio() sees the settings above */
+               smp_wmb();
                tsk->io_context = ret;
        }
 
@@ -3329,10 +3721,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
        struct io_context *ret;
-       ret = current_io_context(gfp_flags);
+       ret = current_io_context(gfp_flags, node);
        if (likely(ret))
                atomic_inc(&ret->refcount);
        return ret;
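A caller that needs to keep the submitting task's io_context beyond the current call takes a counted reference and drops it later; a minimal fragment (GFP_NOIO and numa_node_id() are illustrative choices, not mandated by this interface):

        struct io_context *ioc;

        ioc = get_io_context(GFP_NOIO, numa_node_id());  /* takes a reference */
        if (ioc) {
                /* ... use or stash ioc ... */
                put_io_context(ioc);                     /* drop it when done */
        }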
@@ -3395,21 +3787,24 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
        struct request_list *rl = &q->rq;
+       unsigned long nr;
+       int ret = queue_var_store(&nr, page, count);
+       if (nr < BLKDEV_MIN_RQ)
+               nr = BLKDEV_MIN_RQ;
 
-       int ret = queue_var_store(&q->nr_requests, page, count);
-       if (q->nr_requests < BLKDEV_MIN_RQ)
-               q->nr_requests = BLKDEV_MIN_RQ;
+       spin_lock_irq(q->queue_lock);
+       q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, READ);
+               blk_set_queue_congested(q, READ);
        else if (rl->count[READ] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, READ);
+               blk_clear_queue_congested(q, READ);
 
        if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, WRITE);
+               blk_set_queue_congested(q, WRITE);
        else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, WRITE);
+               blk_clear_queue_congested(q, WRITE);
 
        if (rl->count[READ] >= q->nr_requests) {
                blk_set_queue_full(q, READ);
@@ -3424,6 +3819,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
+       spin_unlock_irq(q->queue_lock);
        return ret;
 }
 
@@ -3441,9 +3837,6 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
        ssize_t ret = queue_var_store(&ra_kb, page, count);
 
        spin_lock_irq(q->queue_lock);
-       if (ra_kb > (q->max_sectors >> 1))
-               ra_kb = (q->max_sectors >> 1);
-
        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);
 
@@ -3539,13 +3932,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->show)
                return -EIO;
-
-       return entry->show(q, page);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->show(q, page);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static ssize_t
@@ -3553,13 +3952,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->store)
                return -EIO;
-
-       return entry->store(q, page, length);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->store(q, page, length);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3570,6 +3976,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
+       .release        = blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3582,19 +3989,17 @@ int blk_register_queue(struct gendisk *disk)
                return -ENXIO;
 
        q->kobj.parent = kobject_get(&disk->kobj);
-       if (!q->kobj.parent)
-               return -EBUSY;
 
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-
-       ret = kobject_register(&q->kobj);
+       ret = kobject_add(&q->kobj);
        if (ret < 0)
                return ret;
 
+       kobject_uevent(&q->kobj, KOBJ_ADD);
+
        ret = elv_register_queue(q);
        if (ret) {
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                return ret;
        }
 
@@ -3608,7 +4013,8 @@ void blk_unregister_queue(struct gendisk *disk)
        if (q && q->request_fn) {
                elv_unregister_queue(q);
 
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                kobject_put(&disk->kobj);
        }
 }