fix mismerge in ll_rw_blk.c
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index f6fda03..0c75995 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
+#include <linux/blkdev.h>
 
 /*
  * for max sense size
@@ -36,6 +37,7 @@
 
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 
 /*
  * For the allocated request tables
@@ -274,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
        rq->errors = 0;
        rq->rq_status = RQ_ACTIVE;
        rq->bio = rq->biotail = NULL;
+       rq->ioprio = 0;
        rq->buffer = NULL;
        rq->ref_count = 1;
        rq->q = q;
@@ -775,9 +778,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
 static int
 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 {
-       int bits, i;
        struct request **tag_index;
        unsigned long *tag_map;
+       int nr_ulongs;
 
        if (depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
@@ -789,24 +792,18 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
        if (!tag_index)
                goto fail;
 
-       bits = (depth / BLK_TAGS_PER_LONG) + 1;
-       tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+       nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
 
        memset(tag_index, 0, depth * sizeof(struct request *));
-       memset(tag_map, 0, bits * sizeof(unsigned long));
+       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
+       tags->real_max_depth = depth;
        tags->max_depth = depth;
-       tags->real_max_depth = bits * BITS_PER_LONG;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;
 
-       /*
-        * set the upper bits if the depth isn't a multiple of the word size
-        */
-       for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
-               __set_bit(i, tag_map);
-
        return 0;
 fail:
        kfree(tag_index);
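
Note: the tag map is now sized as exactly enough unsigned longs to hold
"depth" bits, and real_max_depth records the request depth itself rather
than the rounded-up bit count, so the old loop that pre-set the spare
upper bits is no longer needed.  A small user-space sketch of the sizing
arithmetic; the ALIGN() below is an assumption mirroring the kernel's
round-up macro (alignment must be a power of two):

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		int depth = 100;
		size_t nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;

		/* with 64-bit longs: ALIGN(100, 64) = 128, i.e. two ulongs */
		printf("depth %d needs %zu unsigned longs\n", depth, nr_ulongs);
		return 0;
	}
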
@@ -871,13 +868,16 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
-       int bits, max_depth;
+       int max_depth, nr_ulongs;
 
        if (!bqt)
                return -ENXIO;
 
        /*
-        * don't bother sizing down
+        * If we already have a large enough real_max_depth, just
+        * adjust max_depth.  *NOTE* as requests with tag values
+        * between new_depth and real_max_depth can be in flight, the
+        * tag map cannot be shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
@@ -895,8 +895,8 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
                return -ENOMEM;
 
        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-       bits = max_depth / BLK_TAGS_PER_LONG;
-       memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+       nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+       memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
 
        kfree(tag_index);
        kfree(tag_map);
@@ -927,10 +927,15 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
        BUG_ON(tag == -1);
 
        if (unlikely(tag >= bqt->real_max_depth))
+               /*
+                * This can happen after tag depth has been reduced.
+                * FIXME: how about a warning or info message here?
+                */
                return;
 
        if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
-               printk("attempt to clear non-busy tag (%d)\n", tag);
+               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+                      __FUNCTION__, tag);
                return;
        }
 
@@ -939,7 +944,8 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
        rq->tag = -1;
 
        if (unlikely(bqt->tag_index[tag] == NULL))
-               printk("tag %d is missing\n", tag);
+               printk(KERN_ERR "%s: tag %d is missing\n",
+                      __FUNCTION__, tag);
 
        bqt->tag_index[tag] = NULL;
        bqt->busy--;
@@ -968,24 +974,20 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
-       unsigned long *map = bqt->tag_map;
-       int tag = 0;
+       int tag;
 
        if (unlikely((rq->flags & REQ_QUEUED))) {
                printk(KERN_ERR 
-                      "request %p for device [%s] already tagged %d",
-                      rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+                      "%s: request %p for device [%s] already tagged %d",
+                      __FUNCTION__, rq,
+                      rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }
 
-       for (map = bqt->tag_map; *map == -1UL; map++) {
-               tag += BLK_TAGS_PER_LONG;
-
-               if (tag >= bqt->max_depth)
-                       return 1;
-       }
+       tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+       if (tag >= bqt->max_depth)
+               return 1;
 
-       tag += ffz(*map);
        __set_bit(tag, bqt->tag_map);
 
        rq->flags |= REQ_QUEUED;
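
Note: find_first_zero_bit(map, size) returns the index of the first clear
bit, or a value of at least "size" when every bit below size is set, which
is why the single bounds check above is sufficient and why the old
partial-last-word handling could go away.  A toy user-space model of that
contract (not the kernel implementation):

	#include <stdio.h>
	#include <string.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	/* toy stand-in for the kernel's find_first_zero_bit() */
	static unsigned long find_first_zero(const unsigned long *map,
					     unsigned long size)
	{
		unsigned long bit;

		for (bit = 0; bit < size; bit++)
			if (!(map[bit / BITS_PER_LONG] &
			      (1UL << (bit % BITS_PER_LONG))))
				return bit;
		return size;		/* a full map reports >= size */
	}

	int main(void)
	{
		unsigned long map[2];

		memset(map, 0, sizeof(map));
		map[0] = ~0UL;		/* first word of tags all busy */

		/* prints the first bit of the second word (64 on 64-bit) */
		printf("first free tag: %lu\n",
		       find_first_zero(map, 2 * BITS_PER_LONG));
		return 0;
	}
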
@@ -1021,7 +1023,8 @@ void blk_queue_invalidate_tags(request_queue_t *q)
                rq = list_entry_rq(tmp);
 
                if (rq->tag == -1) {
-                       printk("bad tag found on list\n");
+                       printk(KERN_ERR
+                              "%s: bad tag found on list\n", __FUNCTION__);
                        list_del_init(&rq->queuelist);
                        rq->flags &= ~REQ_QUEUED;
                } else
@@ -1149,7 +1152,7 @@ new_hw_segment:
 }
 
 
-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
                                   struct bio *nxt)
 {
        if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1170,9 +1173,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
        return 0;
 }
 
-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
                                 struct bio *nxt)
 {
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1188,8 +1189,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
        return 1;
 }
 
-EXPORT_SYMBOL(blk_hw_contig_segment);
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -1359,8 +1358,8 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
                                struct request *next)
 {
-       int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
-       int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+       int total_phys_segments;
+       int total_hw_segments;
 
        /*
         * First check if the either of the requests are re-queued
@@ -1370,7 +1369,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
                return 0;
 
        /*
-        * Will it become to large?
+        * Will it become too large?
         */
        if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
                return 0;
@@ -1451,17 +1450,13 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
                return;
 
        if (!blk_remove_plug(q))
                return;
 
-       /*
-        * was plugged, fire request_fn if queue has stuff to do
-        */
-       if (elv_next_request(q))
-               q->request_fn(q);
+       q->request_fn(q);
 }
 EXPORT_SYMBOL(__generic_unplug_device);
 
@@ -1646,7 +1641,8 @@ static int blk_init_free_list(request_queue_t *q)
        init_waitqueue_head(&rl->wait[WRITE]);
        init_waitqueue_head(&rl->drain);
 
-       rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
+       rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+                               mempool_free_slab, request_cachep, q->node);
 
        if (!rl->rq_pool)
                return -ENOMEM;
@@ -1658,8 +1654,15 @@ static int __make_request(request_queue_t *, struct bio *);
 
 request_queue_t *blk_alloc_queue(int gfp_mask)
 {
-       request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
+       return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+
+request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+{
+       request_queue_t *q;
 
+       q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
        if (!q)
                return NULL;
 
@@ -1672,8 +1675,7 @@ request_queue_t *blk_alloc_queue(int gfp_mask)
 
        return q;
 }
-
-EXPORT_SYMBOL(blk_alloc_queue);
+EXPORT_SYMBOL(blk_alloc_queue_node);
 
 /**
  * blk_init_queue  - prepare a request queue for use with a block device
@@ -1706,13 +1708,22 @@ EXPORT_SYMBOL(blk_alloc_queue);
  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
  *    when the block device is deactivated (such as at module unload).
  **/
+
 request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
-       request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
+       return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
+
+request_queue_t *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+       request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
        if (!q)
                return NULL;
 
+       q->node = node_id;
        if (blk_init_free_list(q))
                goto out_init;
 
@@ -1755,12 +1766,11 @@ out_init:
        kmem_cache_free(requestq_cachep, q);
        return NULL;
 }
-
-EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_init_queue_node);
 
 int blk_get_queue(request_queue_t *q)
 {
-       if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
                atomic_inc(&q->refcnt);
                return 0;
        }
@@ -1776,8 +1786,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
        mempool_free(rq, q->rq.rq_pool);
 }
 
-static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
-                                               int gfp_mask)
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
 {
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -1790,7 +1800,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
         */
        rq->flags = rw;
 
-       if (!elv_set_request(q, rq, gfp_mask))
+       if (!elv_set_request(q, rq, bio, gfp_mask))
                return rq;
 
        mempool_free(rq, q->rq.rq_pool);
@@ -1822,7 +1832,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
  * is the behaviour we want though - once it gets a wakeup it should be given
  * a nice run.
  */
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
 {
        if (!ioc || ioc_batching(q, ioc))
                return;
@@ -1839,7 +1849,6 @@ static void __freed_request(request_queue_t *q, int rw)
                clear_queue_congested(q, rw);
 
        if (rl->count[rw] + 1 <= q->nr_requests) {
-               smp_mb();
                if (waitqueue_active(&rl->wait[rw]))
                        wake_up(&rl->wait[rw]);
 
@@ -1871,18 +1880,20 @@ static void freed_request(request_queue_t *q, int rw)
 
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+                                  int gfp_mask)
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
-       struct io_context *ioc = get_io_context(gfp_mask);
+       struct io_context *ioc = current_io_context(GFP_ATOMIC);
 
        if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
                goto out;
 
-       spin_lock_irq(q->queue_lock);
        if (rl->count[rw]+1 >= q->nr_requests) {
                /*
                 * The queue will fill after this allocation, so set it as
@@ -1896,7 +1907,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
                }
        }
 
-       switch (elv_may_queue(q, rw)) {
+       switch (elv_may_queue(q, rw, bio)) {
                case ELV_MQUEUE_NO:
                        goto rq_starved;
                case ELV_MQUEUE_MAY:
@@ -1910,18 +1921,25 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
                 * The queue is full and the allocating process is not a
                 * "batcher", and not exempted by the IO scheduler
                 */
-               spin_unlock_irq(q->queue_lock);
                goto out;
        }
 
 get_rq:
+       /*
+        * Only allow batching queuers to allocate up to 50% over the defined
+        * limit of requests, otherwise we could have thousands of requests
+        * allocated with any setting of ->nr_requests
+        */
+       if (rl->count[rw] >= (3 * q->nr_requests / 2))
+               goto out;
+
        rl->count[rw]++;
        rl->starved[rw] = 0;
        if (rl->count[rw] >= queue_congestion_on_threshold(q))
                set_queue_congested(q, rw);
        spin_unlock_irq(q->queue_lock);
 
-       rq = blk_alloc_request(q, rw, gfp_mask);
+       rq = blk_alloc_request(q, rw, bio, gfp_mask);
        if (!rq) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
@@ -1944,7 +1962,6 @@ rq_starved:
                if (unlikely(rl->count[rw] == 0))
                        rl->starved[rw] = 1;
 
-               spin_unlock_irq(q->queue_lock);
                goto out;
        }
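
Note: for scale, with the common default of q->nr_requests = BLKDEV_MAX_RQ
= 128, the new cap added under the get_rq label above means even a
batching task stops allocating once rl->count[rw] reaches
3 * 128 / 2 = 192 requests in that direction, instead of growing without
bound.
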
 
@@ -1954,31 +1971,35 @@ rq_starved:
        rq_init(q, rq);
        rq->rl = rl;
 out:
-       put_io_context(ioc);
        return rq;
 }
 
 /*
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw)
+static struct request *get_request_wait(request_queue_t *q, int rw,
+                                       struct bio *bio)
 {
-       DEFINE_WAIT(wait);
        struct request *rq;
 
-       generic_unplug_device(q);
-       do {
+       rq = get_request(q, rw, bio, GFP_NOIO);
+       while (!rq) {
+               DEFINE_WAIT(wait);
                struct request_list *rl = &q->rq;
 
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               rq = get_request(q, rw, GFP_NOIO);
+               rq = get_request(q, rw, bio, GFP_NOIO);
 
                if (!rq) {
                        struct io_context *ioc;
 
+                       __generic_unplug_device(q);
+                       spin_unlock_irq(q->queue_lock);
                        io_schedule();
 
                        /*
@@ -1987,12 +2008,13 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
                         * up to a big batch of them for a small period time.
                         * See ioc_batching, ioc_set_batching
                         */
-                       ioc = get_io_context(GFP_NOIO);
+                       ioc = current_io_context(GFP_NOIO);
                        ioc_set_batching(q, ioc);
-                       put_io_context(ioc);
+
+                       spin_lock_irq(q->queue_lock);
                }
                finish_wait(&rl->wait[rw], &wait);
-       } while (!rq);
+       }
 
        return rq;
 }
@@ -2003,14 +2025,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
 
        BUG_ON(rw != READ && rw != WRITE);
 
-       if (gfp_mask & __GFP_WAIT)
-               rq = get_request_wait(q, rw);
-       else
-               rq = get_request(q, rw, gfp_mask);
+       spin_lock_irq(q->queue_lock);
+       if (gfp_mask & __GFP_WAIT) {
+               rq = get_request_wait(q, rw, NULL);
+       } else {
+               rq = get_request(q, rw, NULL, gfp_mask);
+               if (!rq)
+                       spin_unlock_irq(q->queue_lock);
+       }
+       /* q->queue_lock is unlocked at this point */
 
        return rq;
 }
-
 EXPORT_SYMBOL(blk_get_request);
 
 /**
@@ -2385,7 +2411,7 @@ int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 
 EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
 
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
        int rw = rq_data_dir(rq);
 
@@ -2467,7 +2493,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
                return;
 
        req->rq_status = RQ_INACTIVE;
-       req->q = NULL;
        req->rl = NULL;
 
        /*
@@ -2596,6 +2621,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
                req->rq_disk->in_flight--;
        }
 
+       req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
        __blk_put_request(q, next);
        return 1;
 }
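
Note: ioprio_best() resolves two priorities to the more urgent one, so a
merged request (here and in the bio merge paths below) never loses
urgency.  A fragment illustrating the intent; the IOPRIO_* helpers are
assumed to come from linux/ioprio.h in this series:

	int rt = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);
	int be = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	/* the merged request keeps the realtime priority */
	req->ioprio = ioprio_best(rt, be);
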
@@ -2644,25 +2671,17 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-       attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
-       struct request *req, *freereq = NULL;
+       struct request *req;
        int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+       unsigned short prio;
        sector_t sector;
 
        sector = bio->bi_sector;
        nr_sectors = bio_sectors(bio);
        cur_nr_sectors = bio_cur_sectors(bio);
+       prio = bio_prio(bio);
 
        rw = bio_data_dir(bio);
        sync = bio_sync(bio);
@@ -2677,19 +2696,14 @@ static int __make_request(request_queue_t *q, struct bio *bio)
        spin_lock_prefetch(q->queue_lock);
 
        barrier = bio_barrier(bio);
-       if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
+       if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
 
-again:
        spin_lock_irq(q->queue_lock);
 
-       if (elv_queue_empty(q)) {
-               blk_plug_device(q);
-               goto get_rq;
-       }
-       if (barrier)
+       if (unlikely(barrier) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -2703,6 +2717,7 @@ again:
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+                       req->ioprio = ioprio_best(req->ioprio, prio);
                        drive_stat_acct(req, nr_sectors, 0);
                        if (!attempt_back_merge(q, req))
                                elv_merged_request(q, req);
@@ -2727,45 +2742,30 @@ again:
                        req->hard_cur_sectors = cur_nr_sectors;
                        req->sector = req->hard_sector = sector;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+                       req->ioprio = ioprio_best(req->ioprio, prio);
                        drive_stat_acct(req, nr_sectors, 0);
                        if (!attempt_front_merge(q, req))
                                elv_merged_request(q, req);
                        goto out;
 
-               /*
-                * elevator says don't/can't merge. get new request
-                */
-               case ELEVATOR_NO_MERGE:
-                       break;
-
+               /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
                default:
-                       printk("elevator returned crap (%d)\n", el_ret);
-                       BUG();
+                       ;
        }
 
+get_rq:
        /*
-        * Grab a free request from the freelist - if that is empty, check
-        * if we are doing read ahead and abort instead of blocking for
-        * a free slot.
+        * Grab a free request. This might sleep but cannot fail.
+        * Returns with the queue unlocked.
+        */
+       req = get_request_wait(q, rw, bio);
+
+       /*
+        * After dropping the lock and possibly sleeping here, our request
+        * may now be mergeable after it had proven unmergeable (above).
+        * We don't worry about that case for efficiency. It won't happen
+        * often, and the elevators are able to handle it.
         */
-get_rq:
-       if (freereq) {
-               req = freereq;
-               freereq = NULL;
-       } else {
-               spin_unlock_irq(q->queue_lock);
-               if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
-                       /*
-                        * READA bit set
-                        */
-                       err = -EWOULDBLOCK;
-                       if (bio_rw_ahead(bio))
-                               goto end_io;
-       
-                       freereq = get_request_wait(q, rw);
-               }
-               goto again;
-       }
 
        req->flags |= REQ_CMD;
 
@@ -2778,7 +2778,7 @@ get_rq:
        /*
         * REQ_BARRIER implies no merging, but lets make it explicit
         */
-       if (barrier)
+       if (unlikely(barrier))
                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
        req->errors = 0;
@@ -2790,13 +2790,15 @@ get_rq:
        req->buffer = bio_data(bio);    /* see ->buffer comment above */
        req->waiting = NULL;
        req->bio = req->biotail = bio;
+       req->ioprio = prio;
        req->rq_disk = bio->bi_bdev->bd_disk;
        req->start_time = jiffies;
 
+       spin_lock_irq(q->queue_lock);
+       if (elv_queue_empty(q))
+               blk_plug_device(q);
        add_request(q, req);
 out:
-       if (freereq)
-               __blk_put_request(q, freereq);
        if (sync)
                __generic_unplug_device(q);
 
@@ -2818,7 +2820,7 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
 
-               switch (bio->bi_rw) {
+               switch (bio_data_dir(bio)) {
                case READ:
                        p->read_sectors += bio_sectors(bio);
                        p->reads++;
@@ -2837,6 +2839,7 @@ void blk_finish_queue_drain(request_queue_t *q)
 {
        struct request_list *rl = &q->rq;
        struct request *rq;
+       int requeued = 0;
 
        spin_lock_irq(q->queue_lock);
        clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
@@ -2845,9 +2848,13 @@ void blk_finish_queue_drain(request_queue_t *q)
                rq = list_entry_rq(q->drain_list.next);
 
                list_del_init(&rq->queuelist);
-               __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+               elv_requeue_request(q, rq);
+               requeued++;
        }
 
+       if (requeued)
+               q->request_fn(q);
+
        spin_unlock_irq(q->queue_lock);
 
        wake_up(&rl->wait[0]);
@@ -2902,7 +2909,7 @@ static inline void block_wait_queue_running(request_queue_t *q)
 {
        DEFINE_WAIT(wait);
 
-       while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+       while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
                struct request_list *rl = &q->rq;
 
                prepare_to_wait_exclusive(&rl->drain, &wait,
@@ -3011,7 +3018,7 @@ end_io:
                        goto end_io;
                }
 
-               if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
+               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
                        goto end_io;
 
                block_wait_queue_running(q);
@@ -3044,7 +3051,7 @@ void submit_bio(int rw, struct bio *bio)
 
        BIO_BUG_ON(!bio->bi_size);
        BIO_BUG_ON(!bio->bi_io_vec);
-       bio->bi_rw = rw;
+       bio->bi_rw |= rw;
        if (rw & WRITE)
                mod_page_state(pgpgout, count);
        else
@@ -3064,7 +3071,7 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
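
Note: submit_bio() now ORs the rw bits into bio->bi_rw instead of
overwriting the field, so state a submitter has already stored there
survives; in particular the per-bio io priority that __make_request()
reads via bio_prio() lives in the upper bits of bi_rw.  A hedged example
of a submitter relying on that, assuming bio_set_prio() is the companion
helper from the ioprio series:

	/* mark the bio best-effort, level 0, before submission */
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
	submit_bio(READ, bio);	/* READ is ORed in; the prio bits remain */
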
 
-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
 {
        struct bio *bio, *prevbio = NULL;
        int nr_phys_segs, nr_hw_segs;
@@ -3106,7 +3113,7 @@ void blk_recalc_rq_segments(struct request *rq)
        rq->nr_hw_segments = nr_hw_segs;
 }
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
        if (blk_fs_request(rq)) {
                rq->hard_sector += nsect;
@@ -3401,8 +3408,11 @@ void exit_io_context(void)
        struct io_context *ioc;
 
        local_irq_save(flags);
+       task_lock(current);
        ioc = current->io_context;
        current->io_context = NULL;
+       ioc->task = NULL;
+       task_unlock(current);
        local_irq_restore(flags);
 
        if (ioc->aic && ioc->aic->exit)
@@ -3415,53 +3425,49 @@ void exit_io_context(void)
 
 /*
  * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
  *
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
 {
        struct task_struct *tsk = current;
-       unsigned long flags;
        struct io_context *ret;
 
-       local_irq_save(flags);
        ret = tsk->io_context;
-       if (ret)
-               goto out;
-
-       local_irq_restore(flags);
+       if (likely(ret))
+               return ret;
 
        ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
        if (ret) {
                atomic_set(&ret->refcount, 1);
-               ret->pid = tsk->pid;
+               ret->task = current;
+               ret->set_ioprio = NULL;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
                ret->cic = NULL;
-               spin_lock_init(&ret->lock);
-
-               local_irq_save(flags);
+               tsk->io_context = ret;
+       }
 
-               /*
-                * very unlikely, someone raced with us in setting up the task
-                * io context. free new context and just grab a reference.
-                */
-               if (!tsk->io_context)
-                       tsk->io_context = ret;
-               else {
-                       kmem_cache_free(iocontext_cachep, ret);
-                       ret = tsk->io_context;
-               }
+       return ret;
+}
+EXPORT_SYMBOL(current_io_context);
 
-out:
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+       struct io_context *ret;
+       ret = current_io_context(gfp_flags);
+       if (likely(ret))
                atomic_inc(&ret->refcount);
-               local_irq_restore(flags);
-       }
-
        return ret;
 }
 EXPORT_SYMBOL(get_io_context);
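
Note: the split leaves two conventions.  current_io_context() returns the
submitting task's context without bumping the refcount and is only valid
while running as that task; get_io_context() still takes a reference the
caller must drop with put_io_context().  A sketch of the two patterns:

	/* short-lived use, staying on the submitting task */
	struct io_context *ioc = current_io_context(GFP_ATOMIC);
	if (ioc)
		ioc->last_waited = jiffies;

	/* keeping the context beyond "current" needs a real reference */
	struct io_context *ref = get_io_context(GFP_NOIO);
	if (ref) {
		/* ... hand it off elsewhere ... */
		put_io_context(ref);
	}
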
@@ -3670,7 +3676,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        q = container_of(kobj, struct request_queue, kobj);
        if (!entry->show)
-               return 0;
+               return -EIO;
 
        return entry->show(q, page);
 }
@@ -3684,7 +3690,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        if (!entry->store)
-               return -EINVAL;
+               return -EIO;
 
        return entry->store(q, page, length);
 }
@@ -3694,7 +3700,7 @@ static struct sysfs_ops queue_sysfs_ops = {
        .store  = queue_attr_store,
 };
 
-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
 };