diff --git a/block/blk-merge.c b/block/blk-merge.c
index b8df66a..5f24482 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,7 +12,6 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
-       unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
@@ -22,9 +21,9 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                return 0;
 
        fbio = bio;
-       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+       cluster = blk_queue_cluster(q);
        seg_size = 0;
-       phys_size = nr_phys_segs = 0;
+       nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
@@ -32,11 +31,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
-                               if (seg_size + bv->bv_len > q->max_segment_size)
+                               if (seg_size + bv->bv_len
+                                   > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
@@ -87,11 +87,11 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
-       if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+       if (!blk_queue_cluster(q))
                return 0;
 
        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-           q->max_segment_size)
+           queue_max_segment_size(q))
                return 0;
 
        if (!bio_has_data(bio))
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+       int nbytes = bvec->bv_len;
+
+       if (*bvprv && *cluster) {
+               if ((*sg)->length + nbytes > queue_max_segment_size(q))
+                       goto new_segment;
+
+               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                       goto new_segment;
+               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                       goto new_segment;
+
+               (*sg)->length += nbytes;
+       } else {
+new_segment:
+               if (!*sg)
+                       *sg = sglist;
+               else {
+                       /*
+                        * If the driver previously mapped a shorter
+                        * list, we could see a termination bit
+                        * prematurely unless it fully inits the sg
+                        * table on each mapping. We KNOW that there
+                        * must be more entries here or the driver
+                        * would be buggy, so force clear the
+                        * termination bit to avoid doing a full
+                        * sg_init_table() in drivers for each command.
+                        */
+                       sg_unmark_end(*sg);
+                       *sg = sg_next(*sg);
+               }
+
+               sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+               (*nsegs)++;
+       }
+       *bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -123,7 +166,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        int nsegs, cluster;
 
        nsegs = 0;
-       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+       cluster = blk_queue_cluster(q);
 
        /*
         * for each bio in rq
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               int nbytes = bvec->bv_len;
-
-               if (bvprv && cluster) {
-                       if (sg->length + nbytes > q->max_segment_size)
-                               goto new_segment;
-
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                               goto new_segment;
-
-                       sg->length += nbytes;
-               } else {
-new_segment:
-                       if (!sg)
-                               sg = sglist;
-                       else {
-                               /*
-                                * If the driver previously mapped a shorter
-                                * list, we could see a termination bit
-                                * prematurely unless it fully inits the sg
-                                * table on each mapping. We KNOW that there
-                                * must be more entries here or the driver
-                                * would be buggy, so force clear the
-                                * termination bit to avoid doing a full
-                                * sg_init_table() in drivers for each command.
-                                */
-                               sg->page_link &= ~0x02;
-                               sg = sg_next(sg);
-                       }
-
-                       sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                       nsegs++;
-               }
-               bvprv = bvec;
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
        } /* segments in rq */
 
 
@@ -179,7 +189,7 @@ new_segment:
        }
 
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (rq->cmd_flags & REQ_RW)
+               if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                sg->page_link &= ~0x02;
@@ -199,19 +209,54 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                  struct scatterlist *sglist)
+{
+       struct bio_vec *bvec, *bvprv;
+       struct scatterlist *sg;
+       int nsegs, cluster;
+       unsigned long i;
+
+       nsegs = 0;
+       cluster = blk_queue_cluster(q);
+
+       bvprv = NULL;
+       sg = NULL;
+       bio_for_each_segment(bvec, bio, i) {
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
+       } /* segments in bio */
+
+       if (sg)
+               sg_mark_end(sg);
+
+       BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+       return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
 {
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+               goto no_merge;
+
+       if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
+               goto no_merge;
 
        /*
         * This will form the start of a new hw segment.  Bump both
@@ -219,19 +264,19 @@ static inline int ll_new_hw_segment(struct request_queue *q,
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;
+
+no_merge:
+       req->cmd_flags |= REQ_NOMERGE;
+       if (req == q->last_merge)
+               q->last_merge = NULL;
+       return 0;
 }
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
-       else
-               max_sectors = q->max_sectors;
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -248,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
-       else
-               max_sectors = q->max_sectors;
-
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -287,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+           blk_rq_get_max_sectors(req))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +338,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                total_phys_segments--;
        }
 
-       if (total_phys_segments > q->max_phys_segments)
+       if (total_phys_segments > queue_max_segments(q))
                return 0;
 
-       if (total_phys_segments > q->max_hw_segments)
+       if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
                return 0;
 
        /* Merge is OK... */
@@ -310,6 +349,36 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        return 1;
 }
 
+/**
+ * blk_rq_set_mixed_merge - mark a request as mixed merge
+ * @rq: request to mark as mixed merge
+ *
+ * Description:
+ *     @rq is about to be mixed merged.  Make sure the attributes
+ *     which can be mixed are set in each bio and mark @rq as mixed
+ *     merged.
+ */
+void blk_rq_set_mixed_merge(struct request *rq)
+{
+       unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+       struct bio *bio;
+
+       if (rq->cmd_flags & REQ_MIXED_MERGE)
+               return;
+
+       /*
+        * @rq will no longer represent mixable attributes for all the
+        * contained bios.  It will just track those of the first one.
+        * Distribute the attributes to each bio.
+        */
+       for (bio = rq->bio; bio; bio = bio->bi_next) {
+               WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
+                            (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
+               bio->bi_rw |= ff;
+       }
+       rq->cmd_flags |= REQ_MIXED_MERGE;
+}
+
 static void blk_account_io_merge(struct request *req)
 {
        if (blk_do_io_stat(req)) {
@@ -317,11 +386,12 @@ static void blk_account_io_merge(struct request *req)
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+               part = req->part;
 
                part_round_stats(cpu, part);
-               part_dec_in_flight(part);
+               part_dec_in_flight(part, rq_data_dir(req));
 
+               hd_struct_put(part);
                part_stat_unlock();
        }
 }
@@ -335,6 +405,9 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
+       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+               return 0;
+
        /*
         * not contiguous
         */
@@ -346,7 +419,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
-       if (blk_integrity_rq(req) != blk_integrity_rq(next))
+       if (req->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(req->bio, next->bio))
                return 0;
 
        /*
@@ -359,6 +433,19 @@ static int attempt_merge(struct request_queue *q, struct request *req,
                return 0;
 
        /*
+        * If failfast settings disagree or any of the two is already
+        * a mixed merge, mark both as mixed before proceeding.  This
+        * makes sure that all involved bios have mixable attributes
+        * set properly.
+        */
+       if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+           (req->cmd_flags & REQ_FAILFAST_MASK) !=
+           (next->cmd_flags & REQ_FAILFAST_MASK)) {
+               blk_rq_set_mixed_merge(req);
+               blk_rq_set_mixed_merge(next);
+       }
+
+       /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
@@ -370,7 +457,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;
 
-       req->data_len += blk_rq_bytes(next);
+       req->__data_len += blk_rq_bytes(next);
 
        elv_merge_requests(q, req, next);
 
@@ -408,3 +495,46 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 
        return 0;
 }
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+                         struct request *next)
+{
+       return attempt_merge(q, rq, next);
+}
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+       if (!rq_mergeable(rq) || !bio_mergeable(bio))
+               return false;
+
+       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+               return false;
+
+       /* different data direction or already started, don't merge */
+       if (bio_data_dir(bio) != rq_data_dir(rq))
+               return false;
+
+       /* must be same device and not a special request */
+       if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+               return false;
+
+       /* only merge integrity protected bio into ditto rq */
+       if (bio_integrity(bio) != blk_integrity_rq(rq))
+               return false;
+
+       /* must be using the same buffer */
+       if (rq->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(rq->bio, bio))
+               return false;
+
+       return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+               return ELEVATOR_BACK_MERGE;
+       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+               return ELEVATOR_FRONT_MERGE;
+       return ELEVATOR_NO_MERGE;
+}