block: blk-merge: factor out __blk_segment_map_sg(), add blk_bio_map_sg(), and consolidate merge checks

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f..5f24482 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+       int nbytes = bvec->bv_len;
+
+       if (*bvprv && *cluster) {
+               if ((*sg)->length + nbytes > queue_max_segment_size(q))
+                       goto new_segment;
+
+               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                       goto new_segment;
+               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                       goto new_segment;
+
+               (*sg)->length += nbytes;
+       } else {
+new_segment:
+               if (!*sg)
+                       *sg = sglist;
+               else {
+                       /*
+                        * If the driver previously mapped a shorter
+                        * list, we could see a termination bit
+                        * prematurely unless it fully inits the sg
+                        * table on each mapping. We KNOW that there
+                        * must be more entries here or the driver
+                        * would be buggy, so force clear the
+                        * termination bit to avoid doing a full
+                        * sg_init_table() in drivers for each command.
+                        */
+                       sg_unmark_end(*sg);
+                       *sg = sg_next(*sg);
+               }
+
+               sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+               (*nsegs)++;
+       }
+       *bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
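
For reference, sg_unmark_end() (from <linux/scatterlist.h>) is simply the
named form of the open-coded termination-bit clear that the next hunk
removes:

static inline void sg_unmark_end(struct scatterlist *sg)
{
        sg->page_link &= ~0x02;
}
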
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               int nbytes = bvec->bv_len;
-
-               if (bvprv && cluster) {
-                       if (sg->length + nbytes > queue_max_segment_size(q))
-                               goto new_segment;
-
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                               goto new_segment;
-
-                       sg->length += nbytes;
-               } else {
-new_segment:
-                       if (!sg)
-                               sg = sglist;
-                       else {
-                               /*
-                                * If the driver previously mapped a shorter
-                                * list, we could see a termination bit
-                                * prematurely unless it fully inits the sg
-                                * table on each mapping. We KNOW that there
-                                * must be more entries here or the driver
-                                * would be buggy, so force clear the
-                                * termination bit to avoid doing a full
-                                * sg_init_table() in drivers for each command.
-                                */
-                               sg->page_link &= ~0x02;
-                               sg = sg_next(sg);
-                       }
-
-                       sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                       nsegs++;
-               }
-               bvprv = bvec;
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
        } /* segments in rq */
 
 
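blk_rq_map_sg() keeps its existing contract and now just drives the shared
helper. A minimal caller-side sketch (hypothetical driver code; q, rq and
the error path are assumed context, not part of this patch):

        struct scatterlist *sgl;
        int nents;

        /* caller must size the table for rq->nr_phys_segments */
        sgl = kmalloc_array(rq->nr_phys_segments, sizeof(*sgl), GFP_ATOMIC);
        if (!sgl)
                return -ENOMEM;
        sg_init_table(sgl, rq->nr_phys_segments);

        nents = blk_rq_map_sg(q, rq, sgl);      /* sg entries actually used */
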
@@ -199,6 +209,43 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                  struct scatterlist *sglist)
+{
+       struct bio_vec *bvec, *bvprv;
+       struct scatterlist *sg;
+       int nsegs, cluster;
+       unsigned long i;
+
+       nsegs = 0;
+       cluster = blk_queue_cluster(q);
+
+       bvprv = NULL;
+       sg = NULL;
+       bio_for_each_segment(bvec, bio, i) {
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
+       } /* segments in bio */
+
+       if (sg)
+               sg_mark_end(sg);
+
+       BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+       return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
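
blk_bio_map_sg() above mirrors blk_rq_map_sg() for a single bio; it is
intended for bio-based drivers such as virtio-blk that have no struct
request to map. A minimal sketch of a caller (MY_MAX_SEGMENTS is an
assumed, driver-chosen constant):

        struct scatterlist sgl[MY_MAX_SEGMENTS];
        int nents;

        /* table must hold at least bio->bi_phys_segments entries */
        sg_init_table(sgl, MY_MAX_SEGMENTS);
        nents = blk_bio_map_sg(q, bio, sgl);

Note that blk_bio_map_sg() terminates the list with sg_mark_end() itself,
so a once-initialized table does not need a full re-init per bio.
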
@@ -228,14 +275,8 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -252,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -291,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+           blk_rq_get_max_sectors(req))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
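
ll_back_merge_fn(), ll_front_merge_fn() and ll_merge_requests_fn() now all
defer the size check to blk_rq_get_max_sectors() instead of open-coding the
BLOCK_PC special case. Per the companion blkdev.h change, the helper looks
roughly like this (sketch, not part of this file):

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
                return q->limits.max_hw_sectors;

        return blk_queue_get_max_sectors(q, rq->cmd_flags);
}

blk_queue_get_max_sectors() in turn returns max_discard_sectors for
discards and max_write_same_sectors for WRITE SAME, which lets those
per-command-type limits be honored on merge as well.
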
@@ -370,16 +405,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
-       /*
-        * Don't merge file system requests and discard requests
-        */
-       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
-               return 0;
-
-       /*
-        * Don't merge discard requests and secure discard requests
-        */
-       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;
 
        /*
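
blk_check_merge_flags() folds the two deleted checks, plus the new WRITE
SAME case, into one helper. Its shape in the companion blkdev.h change is
roughly:

static inline bool blk_check_merge_flags(unsigned int flags1,
                                         unsigned int flags2)
{
        if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
                return false;

        if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
                return false;

        if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
                return false;

        return true;
}
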
@@ -393,6 +419,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
+       if (req->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(req->bio, next->bio))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
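
A WRITE SAME request carries a single-page payload that is replicated over
the whole range, so two requests may only merge when they reference the
same buffer. The helper used here (and again in blk_rq_merge_ok() below)
is roughly:

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
        if (bio_data(a) == bio_data(b))
                return true;

        return false;
}
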
@@ -474,15 +504,10 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-       if (!rq_mergeable(rq))
+       if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
-       /* don't merge file system requests and discard requests */
-       if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-               return false;
-
-       /* don't merge discard requests and secure discard requests */
-       if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;
 
        /* different data direction or already started, don't merge */
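
bio_mergeable() is part of the same cleanup; roughly, it just rejects bios
carrying any REQ_NOMERGE_FLAGS bit:

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}
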
@@ -497,6 +522,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;
 
+       /* must be using the same buffer */
+       if (rq->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(rq->bio, bio))
+               return false;
+
        return true;
 }