diff --git a/block/blk-merge.c b/block/blk-merge.c
index 576b68e..5f24482 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -143,7 +143,7 @@ new_segment:
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
-                       (*sg)->page_link &= ~0x02;
+                       sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }
 
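The open-coded clearing of bit 0x02 is replaced by the sg_unmark_end() helper, which names the operation instead of exposing the page_link encoding. For reference, a sketch of the helper as defined in include/linux/scatterlist.h around this kernel version:

        static inline void sg_unmark_end(struct scatterlist *sg)
        {
                /* Clear the end-of-scatterlist marker (bit 0x02 of page_link). */
                sg->page_link &= ~0x02;
        }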
@@ -209,6 +209,43 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries set up
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                  struct scatterlist *sglist)
+{
+       struct bio_vec *bvec, *bvprv;
+       struct scatterlist *sg;
+       int nsegs, cluster;
+       unsigned long i;
+
+       nsegs = 0;
+       cluster = blk_queue_cluster(q);
+
+       bvprv = NULL;
+       sg = NULL;
+       bio_for_each_segment(bvec, bio, i) {
+               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                    &nsegs, &cluster);
+       } /* segments in bio */
+
+       if (sg)
+               sg_mark_end(sg);
+
+       BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+       return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
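blk_bio_map_sg() mirrors the blk_rq_map_sg() loop above but walks a single bio rather than a whole request, which lets a driver build a scatterlist before any request exists. A hedged usage sketch; MY_MAX_SEGS and the surrounding driver context are hypothetical, only the block-layer calls are real:

        /* Hypothetical driver path: map one bio for a DMA engine. */
        struct scatterlist sgl[MY_MAX_SEGS];   /* must hold bio->bi_phys_segments */
        int nents;

        sg_init_table(sgl, MY_MAX_SEGS);
        nents = blk_bio_map_sg(q, bio, sgl);   /* marks the last entry via sg_mark_end() */
        /* sgl[0..nents-1] is now ready to hand to the device */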
@@ -238,14 +275,8 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
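Here and in the two hunks below, the open-coded BLOCK_PC special case is folded into blk_rq_get_max_sectors(). A sketch of that helper's likely shape in include/linux/blkdev.h (reconstructed from this change, not quoted from the tree):

        static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
        {
                struct request_queue *q = rq->q;

                /* Passthrough commands are bounded only by the hardware limit. */
                if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
                        return q->limits.max_hw_sectors;

                return blk_queue_get_max_sectors(q, rq->cmd_flags);
        }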
@@ -262,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -301,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+           blk_rq_get_max_sectors(req))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
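The point of the indirection is that blk_queue_get_max_sectors() can pick a limit per command type, so discard and WRITE SAME requests are checked against their own queue limits rather than max_sectors. Approximately, again as a sketch rather than a quote from this tree:

        static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                             unsigned int cmd_flags)
        {
                if (unlikely(cmd_flags & REQ_DISCARD))
                        return q->limits.max_discard_sectors;

                if (unlikely(cmd_flags & REQ_WRITE_SAME))
                        return q->limits.max_write_same_sectors;

                return q->limits.max_sectors;
        }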
@@ -380,16 +405,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
-       /*
-        * Don't merge file system requests and discard requests
-        */
-       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
-               return 0;
-
-       /*
-        * Don't merge discard requests and secure discard requests
-        */
-       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;
 
        /*
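blk_check_merge_flags() collapses the two removed checks, plus the analogous WRITE SAME rule, into one predicate: two requests may merge only if they agree on each of these flags. A sketch of its likely shape:

        static inline bool blk_check_merge_flags(unsigned int flags1,
                                                 unsigned int flags2)
        {
                if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
                        return false;

                if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
                        return false;

                if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
                        return false;

                return true;
        }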
@@ -403,6 +419,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
+       if (req->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(req->bio, next->bio))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
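A WRITE SAME payload is a single block that the device replicates, so two such requests can only merge when they point at the very same buffer. blk_write_same_mergeable() expresses that; a sketch:

        static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
        {
                /* Mergeable only if both bios reference the same payload buffer. */
                if (bio_data(a) == bio_data(b))
                        return true;

                return false;
        }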
@@ -484,15 +504,10 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-       if (!rq_mergeable(rq))
-               return false;
-
-       /* don't merge file system requests and discard requests */
-       if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+       if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
-       /* don't merge discard requests and secure discard requests */
-       if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;
 
        /* different data direction or already started, don't merge */
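bio_mergeable() applies to a bare bio the same REQ_NOMERGE_FLAGS screen that rq_mergeable() applies to a request; roughly:

        static inline bool bio_mergeable(struct bio *bio)
        {
                if (bio->bi_rw & REQ_NOMERGE_FLAGS)
                        return false;

                return true;
        }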
@@ -507,6 +522,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;
 
+       /* must be using the same buffer */
+       if (rq->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(rq->bio, bio))
+               return false;
+
        return true;
 }
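blk_rq_merge_ok() only answers whether a merge is permitted at all; where the bio would attach is decided separately. A hedged caller sketch modeled on elv_merge() (identifiers are real, control flow abbreviated):

        /* Try the cached merge candidate first. */
        if (q->last_merge && blk_rq_merge_ok(q->last_merge, bio)) {
                int ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }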