block: unify flags for struct bio and struct request
Christoph Hellwig [Sat, 7 Aug 2010 16:20:39 +0000 (18:20 +0200)]
Remove the current bio flags and reuse the request flags for the bio, too.
This makes it easier to trace the type of I/O from the filesystem
down to the block driver.  There were two flags in the bio that were
missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD.  Also, I've
renamed two request flags that had a superfluous RW in them.

Note that the flags live in bio.h despite having the REQ_ name; as
blkdev.h includes bio.h, that is the only way to go for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
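
For readers following the conversion, the pattern repeated throughout this
patch is mechanical: every bio_rw_flagged(bio, BIO_RW_X) test becomes a
direct mask test against the shared REQ_* namespace.  Below is a minimal
userspace sketch of the new style, using the bit layout this patch
introduces in bio.h; the structures are simplified stand-ins, not the real
kernel definitions.

/* Old: bio_rw_flagged(bio, BIO_RW_SYNCIO) with bio-private bit numbers.
 * New: the same REQ_* mask works for bio->bi_rw and rq->cmd_flags. */
#include <assert.h>
#include <stdbool.h>

enum rq_flag_bits { __REQ_WRITE, __REQ_FAILFAST_DEV, __REQ_FAILFAST_TRANSPORT,
		    __REQ_FAILFAST_DRIVER, __REQ_HARDBARRIER, __REQ_SYNC };
#define REQ_WRITE	(1 << __REQ_WRITE)
#define REQ_SYNC	(1 << __REQ_SYNC)

struct bio { unsigned long bi_rw; };
struct request { unsigned long cmd_flags; };

int main(void)
{
	struct bio bio = { .bi_rw = REQ_WRITE | REQ_SYNC };
	struct request rq = { .cmd_flags = bio.bi_rw };	/* one namespace */

	bool sync = (bio.bi_rw & REQ_SYNC);	/* replaces bio_rw_flagged() */
	assert(sync && (rq.cmd_flags & REQ_SYNC));
	return 0;
}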

47 files changed:
block/blk-barrier.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/cfq-iosched.c
block/elevator.c
drivers/ata/libata-scsi.c
drivers/block/aoe/aoeblk.c
drivers/block/brd.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/loop.c
drivers/block/pktcdvd.c
drivers/block/umem.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-floppy.c
drivers/md/dm-io.c
drivers/md/dm-kcopyd.c
drivers/md/dm-raid1.c
drivers/md/dm-stripe.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/scsi/osd/osd_initiator.c
fs/bio.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/volumes.c
fs/exofs/ios.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/nilfs2/segbuf.c
include/linux/bio.h
include/linux/blkdev.h
include/linux/fs.h
kernel/power/block_io.c
kernel/trace/blktrace.c
mm/page_io.c

index 74e4043..7c6f4a7 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                       rq->cmd_flags |= REQ_RW;
+                       rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
index dca43a3..66c3cfe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
-       /*
-        * Inherit FAILFAST from bio (for read-ahead, and explicit
-        * FAILFAST).  FAILFAST flags are identical for req and bio.
-        */
-       if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+       req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+       if (bio->bi_rw & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
-       else
-               req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-       if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-               req->cmd_flags |= REQ_DISCARD;
-       if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-               req->cmd_flags |= REQ_HARDBARRIER;
-       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-               req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_rw_flagged(bio, BIO_RW_META))
-               req->cmd_flags |= REQ_RW_META;
-       if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-               req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
        req->__sector = bio->bi_sector;
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-       const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+       const bool sync = (bio->bi_rw & REQ_SYNC);
+       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
        const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
-       if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+       if ((bio->bi_rw & REQ_HARDBARRIER) &&
            (q->next_ordered == QUEUE_ORDERED_NONE)) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+       if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
-               rw_flags |= REQ_RW_SYNC;
+               rw_flags |= REQ_SYNC;
 
        /*
         * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+               if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
                             nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
 
-               if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-                   !blk_queue_discard(q)) {
+               if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-       rq->cmd_flags |= bio->bi_rw & REQ_RW;
+       rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
        if (bio_has_data(bio)) {
                rq->nr_phys_segments = bio_phys_segments(q, bio);
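
The init_request_from_bio() hunk above is the heart of the patch: a string
of hand-written flag translations collapses into a single REQ_COMMON_MASK
copy, plus one special case that turns the bio-only REQ_RAHEAD into the
request's fail-fast bits.  A sketch of the equivalent logic follows; the
mask values mirror the new bio.h definitions, but this is a simplified
model, not the kernel code.

/* Simplified model of the new init_request_from_bio() flag copy. */
#include <assert.h>

#define REQ_WRITE		(1 << 0)
#define REQ_FAILFAST_DEV	(1 << 1)
#define REQ_FAILFAST_TRANSPORT	(1 << 2)
#define REQ_FAILFAST_DRIVER	(1 << 3)
#define REQ_HARDBARRIER		(1 << 4)
#define REQ_SYNC		(1 << 5)
#define REQ_META		(1 << 6)
#define REQ_DISCARD		(1 << 7)
#define REQ_NOIDLE		(1 << 8)
#define REQ_RAHEAD		(1 << 10)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
	 REQ_META | REQ_DISCARD | REQ_NOIDLE)

int main(void)
{
	unsigned long bi_rw = REQ_RAHEAD | REQ_META;	/* readahead, meta bio */
	unsigned long cmd_flags = 0;

	cmd_flags |= bi_rw & REQ_COMMON_MASK;	/* shared flags copy over */
	if (bi_rw & REQ_RAHEAD)			/* bio-only readahead maps to */
		cmd_flags |= REQ_FAILFAST_MASK;	/* "don't retry" on the rq */

	assert(cmd_flags & REQ_META);
	assert((cmd_flags & REQ_FAILFAST_MASK) == REQ_FAILFAST_MASK);
	assert(!(cmd_flags & REQ_RAHEAD));	/* REQ_RAHEAD itself not copied */
	return 0;
}
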
index 9083cf0..c65d759 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                return PTR_ERR(bio);
 
        if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << BIO_RW);
+               bio->bi_rw |= (1 << REQ_WRITE);
 
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
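
One conversion in the blk-map.c hunk above looks inconsistent with the rest
of the series: REQ_WRITE is already a mask (1 << __REQ_WRITE), so
"bio->bi_rw |= (1 << REQ_WRITE)" shifts by the mask's value and lands on the
REQ_FAILFAST_DEV bit instead of setting the write bit; mainline corrected
this line to a plain "bio->bi_rw |= REQ_WRITE" in a follow-up fix.  A small
demonstration of the discrepancy, using the flag values defined in the new
bio.h:

/* Why "bi_rw |= (1 << REQ_WRITE)" differs from "bi_rw |= REQ_WRITE". */
#include <assert.h>

#define REQ_WRITE	 (1 << 0)	/* already a mask, not a bit number */
#define REQ_FAILFAST_DEV (1 << 1)

int main(void)
{
	unsigned long rw = 0;

	rw |= (1 << REQ_WRITE);		/* 1 << 1: sets REQ_FAILFAST_DEV ... */
	assert(rw == REQ_FAILFAST_DEV);
	assert(!(rw & REQ_WRITE));	/* ... and leaves the write bit clear */
	return 0;
}
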
index 87e4fb7..4852475 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ new_segment:
        }
 
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (rq->cmd_flags & REQ_RW)
+               if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                sg->page_link &= ~0x02;
index d4edeb8..eb4086f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-       return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
-       if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
+       if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
                return rq1;
-       else if ((rq2->cmd_flags & REQ_RW_META) &&
-                !(rq1->cmd_flags & REQ_RW_META))
+       else if ((rq2->cmd_flags & REQ_META) &&
+                !(rq1->cmd_flags & REQ_META))
                return rq2;
 
        s1 = blk_rq_pos(rq1);
@@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
        cfqq->cfqd->rq_queued--;
        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(rq), rq_is_sync(rq));
-       if (rq->cmd_flags & REQ_RW_META) {
+       if (rq->cmd_flags & REQ_META) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
        }
@@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
-       if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
+       if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
                return true;
 
        /*
@@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_io_context *cic = RQ_CIC(rq);
 
        cfqd->rq_queued++;
-       if (rq->cmd_flags & REQ_RW_META)
+       if (rq->cmd_flags & REQ_META)
                cfqq->meta_pending++;
 
        cfq_update_io_thinktime(cfqd, cic);
index aa99b59..816a7c8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
        /*
         * Don't merge file system requests and discard requests
         */
-       if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-           bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+       if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
                return 0;
 
        /*
index a5c08b0..0a8cd34 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1114,7 +1114,7 @@ static int atapi_drain_needed(struct request *rq)
        if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
                return 0;
 
-       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
                return 0;
 
        return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
index 035cefe..65deffd 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -173,7 +173,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
                BUG();
                bio_endio(bio, -ENXIO);
                return 0;
-       } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+       } else if (bio->bi_rw & REQ_HARDBARRIER) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        } else if (bio->bi_io_vec == NULL) {
index f1bf79d..1b218c6 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -340,7 +340,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
                                                get_capacity(bdev->bd_disk))
                goto out;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+       if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
                discard_from_brd(brd, sector, bio->bi_size);
                goto out;
index df01899..9400845 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        md_io.error = 0;
 
        if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-               rw |= (1 << BIO_RW_BARRIER);
-       rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+               rw |= REQ_HARDBARRIER;
+       rw |= REQ_UNPLUG | REQ_SYNC;
 
  retry:
        bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        /* check for unsupported barrier op.
         * would rather check on EOPNOTSUPP, but that is not reliable.
         * don't try again for ANY return value != 0 */
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+       if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
                /* Try again with no barrier */
                dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
                set_bit(MD_NO_BARRIER, &mdev->flags);
-               rw &= ~(1 << BIO_RW_BARRIER);
+               rw &= ~REQ_HARDBARRIER;
                bio_put(bio);
                goto retry;
        }
index 7258c95..e2ab13d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2425,15 +2425,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
        /* NOTE: no need to check if barriers supported here as we would
         *       not pass the test in make_request_common in that case
         */
-       if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+       if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
                /* dp_flags |= DP_HARDBARRIER; */
        }
-       if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+       if (req->master_bio->bi_rw & REQ_SYNC)
                dp_flags |= DP_RW_SYNC;
        /* for now handle SYNCIO and UNPLUG
         * as if they still were one and the same flag */
-       if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+       if (req->master_bio->bi_rw & REQ_UNPLUG)
                dp_flags |= DP_RW_SYNC;
        if (mdev->state.conn >= C_SYNC_SOURCE &&
            mdev->state.conn <= C_PAUSED_SYNC_T)
index dff4870..cba1deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1180,7 +1180,7 @@ next_bio:
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        /* we special case some flags in the multi-bio case, see below
-        * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+        * (REQ_UNPLUG, REQ_HARDBARRIER) */
        bio->bi_rw = rw;
        bio->bi_private = e;
        bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
                bios = bios->bi_next;
                bio->bi_next = NULL;
 
-               /* strip off BIO_RW_UNPLUG unless it is the last bio */
+               /* strip off REQ_UNPLUG unless it is the last bio */
                if (bios)
-                       bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+                       bio->bi_rw &= ~REQ_UNPLUG;
 
                drbd_generic_make_request(mdev, fault_type, bio);
 
-               /* strip off BIO_RW_BARRIER,
+               /* strip off REQ_HARDBARRIER,
                 * unless it is the first or last bio */
                if (bios && bios->bi_next)
-                       bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+                       bios->bi_rw &= ~REQ_HARDBARRIER;
        } while (bios);
        maybe_kick_lo(mdev);
        return 0;
@@ -1233,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:      DRBD device.
  * @w:         work object.
  * @cancel:    The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
           (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
           so that we can finish that epoch in drbd_may_finish_epoch().
           That is necessary if we already have a long chain of Epochs, before
-          we realize that BIO_RW_BARRIER is actually not supported */
+          we realize that REQ_HARDBARRIER is actually not supported */
 
        /* As long as the -ENOTSUPP on the barrier is reported immediately
           that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
                epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
                if (epoch == e->epoch) {
                        set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                       rw |= (1<<BIO_RW_BARRIER);
+                       rw |= REQ_HARDBARRIER;
                        e->flags |= EE_IS_BARRIER;
                } else {
                        if (atomic_read(&epoch->epoch_size) > 1 ||
                            !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
                                set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
                                set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                               rw |= (1<<BIO_RW_BARRIER);
+                               rw |= REQ_HARDBARRIER;
                                e->flags |= EE_IS_BARRIER;
                        }
                }
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
        dp_flags = be32_to_cpu(p->dp_flags);
        if (dp_flags & DP_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-               /* rw |= (1<<BIO_RW_BARRIER); */
+               /* rw |= REQ_HARDBARRIER; */
        }
        if (dp_flags & DP_RW_SYNC)
-               rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
        if (dp_flags & DP_MAY_SET_IN_SYNC)
                e->flags |= EE_MAY_SET_IN_SYNC;
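
The drbd submission loop above splits one logical request across several
bios and then thins out the flags: REQ_UNPLUG is kept only on the last bio
of the chain, and REQ_HARDBARRIER only on the first and the last.  A
standalone sketch of that stripping logic; the list handling is simplified
and the helper names submit() and submit_chain() are illustrative, not
drbd's:

/* Sketch of drbd's per-chain flag stripping (simplified bi_next list). */
#include <assert.h>
#include <stddef.h>

#define REQ_HARDBARRIER	(1 << 4)
#define REQ_UNPLUG	(1 << 9)

struct bio { unsigned long bi_rw; struct bio *bi_next; };

static void submit(struct bio *bio) { (void)bio; /* would queue real I/O */ }

static void submit_chain(struct bio *bios)
{
	do {
		struct bio *bio = bios;

		bios = bios->bi_next;
		bio->bi_next = NULL;

		if (bios)			/* not last: no early unplug */
			bio->bi_rw &= ~REQ_UNPLUG;
		submit(bio);

		if (bios && bios->bi_next)	/* middle bios lose the barrier */
			bios->bi_rw &= ~REQ_HARDBARRIER;
	} while (bios);
}

int main(void)
{
	struct bio b3 = { REQ_HARDBARRIER | REQ_UNPLUG, NULL };
	struct bio b2 = { REQ_HARDBARRIER | REQ_UNPLUG, &b3 };
	struct bio b1 = { REQ_HARDBARRIER | REQ_UNPLUG, &b2 };

	submit_chain(&b1);
	assert(b1.bi_rw & REQ_HARDBARRIER);	/* first keeps the barrier */
	assert(!(b2.bi_rw & REQ_HARDBARRIER));	/* middle is stripped */
	assert(b3.bi_rw & REQ_HARDBARRIER);	/* last keeps barrier ... */
	assert(b3.bi_rw & REQ_UNPLUG);		/* ... and the unplug */
	assert(!(b1.bi_rw & REQ_UNPLUG) && !(b2.bi_rw & REQ_UNPLUG));
	return 0;
}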
 
index 654f1ef..f761d98 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
         * because of those XXX, this is not yet enabled,
         * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
         */
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
                /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
index 6120922..fedfdb7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -476,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index 8a549db..9f3e445 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1221,7 +1221,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
        pkt->bio->bi_flags = 1 << BIO_UPTODATE;
        pkt->bio->bi_idx = 0;
 
-       BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+       BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
        BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
        BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
        BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
index 2f9470f..8be5715 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
                                le32_to_cpu(desc->local_addr)>>9,
                                le32_to_cpu(desc->transfer_size));
                        dump_dmastat(card, control);
-               } else if (test_bit(BIO_RW, &bio->bi_rw) &&
+               } else if ((bio->bi_rw & REQ_WRITE) &&
                           le32_to_cpu(desc->local_addr) >> 9 ==
                                card->init_size) {
                        card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
index 02712bf..766b3de 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
           touch it at all. */
 
        if (cgc->data_direction == CGC_DATA_WRITE)
-               flags |= REQ_RW;
+               flags |= REQ_WRITE;
 
        if (cgc->sense)
                memset(cgc->sense, 0, sizeof(struct request_sense));
index c7d0737..5406b6e 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
        memcpy(rq->cmd, pc->c, 12);
 
        pc->rq = rq;
-       if (rq->cmd_flags & REQ_RW)
+       if (rq->cmd_flags & REQ_WRITE)
                pc->flags |= PC_FLAG_WRITING;
 
        pc->flags |= PC_FLAG_DMA_OK;
index 10f457c..0590c75 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
        BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
        if (sync)
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
 
        /*
         * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
-               if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+               if (where[i].count || (rw & REQ_HARDBARRIER))
                        do_region(rw, i, where + i, dp, io);
        }
 
@@ -412,8 +412,8 @@ retry:
        }
        set_current_state(TASK_RUNNING);
 
-       if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-               rw &= ~(1 << BIO_RW_BARRIER);
+       if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+               rw &= ~REQ_HARDBARRIER;
                goto retry;
        }
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
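
The eopnotsupp_bits handling in dm-io above is the usual fallback pattern
for REQ_HARDBARRIER in this era: if a barrier write comes back -EOPNOTSUPP,
clear the flag and resubmit without it (drbd's _drbd_md_sync_page_io, shown
earlier, retries the same way).  A simplified sketch, with submit_io() as a
made-up stand-in for a device that rejects barriers:

/* Sketch of the retry-without-barrier pattern used by dm-io and drbd. */
#include <assert.h>
#include <errno.h>

#define REQ_HARDBARRIER	(1 << 4)
#define REQ_SYNC	(1 << 5)
#define REQ_UNPLUG	(1 << 9)

/* stand-in for submission against a device without barrier support */
static int submit_io(unsigned long rw)
{
	return (rw & REQ_HARDBARRIER) ? -EOPNOTSUPP : 0;
}

int main(void)
{
	unsigned long rw = REQ_HARDBARRIER | REQ_SYNC | REQ_UNPLUG;
	int ret;

retry:
	ret = submit_io(rw);
	if (ret == -EOPNOTSUPP && (rw & REQ_HARDBARRIER)) {
		rw &= ~REQ_HARDBARRIER;	/* drop the barrier, try again */
		goto retry;
	}
	assert(ret == 0);
	return 0;
}
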
index addf834..d8587ba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+               .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
index ddda531..7413626 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
        if (error == -EOPNOTSUPP)
                goto out;
 
-       if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
                goto out;
 
        if (unlikely(error)) {
index e610725..d6e28d7 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
        if (!error)
                return 0; /* I/O complete */
 
-       if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
                return error;
 
        if (error == -EOPNOTSUPP)
index 1e0e6dd..d6f77ba 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
-                               if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+                               if (!(io->bio->bi_rw & REQ_HARDBARRIER))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
                io_error = io->error;
                bio = io->bio;
 
-               if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+               if (bio->bi_rw & REQ_HARDBARRIER) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
-       clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw &= ~REQ_HARDBARRIER;
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
        ci.map = dm_get_live_table(md);
        if (unlikely(!ci.map)) {
-               if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+               if (!(bio->bi_rw & REQ_HARDBARRIER))
                        bio_io_error(bio);
                else
                        if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-           unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+           unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                up_read(&md->io_lock);
 
                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
                if (dm_request_based(md))
                        generic_make_request(c);
                else {
-                       if (bio_rw_flagged(c, BIO_RW_BARRIER))
+                       if (c->bi_rw & REQ_HARDBARRIER)
                                process_barrier(md, c);
                        else
                                __split_and_process_bio(md, c);
index 7e0e057..ba19060 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
        dev_info_t *tmp_dev;
        sector_t start_sector;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
index cb20d0b..1893af6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
-               bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+               bio->bi_rw &= ~REQ_HARDBARRIER;
                if (mddev->pers->make_request(mddev, bio))
                        generic_make_request(bio);
                mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
         * if zero is reached.
         * If an error occurred, call md_error
         *
-        * As we might need to resubmit the request if BIO_RW_BARRIER
+        * As we might need to resubmit the request if REQ_HARDBARRIER
         * causes ENOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
-       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+       int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
        atomic_inc(&mddev->pending_writes);
        if (!test_bit(BarriersNotsupp, &rdev->flags)) {
                struct bio *rbio;
-               rw |= (1<<BIO_RW_BARRIER);
+               rw |= REQ_HARDBARRIER;
                rbio = bio_clone(bio, GFP_NOIO);
                rbio->bi_private = bio;
                rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
        struct completion event;
        int ret;
 
-       rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+       rw |= REQ_SYNC | REQ_UNPLUG;
 
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
index 10597bf..fc56e0f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define        Faulty          1               /* device is known to have a fault */
 #define        In_sync         2               /* device is in_sync with rest of array */
 #define        WriteMostly     4               /* Avoid reading if at all possible */
-#define        BarriersNotsupp 5               /* BIO_RW_BARRIER is not supported */
+#define        BarriersNotsupp 5               /* REQ_HARDBARRIER is not supported */
 #define        AllReserved     6               /* If whole device is reserved for
                                         * one array */
 #define        AutoDetected    7               /* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
                                                         * fails.  Only supported
                                                         */
        struct bio                      *biolist;       /* bios that need to be retried
-                                                        * because BIO_RW_BARRIER is not supported
+                                                        * because REQ_HARDBARRIER is not supported
                                                         */
 
        atomic_t                        recovery_active; /* blocks scheduled, but not written */
index 410fb60..0307d21 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
        if (uptodate)
                multipath_end_bh_io(mp_bh, 0);
-       else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+       else if (!(bio->bi_rw & REQ_RAHEAD)) {
                /*
                 * oops, IO error:
                 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        mp_bh->bio = *bio;
        mp_bh->bio.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-       mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+       mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
                        *bio = *(mp_bh->master_bio);
                        bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-                       bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+                       bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
                        generic_make_request(bio);
index 563abed..6f7af46 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
index a948da8..73cc74f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        struct bio_list bl;
        struct page **behind_pages = NULL;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool do_sync = (bio->bi_rw & REQ_SYNC);
        bool do_barriers;
        mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                finish_wait(&conf->wait_barrier, &w);
        }
        if (unlikely(!mddev->barriers_work &&
-                    bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+                    (bio->bi_rw & REQ_HARDBARRIER))) {
                if (rw == WRITE)
                        md_write_end(mddev);
                bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
-               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+               read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r1_bio;
 
                generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        atomic_set(&r1_bio->remaining, 0);
        atomic_set(&r1_bio->behind_remaining, 0);
 
-       do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+       do_barriers = bio->bi_rw & REQ_HARDBARRIER;
        if (do_barriers)
                set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-                       (do_sync << BIO_RW_SYNCIO);
+               mbio->bi_rw = WRITE | do_barriers | do_sync;
                mbio->bi_private = r1_bio;
 
                if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
                        sync_request_write(mddev, r1_bio);
                        unplug = 1;
                } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-                       /* some requests in the r1bio were BIO_RW_BARRIER
+                       /* some requests in the r1bio were REQ_HARDBARRIER
                         * requests which failed with -EOPNOTSUPP.  Hohumm..
                         * Better resubmit without the barrier.
                         * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
                         * We already have a nr_pending reference on these rdevs.
                         */
                        int i;
-                       const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+                       const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
                        clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
                        clear_bit(R1BIO_Barrier, &r1_bio->state);
                        for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
                                                conf->mirrors[i].rdev->data_offset;
                                        bio->bi_bdev = conf->mirrors[i].rdev->bdev;
                                        bio->bi_end_io = raid1_end_write_request;
-                                       bio->bi_rw = WRITE |
-                                               (do_sync << BIO_RW_SYNCIO);
+                                       bio->bi_rw = WRITE | do_sync;
                                        bio->bi_private = r1_bio;
                                        r1_bio->bios[i] = bio;
                                        generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
                                       (unsigned long long)r1_bio->sector);
                                raid_end_bio_io(r1_bio);
                        } else {
-                               const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+                               const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
                                r1_bio->bios[r1_bio->read_disk] =
                                        mddev->ro ? IO_BLOCKED : NULL;
                                r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
                                bio->bi_sector = r1_bio->sector + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                bio->bi_end_io = raid1_end_read_request;
-                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+                               bio->bi_rw = READ | do_sync;
                                bio->bi_private = r1_bio;
                                unplug = 1;
                                generic_make_request(bio);
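
One subtlety in the raid1 conversion above (the matching raid10 hunks below
share it): do_sync and do_barriers are declared bool, so the masked value
from "(bio->bi_rw & REQ_SYNC)" collapses to 0 or 1, and "READ | do_sync"
can set bit 0, which is now REQ_WRITE.  The old code re-shifted the bool
with "do_sync << BIO_RW_SYNCIO"; with ready-made masks the variable has to
stay wide, and mainline soon changed these declarations to unsigned long.
A sketch of the hazard:

/* Why "READ | do_sync" misbehaves when do_sync is a C99 bool. */
#include <assert.h>
#include <stdbool.h>

#define READ		0
#define REQ_WRITE	(1 << 0)
#define REQ_SYNC	(1 << 5)

int main(void)
{
	unsigned long bi_rw = REQ_SYNC;		/* a sync read bio */

	const bool sync_bool = (bi_rw & REQ_SYNC);	/* collapses to 1 */
	unsigned long rw_bad = READ | sync_bool;
	assert(rw_bad == REQ_WRITE);		/* read turned into a write! */

	const unsigned long sync_mask = (bi_rw & REQ_SYNC); /* keeps the mask */
	unsigned long rw_ok = READ | sync_mask;
	assert(rw_ok == REQ_SYNC);		/* still a sync read */
	return 0;
}
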
index 42e64e4..62ecb66 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool do_sync = (bio->bi_rw & REQ_SYNC);
        struct bio_list bl;
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+               read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r10_bio;
 
                generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        conf->mirrors[d].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+               mbio->bi_rw = WRITE | do_sync;
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
                                raid_end_bio_io(r10_bio);
                                bio_put(bio);
                        } else {
-                               const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+                               const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
                                bio_put(bio);
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
                                bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
                                        + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
-                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+                               bio->bi_rw = READ | do_sync;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = raid10_end_read_request;
                                unplug = 1;
index 96c6902..20ac2f1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        const int rw = bio_data_dir(bi);
        int remaining;
 
-       if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+       if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
                /* Drain all pending writes.  We only really need
                 * to ensure they have been submitted, but this is
                 * easier.
index ee4b691..fda4de3 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or,
                return PTR_ERR(bio);
        }
 
-       bio->bi_rw &= ~(1 << BIO_RW);
+       bio->bi_rw &= ~REQ_WRITE;
        or->in.bio = bio;
        or->in.total_bytes = bio->bi_size;
        return 0;
@@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
        WARN_ON(or->out.bio || or->out.total_bytes);
-       WARN_ON(0 ==  bio_rw_flagged(bio, BIO_RW));
+       WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
        or->out.bio = bio;
        or->out.total_bytes = len;
 }
@@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+       bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
        osd_req_write(or, obj, offset, bio, len);
        return 0;
 }
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
+       WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
index e7bf6ca..8abb2df 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
        if (!bio)
                goto out_bmd;
 
-       bio->bi_rw |= (!write_to_vm << BIO_RW);
+       if (!write_to_vm)
+               bio->bi_rw |= REQ_WRITE;
 
        ret = 0;
 
@@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         * set data direction, and check if mapped pages need bouncing
         */
        if (!write_to_vm)
-               bio->bi_rw |= (1 << BIO_RW);
+               bio->bi_rw |= REQ_WRITE;
 
        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
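
The bio_copy_user_iov() change above is forced by the same mask-versus-bit-
number distinction: the old "(!write_to_vm << BIO_RW)" shifted a 0/1 value
into bit position BIO_RW, but REQ_WRITE is already a mask, so the direction
is now set with an explicit conditional.  A minimal illustration, where
dir_flags() is a made-up helper for the sketch:

/* Setting the data direction with a mask instead of a bit number. */
#include <assert.h>
#include <stdbool.h>

#define REQ_WRITE (1 << 0)

static unsigned long dir_flags(bool write_to_vm)
{
	unsigned long bi_rw = 0;

	/* old: bi_rw |= (!write_to_vm << BIO_RW);  BIO_RW was a bit number */
	if (!write_to_vm)		/* new: REQ_WRITE is already the mask */
		bi_rw |= REQ_WRITE;
	return bi_rw;
}

int main(void)
{
	assert(dir_flags(false) == REQ_WRITE);	/* device-bound data: write */
	assert(dir_flags(true) == 0);		/* filling user memory: read */
	return 0;
}
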
index 34f7c37..64f1008 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
 
-       if (bio->bi_rw & (1 << BIO_RW)) {
+       if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
@@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
        atomic_inc(&fs_info->nr_async_submits);
 
-       if (rw & (1 << BIO_RW_SYNCIO))
+       if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);
 
        btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                          bio, 1);
        BUG_ON(ret);
 
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
@@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
-       if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+       if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
            !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
index 1bff92a..e975d71 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);
 
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
@@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
        bio->bi_size = 0;
 
        bio_add_page(bio, page, failrec->len, start - page_offset(page));
-       if (failed_bio->bi_rw & (1 << BIO_RW))
+       if (failed_bio->bi_rw & REQ_WRITE)
                rw = WRITE;
        else
                rw = READ;
@@ -5642,7 +5642,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
        struct bio_vec *bvec = bio->bi_io_vec;
        u64 start;
        int skip_sum;
-       int write = rw & (1 << BIO_RW);
+       int write = rw & REQ_WRITE;
        int ret = 0;
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
index d6e3af8..dd318ff 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -258,7 +258,7 @@ loop_lock:
 
                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-               if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+               if (cur->bi_rw & REQ_SYNC)
                        num_sync_run++;
 
                submit_bio(cur->bi_rw, cur);
@@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        int max_errors = 0;
        struct btrfs_multi_bio *multi = NULL;
 
-       if (multi_ret && !(rw & (1 << BIO_RW)))
+       if (multi_ret && !(rw & REQ_WRITE))
                stripes_allocated = 1;
 again:
        if (multi_ret) {
@@ -2687,7 +2687,7 @@ again:
                mirror_num = 0;
 
        /* if our multi bio struct is too small, back off and try again */
-       if (rw & (1 << BIO_RW)) {
+       if (rw & REQ_WRITE) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
@@ -2697,7 +2697,7 @@ again:
                        max_errors = 1;
                }
        }
-       if (multi_ret && (rw & (1 << BIO_RW)) &&
+       if (multi_ret && (rw & REQ_WRITE) &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                free_extent_map(em);
@@ -2733,7 +2733,7 @@ again:
        num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-               if (unplug_page || (rw & (1 << BIO_RW)))
+               if (unplug_page || (rw & REQ_WRITE))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -2744,7 +2744,7 @@ again:
                }
 
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-               if (rw & (1 << BIO_RW))
+               if (rw & REQ_WRITE)
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -2755,7 +2755,7 @@ again:
                stripe_index = do_div(stripe_nr, factor);
                stripe_index *= map->sub_stripes;
 
-               if (unplug_page || (rw & (1 << BIO_RW)))
+               if (unplug_page || (rw & REQ_WRITE))
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
@@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        struct btrfs_pending_bios *pending_bios;
 
        /* don't bother with additional async steps for reads, right now */
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                bio_get(bio);
                submit_bio(rw, bio);
                bio_put(bio);
@@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        bio->bi_rw |= rw;
 
        spin_lock(&device->io_lock);
-       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+       if (bio->bi_rw & REQ_SYNC)
                pending_bios = &device->pending_sync_bios;
        else
                pending_bios = &device->pending_bios;
index 4337cad..e273220 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -599,7 +599,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
                        } else {
                                bio = master_dev->bio;
                                /* FIXME: bio_set_dir() */
-                               bio->bi_rw |= (1 << BIO_RW);
+                               bio->bi_rw |= REQ_WRITE;
                        }
 
                        osd_req_write(or, &ios->obj, per_dev->offset, bio,
index efc3539..cde1248 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
                goto skip_barrier;
        get_bh(bh);
-       submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh);
+       submit_bh(WRITE_BARRIER | REQ_META, bh);
        wait_on_buffer(bh);
        if (buffer_eopnotsupp(bh)) {
                clear_buffer_eopnotsupp(bh);
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
                lock_buffer(bh);
 skip_barrier:
                get_bh(bh);
-               submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
+               submit_bh(WRITE_SYNC | REQ_META, bh);
                wait_on_buffer(bh);
        }
        if (!buffer_uptodate(bh))
index 18176d0..f3b071f 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
        struct buffer_head *bh, *head;
        int nr_underway = 0;
-       int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
-                       WRITE_SYNC_PLUG : WRITE));
+       int write_op = REQ_META |
+               (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
 
        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        }
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
+       submit_bh(READ_SYNC | REQ_META, bh);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh);
+               ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
 
        dblock++;
        extlen--;
index 3593b3a..fd4f894 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -275,7 +275,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
+       submit_bio(READ_SYNC | REQ_META, bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
index 2e6a272..4588fb9 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                 * Last BIO is always sent through the following
                 * submission.
                 */
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
                res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
        }
 
index 7fc5606..4d379c8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -138,55 +138,83 @@ struct bio {
 #define BIO_POOL_IDX(bio)      ((bio)->bi_flags >> BIO_POOL_OFFSET)    
 
 /*
- * bio bi_rw flags
- *
- * bit 0 -- data direction
- *     If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- fail fast device errors
- * bit 2 -- fail fast transport errors
- * bit 3 -- fail fast driver errors
- * bit 4 -- rw-ahead when set
- * bit 5 -- barrier
- *     Insert a serialization point in the IO queue, forcing previously
- *     submitted IO to be completed before this one is issued.
- * bit 6 -- synchronous I/O hint.
- * bit 7 -- Unplug the device immediately after submitting this bio.
- * bit 8 -- metadata request
- *     Used for tracing to differentiate metadata and data IO. May also
- *     get some preferential treatment in the IO scheduler
- * bit 9 -- discard sectors
- *     Informs the lower level device that this range of sectors is no longer
- *     used by the file system and may thus be freed by the device. Used
- *     for flash based storage.
- *     Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-       one has been submitted, even if it is a SYNC request.
+ * Request flags.  For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio.  Note that some flags are only valid in either one.
  */
-enum bio_rw_flags {
-       BIO_RW,
-       BIO_RW_FAILFAST_DEV,
-       BIO_RW_FAILFAST_TRANSPORT,
-       BIO_RW_FAILFAST_DRIVER,
-       /* above flags must match REQ_* */
-       BIO_RW_AHEAD,
-       BIO_RW_BARRIER,
-       BIO_RW_SYNCIO,
-       BIO_RW_UNPLUG,
-       BIO_RW_META,
-       BIO_RW_DISCARD,
-       BIO_RW_NOIDLE,
+enum rq_flag_bits {
+       /* common flags */
+       __REQ_WRITE,            /* not set, read. set, write */
+       __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
+       __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+       __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
+
+       __REQ_HARDBARRIER,      /* may not be passed by drive either */
+       __REQ_SYNC,             /* request is sync (sync write or read) */
+       __REQ_META,             /* metadata io request */
+       __REQ_DISCARD,          /* request to discard sectors */
+       __REQ_NOIDLE,           /* don't anticipate more IO after this one */
+
+       /* bio only flags */
+       __REQ_UNPLUG,           /* unplug the immediately after submission */
+       __REQ_RAHEAD,           /* read ahead, can fail anytime */
+
+       /* request only flags */
+       __REQ_SORTED,           /* elevator knows about this request */
+       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
+       __REQ_FUA,              /* forced unit access */
+       __REQ_NOMERGE,          /* don't touch this for merging */
+       __REQ_STARTED,          /* drive already may have started this one */
+       __REQ_DONTPREP,         /* don't call prep for this one */
+       __REQ_QUEUED,           /* uses queueing */
+       __REQ_ELVPRIV,          /* elevator private data attached */
+       __REQ_FAILED,           /* set if the request failed */
+       __REQ_QUIET,            /* don't worry about errors */
+       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
+       __REQ_ORDERED_COLOR,    /* is before or after barrier */
+       __REQ_ALLOCED,          /* request came from our alloc pool */
+       __REQ_COPY_USER,        /* contains copies of user pages */
+       __REQ_INTEGRITY,        /* integrity metadata has been remapped */
+       __REQ_IO_STAT,          /* account I/O stat */
+       __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
+       __REQ_NR_BITS,          /* stops here */
 };
 
-/*
- * First four bits must match between bio->bi_rw and rq->cmd_flags, make
- * that explicit here.
- */
-#define BIO_RW_RQ_MASK         0xf
-
-static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
-{
-       return (bio->bi_rw & (1 << flag)) != 0;
-}
+#define REQ_WRITE              (1 << __REQ_WRITE)
+#define REQ_FAILFAST_DEV       (1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
+#define REQ_HARDBARRIER                (1 << __REQ_HARDBARRIER)
+#define REQ_SYNC               (1 << __REQ_SYNC)
+#define REQ_META               (1 << __REQ_META)
+#define REQ_DISCARD            (1 << __REQ_DISCARD)
+#define REQ_NOIDLE             (1 << __REQ_NOIDLE)
+
+#define REQ_FAILFAST_MASK \
+       (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
+        REQ_META| REQ_DISCARD | REQ_NOIDLE)
+
+#define REQ_UNPLUG             (1 << __REQ_UNPLUG)
+#define REQ_RAHEAD             (1 << __REQ_RAHEAD)
+
+#define REQ_SORTED             (1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER                (1 << __REQ_SOFTBARRIER)
+#define REQ_FUA                        (1 << __REQ_FUA)
+#define REQ_NOMERGE            (1 << __REQ_NOMERGE)
+#define REQ_STARTED            (1 << __REQ_STARTED)
+#define REQ_DONTPREP           (1 << __REQ_DONTPREP)
+#define REQ_QUEUED             (1 << __REQ_QUEUED)
+#define REQ_ELVPRIV            (1 << __REQ_ELVPRIV)
+#define REQ_FAILED             (1 << __REQ_FAILED)
+#define REQ_QUIET              (1 << __REQ_QUIET)
+#define REQ_PREEMPT            (1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
+#define REQ_ALLOCED            (1 << __REQ_ALLOCED)
+#define REQ_COPY_USER          (1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY          (1 << __REQ_INTEGRITY)
+#define REQ_IO_STAT            (1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE        (1 << __REQ_MIXED_MERGE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)                bio_iovec((bio))->bv_offset
 #define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) \
+       (((bio)->bi_rw & REQ_HARDBARRIER) && \
+        !bio_has_data(bio) && \
+        !((bio)->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
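For illustration, here is a minimal userspace sketch of the unified scheme (bit positions copied from the rq_flag_bits enum above; this is an illustration, not the kernel header). Because bio->bi_rw and rq->cmd_flags now share one namespace, the flags common to both can be inherited with a single mask instead of a per-flag translation:

#include <assert.h>
#include <stdio.h>

/* Bit positions mirror the rq_flag_bits enum above. */
enum {
        __REQ_WRITE, __REQ_FAILFAST_DEV, __REQ_FAILFAST_TRANSPORT,
        __REQ_FAILFAST_DRIVER, __REQ_HARDBARRIER, __REQ_SYNC,
        __REQ_META, __REQ_DISCARD, __REQ_NOIDLE,
};

#define REQ_WRITE       (1 << __REQ_WRITE)
#define REQ_SYNC        (1 << __REQ_SYNC)
#define REQ_META        (1 << __REQ_META)
#define REQ_FAILFAST_MASK \
        ((1 << __REQ_FAILFAST_DEV) | (1 << __REQ_FAILFAST_TRANSPORT) | \
         (1 << __REQ_FAILFAST_DRIVER))
#define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | (1 << __REQ_HARDBARRIER) | \
         REQ_SYNC | REQ_META | (1 << __REQ_DISCARD) | (1 << __REQ_NOIDLE))

int main(void)
{
        unsigned int bi_rw = REQ_WRITE | REQ_SYNC | REQ_META;  /* sync metadata write */
        unsigned int cmd_flags = 0;

        cmd_flags |= bi_rw & REQ_COMMON_MASK;   /* one mask, no per-flag switch */
        assert(cmd_flags == bi_rw);             /* all three bits are common flags */
        printf("cmd_flags = %#x\n", cmd_flags);
        return 0;
}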
index 3ecd28e..3fc0f59 100644 (file)
@@ -84,70 +84,6 @@ enum {
        REQ_LB_OP_FLUSH = 0x41,         /* flush request */
 };
 
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-       __REQ_RW,               /* not set, read. set, write */
-       __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
-       __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-       __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
-       /* above flags must match BIO_RW_* */
-       __REQ_DISCARD,          /* request to discard sectors */
-       __REQ_SORTED,           /* elevator knows about this request */
-       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
-       __REQ_HARDBARRIER,      /* may not be passed by drive either */
-       __REQ_FUA,              /* forced unit access */
-       __REQ_NOMERGE,          /* don't touch this for merging */
-       __REQ_STARTED,          /* drive already may have started this one */
-       __REQ_DONTPREP,         /* don't call prep for this one */
-       __REQ_QUEUED,           /* uses queueing */
-       __REQ_ELVPRIV,          /* elevator private data attached */
-       __REQ_FAILED,           /* set if the request failed */
-       __REQ_QUIET,            /* don't worry about errors */
-       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
-       __REQ_ORDERED_COLOR,    /* is before or after barrier */
-       __REQ_RW_SYNC,          /* request is sync (sync write or read) */
-       __REQ_ALLOCED,          /* request came from our alloc pool */
-       __REQ_RW_META,          /* metadata io request */
-       __REQ_COPY_USER,        /* contains copies of user pages */
-       __REQ_INTEGRITY,        /* integrity metadata has been remapped */
-       __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
-       __REQ_IO_STAT,          /* account I/O stat */
-       __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
-       __REQ_NR_BITS,          /* stops here */
-};
-
-#define REQ_RW         (1 << __REQ_RW)
-#define REQ_FAILFAST_DEV       (1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD    (1 << __REQ_DISCARD)
-#define REQ_SORTED     (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER        (1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER        (1 << __REQ_HARDBARRIER)
-#define REQ_FUA                (1 << __REQ_FUA)
-#define REQ_NOMERGE    (1 << __REQ_NOMERGE)
-#define REQ_STARTED    (1 << __REQ_STARTED)
-#define REQ_DONTPREP   (1 << __REQ_DONTPREP)
-#define REQ_QUEUED     (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV    (1 << __REQ_ELVPRIV)
-#define REQ_FAILED     (1 << __REQ_FAILED)
-#define REQ_QUIET      (1 << __REQ_QUIET)
-#define REQ_PREEMPT    (1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC    (1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED    (1 << __REQ_ALLOCED)
-#define REQ_RW_META    (1 << __REQ_RW_META)
-#define REQ_COPY_USER  (1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY  (1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE     (1 << __REQ_NOIDLE)
-#define REQ_IO_STAT    (1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE        (1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK      (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-                                REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB    16
 
 /*
@@ -631,7 +567,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-       return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+       return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
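Spelled out, the renamed helper says: every read is sync by definition, and a write is sync only if REQ_SYNC is set. A small userspace check (flag values redefined locally with the bit positions from the enum above; not kernel code):

#include <assert.h>
#include <stdbool.h>

#define REQ_WRITE       (1 << 0)        /* __REQ_WRITE */
#define REQ_SYNC        (1 << 5)        /* __REQ_SYNC */

static bool rw_is_sync(unsigned int rw_flags)
{
        return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

int main(void)
{
        assert(rw_is_sync(0));                          /* plain read */
        assert(rw_is_sync(REQ_SYNC));                   /* sync read */
        assert(!rw_is_sync(REQ_WRITE));                 /* async write */
        assert(rw_is_sync(REQ_WRITE | REQ_SYNC));       /* sync write */
        return 0;
}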
index 5988788..c5c9294 100644 (file)
@@ -144,29 +144,31 @@ struct inodes_stat_t {
  *                     of this IO.
  *
  */
-#define RW_MASK                1
-#define RWA_MASK       2
-#define READ 0
-#define WRITE 1
-#define READA 2                /* read-ahead  - don't block if no resources */
-#define SWRITE 3       /* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC      (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define READ_META      (READ | (1 << BIO_RW_META))
-#define WRITE_SYNC_PLUG        (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define WRITE_SYNC     (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_ODIRECT_PLUG     (WRITE | (1 << BIO_RW_SYNCIO))
-#define WRITE_META     (WRITE | (1 << BIO_RW_META))
-#define SWRITE_SYNC_PLUG       \
-                       (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define SWRITE_SYNC    (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_BARRIER  (WRITE_SYNC | (1 << BIO_RW_BARRIER))
+#define RW_MASK                1
+#define RWA_MASK               2
+
+#define READ                   0
+#define WRITE                  1
+#define READA                  2 /* readahead - don't block if no resources */
+#define SWRITE                 3 /* for ll_rw_block() - wait for buffer lock */
+
+#define READ_SYNC              (READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_META              (READ | REQ_META)
+#define WRITE_SYNC_PLUG        (WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
+#define WRITE_ODIRECT_PLUG     (WRITE | REQ_SYNC)
+#define WRITE_META             (WRITE | REQ_META)
+#define WRITE_BARRIER          (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+                                REQ_HARDBARRIER)
+#define SWRITE_SYNC_PLUG       (SWRITE | REQ_SYNC | REQ_NOIDLE)
+#define SWRITE_SYNC            (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
  * parts of device that are now unused by the file system.
  */
-#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
-#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER      (WRITE | REQ_DISCARD)
+#define DISCARD_BARRIER        (WRITE | REQ_DISCARD | REQ_HARDBARRIER)
 
 #define SEL_IN         1
 #define SEL_OUT                2
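A hypothetical caller (not part of this patch) shows how the new composite defines read at the submission site; submit_bio() takes the combined flags word, and the same REQ_* bits are what the driver later sees in rq->cmd_flags:

#include <linux/bio.h>
#include <linux/fs.h>

/* Sketch only: completion callback and error handling omitted. */
static void write_page_sync(struct block_device *bdev, sector_t sector,
                            struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        /* WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG */
        submit_bio(WRITE_SYNC, bio);
}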
index 97024fd..83bbc7c 100644 (file)
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
                struct page *page, struct bio **bio_chain)
 {
-       const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+       const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
        struct bio *bio;
 
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
index 4f14994..3b4a695 100644 (file)
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };
 
+#define BLK_TC_HARDBARRIER     BLK_TC_BARRIER
+#define BLK_TC_RAHEAD          BLK_TC_AHEAD
+
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
-         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
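The shift arithmetic is the one subtle part: it moves a REQ_* flag from its rq_flag_bits position up into the matching BLK_TC_* slot in the upper 16 action bits of the trace word. Worked through for SYNC (assuming BLK_TC_SYNC == (1 << 3) and BLK_TC_SHIFT == 16, as in blktrace_api.h):

/*
 * MASK_TC_BIT(rw, SYNC)
 *   == (rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *   == (rw & (1 << 5)) << (3 + 16 - 5)
 *
 * i.e. bit 5 is relocated to bit 19, which is BLK_TC_SYNC << BLK_TC_SHIFT.
 */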
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                return;
 
        what |= ddir_act[rw & WRITE];
-       what |= MASK_TC_BIT(rw, BARRIER);
-       what |= MASK_TC_BIT(rw, SYNCIO);
-       what |= MASK_TC_BIT(rw, AHEAD);
+       what |= MASK_TC_BIT(rw, HARDBARRIER);
+       what |= MASK_TC_BIT(rw, SYNC);
+       what |= MASK_TC_BIT(rw, RAHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);
 
@@ -662,7 +665,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                return;
 
        if (rq->cmd_flags & REQ_DISCARD)
-               rw |= (1 << BIO_RW_DISCARD);
+               rw |= REQ_DISCARD;
 
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
@@ -1755,20 +1758,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
        if (rw & WRITE)
                rwbs[i++] = 'W';
-       else if (rw & 1 << BIO_RW_DISCARD)
+       else if (rw & REQ_DISCARD)
                rwbs[i++] = 'D';
        else if (bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';
 
-       if (rw & 1 << BIO_RW_AHEAD)
+       if (rw & REQ_RAHEAD)
                rwbs[i++] = 'A';
-       if (rw & 1 << BIO_RW_BARRIER)
+       if (rw & REQ_HARDBARRIER)
                rwbs[i++] = 'B';
-       if (rw & 1 << BIO_RW_SYNCIO)
+       if (rw & REQ_SYNC)
                rwbs[i++] = 'S';
-       if (rw & 1 << BIO_RW_META)
+       if (rw & REQ_META)
                rwbs[i++] = 'M';
 
        rwbs[i] = '\0';
@@ -1780,7 +1783,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
        int bytes;
 
        if (rq->cmd_flags & REQ_DISCARD)
-               rw |= (1 << BIO_RW_DISCARD);
+               rw |= REQ_DISCARD;
 
        bytes = blk_rq_bytes(rq);
 
index 31a3b96..2dee975 100644 (file)
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);