/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_ordered - set up the queue's ordered write support
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function to indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
{
        if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
                                             QUEUE_ORDERED_DO_POSTFLUSH))) {
                printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }

        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
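
/*
 * Example (illustrative only, not from this file): a driver whose
 * device has a volatile write cache would pick a *_FLUSH variant and
 * supply a helper that rewrites rq into a cache-flush command, loosely
 * modeled on the SCSI disk driver's sd_prepare_flush().  The names
 * my_prepare_flush and MY_FLUSH_TIMEOUT are made up:
 *
 *      static void my_prepare_flush(struct request_queue *q,
 *                                   struct request *rq)
 *      {
 *              rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *              rq->cmd[0] = SYNCHRONIZE_CACHE;
 *              rq->cmd_len = 10;
 *              rq->timeout = MY_FLUSH_TIMEOUT;
 *      }
 *
 *      blk_queue_ordered(disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
 *                        my_prepare_flush);
 */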

/*
 * Cache flushing for ordered writes handling
 */
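
/*
 * q->ordseq is a bitmask of the QUEUE_ORDSEQ_* stages completed so
 * far; the current stage is the lowest stage still pending, i.e.
 * 1 << ffz(q->ordseq).  A worked example, using the QUEUE_ORDSEQ_*
 * values from include/linux/blkdev.h in this tree:
 *
 *      ordseq = STARTED|DRAIN|PREFLUSH = 0x01|0x02|0x04 = 0x07
 *      ffz(0x07) = 3, so blk_ordered_cur_seq() returns 1 << 3 = 0x08,
 *      i.e. QUEUE_ORDSEQ_BAR - the barrier request itself is next.
 */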
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (!blk_fs_request(rq))
                return QUEUE_ORDSEQ_DRAIN;

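        /*
         * REQ_ORDERED_COLOR alternates with each barrier: an fs request
         * that shares the barrier's color was queued before it and must
         * drain first; the opposite color means the request was queued
         * after the barrier and already belongs past this sequence.
         */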
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}

void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;

        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
                BUG();
}

static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline struct request *start_ordered(struct request_queue *q,
                                            struct request *rq)
{
        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /* stash away the original request */
        elv_dequeue_request(q, rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head, we need to queue them in reverse order.  Note that we
         * rely on the fact that no fs request uses ELEVATOR_INSERT_FRONT,
         * so no fs request can slip in between the ordered sequence.  If
         * this request is an empty barrier, we never need a postflush:
         * no data is written between the pre and post flush, so a
         * single flush suffices.
         */
        if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
            !blk_empty_barrier(q->orig_bar_rq)) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                        rq->cmd_flags |= REQ_RW;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                q->ordseq |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
                rq = NULL;
        else
                q->ordseq |= QUEUE_ORDSEQ_DRAIN;

        return rq;
}

int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

        if (!q->ordseq) {
                if (!is_barrier)
                        return 1;

                if (q->next_ordered != QUEUE_ORDERED_NONE) {
                        *rqp = start_ordered(q, rq);
                        return 1;
                } else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        elv_dequeue_request(q, rq);
                        if (__blk_end_request(rq, -EOPNOTSUPP,
                                              blk_rq_bytes(rq)))
                                BUG();
                        *rqp = NULL;
                        return 0;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (!blk_fs_request(rq) &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return 1;

        if (q->ordered & QUEUE_ORDERED_BY_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return 1;
}
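
/*
 * For context: blk_do_ordered() is driven from the dispatch path with
 * the queue lock held.  A simplified sketch of its caller, modeled on
 * __elv_next_request() in block/elevator.c (details vary by kernel
 * version):
 *
 *      while (!list_empty(&q->queue_head)) {
 *              rq = list_entry_rq(q->queue_head.next);
 *              if (blk_do_ordered(q, &rq))
 *                      return rq;      (rq may be NULL: dispatch stalls
 *                                       until the sequence progresses)
 *      }
 */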

static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        complete(bio->bi_private);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @error_sector:       if non-NULL, where to store the sector of a flush error
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  The caller can supply room for storing the error offset
 *    in case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
        submit_bio(WRITE_BARRIER, bio);

        wait_for_completion(&wait);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be copied
         * from rq->sector.
         */
        if (error_sector)
                *error_sector = bio->bi_sector;

        ret = 0;
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
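
/*
 * Example (hypothetical caller): an fsync-style path draining the
 * device's write cache after data has been written, treating lack of
 * flush support as success:
 *
 *      int err = blkdev_issue_flush(sb->s_bdev, NULL);
 *      if (err == -EOPNOTSUPP)
 *              err = 0;
 */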

static void blkdev_discard_end_io(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:       blockdev to issue discard for
 * @sector:     start sector
 * @nr_sects:   number of sectors to discard
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question. Does not wait.
 */
int blkdev_issue_discard(struct block_device *bdev,
                         sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!q->prepare_discard_fn)
                return -EOPNOTSUPP;

        while (nr_sects && !ret) {
                bio = bio_alloc(gfp_mask, 0);
                if (!bio)
                        return -ENOMEM;

                bio->bi_end_io = blkdev_discard_end_io;
                bio->bi_bdev = bdev;

                bio->bi_sector = sector;

                if (nr_sects > q->max_hw_sectors) {
                        bio->bi_size = q->max_hw_sectors << 9;
                        nr_sects -= q->max_hw_sectors;
                        sector += q->max_hw_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
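                /*
                 * Grab an extra reference: blkdev_discard_end_io() drops
                 * its own with bio_put(), and the bio must stay around
                 * for the flag checks below.
                 */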
                bio_get(bio);
                submit_bio(DISCARD_BARRIER, bio);

                /* Check if it failed immediately */
                if (bio_flagged(bio, BIO_EOPNOTSUPP))
                        ret = -EOPNOTSUPP;
                else if (!bio_flagged(bio, BIO_UPTODATE))
                        ret = -EIO;
                bio_put(bio);
        }
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
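
/*
 * Example (hypothetical caller): a filesystem returning a freed extent
 * to an SSD; start and count come from the caller's own allocator:
 *
 *      ret = blkdev_issue_discard(sb->s_bdev, start_sector, nr_sectors,
 *                                 GFP_NOFS);
 *      if (ret == -EOPNOTSUPP)
 *              ret = 0;
 */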