block: make barrier completion more robust
block/blk-barrier.c
/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
{
        if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
                                             QUEUE_ORDERED_DO_POSTFLUSH))) {
                printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }

        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "%s: bad value %d\n", __func__, ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
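
/*
 * Usage sketch (illustrative only, not part of this file): a driver with a
 * volatile write cache would typically register drain + flush ordering when
 * it sets up its queue.  The names mydev_prepare_flush() and
 * mydev_init_queue() are hypothetical, modelled on drivers of this era that
 * issue a driver-specific flush command from prepare_flush_fn:
 *
 *      static void mydev_prepare_flush(struct request_queue *q,
 *                                      struct request *rq)
 *      {
 *              rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
 *              rq->cmd[0] = REQ_LB_OP_FLUSH;
 *      }
 *
 *      static int mydev_init_queue(struct request_queue *q)
 *      {
 *              return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
 *                                       mydev_prepare_flush);
 *      }
 */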

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}
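
/*
 * Worked example of the bitmap logic above (constant values as defined in
 * include/linux/blkdev.h at the time of writing, quoted only for
 * illustration): once QUEUE_ORDSEQ_STARTED (0x01) is set and the DRAIN
 * (0x02) and PREFLUSH (0x04) stages have completed, q->ordseq == 0x07;
 * ffz() finds bit 3, so the current stage is 1 << 3 == QUEUE_ORDSEQ_BAR
 * (0x08).
 */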

unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (!blk_fs_request(rq))
                return QUEUE_ORDSEQ_DRAIN;

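        /*
         * REQ_ORDERED_COLOR is alternated by the elevator each time a
         * barrier request is queued.  A request carrying the same color
         * as the original barrier was issued before it and still belongs
         * to the drain stage; a different color means it was issued after
         * the barrier and is ordered behind the whole sequence.
         */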
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return false;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;

        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
                BUG();

        return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        unsigned skip = 0;

        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /* stash away the original request */
        elv_dequeue_request(q, rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue the ordered sequence.  As the requests are stacked at
         * the head of the queue, they have to be inserted in reverse
         * order.  Note that we rely on the fact that no fs request uses
         * ELEVATOR_INSERT_FRONT, so no fs request can slip in between
         * the requests of the ordered sequence.  If this request is an
         * empty barrier, no data is written between the pre and post
         * flush, so the postflush is skipped and a single flush
         * suffices.
         */
        if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
            !blk_empty_barrier(q->orig_bar_rq)) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                        rq->cmd_flags |= REQ_RW;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                skip |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;

        *rqp = rq;

        /*
         * Complete the skipped stages.  If the whole sequence is
         * complete, return false to tell the elevator that this request
         * is gone.
         */
        return !blk_ordered_complete_seq(q, skip, 0);
}

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

        if (!q->ordseq) {
                if (!is_barrier)
                        return true;

                if (q->next_ordered != QUEUE_ORDERED_NONE)
                        return start_ordered(q, rqp);
                else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        elv_dequeue_request(q, rq);
                        if (__blk_end_request(rq, -EOPNOTSUPP,
                                              blk_rq_bytes(rq)))
                                BUG();
                        *rqp = NULL;
                        return false;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (!blk_fs_request(rq) &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return true;

        if (q->ordered & QUEUE_ORDERED_BY_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return true;
}
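
/*
 * Rough usage sketch (illustrative only, not part of this file): the
 * elevator's dispatch loop is expected to consume the return value along
 * these lines.  false means the request was consumed here (ended, or
 * replaced by the ordered sequence); true with *rqp cleared means dispatch
 * must hold off until the sequence advances; true with *rqp set means the
 * request may be handed to the driver.
 *
 *      while (!list_empty(&q->queue_head)) {
 *              rq = list_entry_rq(q->queue_head.next);
 *              if (blk_do_ordered(q, &rq))
 *                      return rq;      (may be NULL: nothing dispatchable yet)
 *                                      (false: look at the next queued request)
 *      }
 */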

static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        complete(bio->bi_private);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @error_sector:       if non-NULL, filled in with the sector at which the
 *                      error occurred, if the driver reports one
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  Callers can supply room for storing the error offset in
 *    case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
        submit_bio(WRITE_BARRIER, bio);

        wait_for_completion(&wait);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be copied
         * from rq->sector.
         */
        if (error_sector)
                *error_sector = bio->bi_sector;

        ret = 0;
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
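
/*
 * Example (illustrative only, not part of this file): a filesystem that
 * wants the device cache flushed at fsync time would typically do
 *
 *      ret = blkdev_issue_flush(sb->s_bdev, NULL);
 *      if (ret == -EOPNOTSUPP)
 *              ret = 0;        (no flushable cache, nothing to do)
 *
 * passing NULL because it has no use for the error sector.
 */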

static void blkdev_discard_end_io(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:       blockdev to issue discard for
 * @sector:     start sector
 * @nr_sects:   number of sectors to discard
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question. Does not wait.
 */
int blkdev_issue_discard(struct block_device *bdev,
                         sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!q->prepare_discard_fn)
                return -EOPNOTSUPP;

        while (nr_sects && !ret) {
                bio = bio_alloc(gfp_mask, 0);
                if (!bio)
                        return -ENOMEM;

                bio->bi_end_io = blkdev_discard_end_io;
                bio->bi_bdev = bdev;

                bio->bi_sector = sector;

                if (nr_sects > q->max_hw_sectors) {
                        bio->bi_size = q->max_hw_sectors << 9;
                        nr_sects -= q->max_hw_sectors;
                        sector += q->max_hw_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                bio_get(bio);
                submit_bio(DISCARD_BARRIER, bio);

                /* Check if it failed immediately */
                if (bio_flagged(bio, BIO_EOPNOTSUPP))
                        ret = -EOPNOTSUPP;
                else if (!bio_flagged(bio, BIO_UPTODATE))
                        ret = -EIO;
                bio_put(bio);
        }
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
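
/*
 * Example (illustrative only, not part of this file): a filesystem freeing
 * an extent on a thin device could hand the range straight to the device.
 * "start" and "len" are hypothetical byte-granular variables of the caller,
 * converted to 512-byte sectors for the call:
 *
 *      ret = blkdev_issue_discard(sb->s_bdev, start >> 9, len >> 9,
 *                                 GFP_KERNEL);
 *      if (ret == -EOPNOTSUPP)
 *              ret = 0;        (device does not support discard)
 */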