block/blk-barrier.c
/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
{
        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
            prepare_flush_fn == NULL) {
                printk(KERN_ERR "%s: prepare_flush_fn required\n",
                                                                __FUNCTION__);
                return -EINVAL;
        }

        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
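
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * write-back caching driver might register a drain + flush ordering
 * mode.  mydrv_prepare_flush() and mydrv_setup_ordering() are
 * hypothetical names; the flush setup is entirely device specific
 * (a SCSI-type driver, for instance, fills in a SYNCHRONIZE CACHE
 * command here).
 */
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
        /* build the device-specific cache-flush command in @rq */
}

static inline int mydrv_setup_ordering(struct request_queue *q)
{
        /* order barriers by draining the queue, flushing before and after */
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                 mydrv_prepare_flush);
}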

/*
 * Cache flushing for ordered writes handling.
 *
 * q->ordseq tracks the progress of the ordered sequence as a bitmap of
 * QUEUE_ORDSEQ_* flags: each step sets its bit when it completes, so the
 * current step is the lowest bit that is still clear, which is what the
 * ffz() below computes.
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (!blk_fs_request(rq))
                return QUEUE_ORDSEQ_DRAIN;

        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}

void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;

        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
                BUG();
}

static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline struct request *start_ordered(struct request_queue *q,
                                            struct request *rq)
{
        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * Prep proxy barrier request.
         */
        blkdev_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = &q->bar_rq;
        blk_rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
        if (q->ordered & QUEUE_ORDERED_FUA)
                rq->cmd_flags |= REQ_FUA;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
        rq->end_io = bar_end_io;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head of the queue, we need to queue them in reverse order.
         * Note that we rely on the fact that no fs request uses
         * ELEVATOR_INSERT_FRONT, so no fs request can get in between
         * the ordered sequence.  If this request is an empty barrier,
         * we don't need a postflush at all, since no data will be
         * written between the pre and post flush; a single flush
         * suffices.
         */
        if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
                q->ordseq |= QUEUE_ORDSEQ_DRAIN;
        else
                rq = NULL;

        return rq;
}
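
/*
 * Illustrative trace, not part of the original file: for a queue that
 * uses both a preflush and a postflush (e.g. QUEUE_ORDERED_DRAIN_FLUSH)
 * handling a non-empty barrier, the three ELEVATOR_INSERT_FRONT
 * insertions above leave the head of the queue in dispatch order
 *
 *      pre_flush_rq -> bar_rq -> post_flush_rq
 *
 * and blk_do_ordered() below holds each back until the preceding step
 * (including the initial drain of in-flight requests) has completed.
 */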

int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

        if (!q->ordseq) {
                if (!is_barrier)
                        return 1;

                if (q->next_ordered != QUEUE_ORDERED_NONE) {
                        *rqp = start_ordered(q, rq);
                        return 1;
                } else {
                        /*
                         * This can happen when the queue switches to
                         * ORDERED_NONE while this request is on it.
                         */
                        blkdev_dequeue_request(rq);
                        if (__blk_end_request(rq, -EOPNOTSUPP,
                                              blk_rq_bytes(rq)))
                                BUG();
                        *rqp = NULL;
                        return 0;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (!blk_fs_request(rq) &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return 1;

        if (q->ordered & QUEUE_ORDERED_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return 1;
}
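
/*
 * Illustrative sketch, not part of the original file and modeled on the
 * dispatch helper of this kernel era: the dispatcher passes the request
 * at the head of the queue to blk_do_ordered() before handing it to the
 * driver.  A zero return means the request was consumed (skip it); a
 * non-zero return dispatches whatever *rqp now points to, which may be
 * a substituted proxy flush request or NULL to stall until the ordered
 * sequence advances.  example_next_request() is a hypothetical name.
 */
static inline struct request *example_next_request(struct request_queue *q)
{
        struct request *rq;

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                if (blk_do_ordered(q, &rq))
                        return rq;      /* may be NULL: nothing to issue yet */
        }
        return NULL;
}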

static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        complete(bio->bi_private);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @error_sector:       if non-NULL, the sector where a flush error occurred
 *                      is stored here
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  The caller can supply room for storing the error offset
 *    in case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
        submit_bio(1 << BIO_RW_BARRIER, bio);

        wait_for_completion(&wait);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be copied
         * from rq->sector.
         */
        if (error_sector)
                *error_sector = bio->bi_sector;

        ret = 0;
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
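
/*
 * Illustrative usage sketch, not part of the original file:
 * example_flush_device() is a hypothetical helper showing how a caller
 * (e.g. a filesystem at the end of fsync) might flush a device's write
 * cache, treating "flush not supported" as success.
 */
static inline int example_flush_device(struct block_device *bdev)
{
        int err;

        err = blkdev_issue_flush(bdev, NULL);
        if (err == -EOPNOTSUPP)
                err = 0;        /* no volatile cache to flush */
        return err;
}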