blk-flush.c revision 18741986a4b1dc4b1f171634c4191abc3b0fa023
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
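
/*
 * Illustrative walk-through (a sketch; the request is hypothetical): how a
 * single REQ_WRITE | REQ_FLUSH | REQ_FUA request with data moves through
 * the machinery described above on a device with a writeback cache and no
 * FUA support.  The states are the REQ_FSEQ_* steps defined below.
 *
 *	blk_flush_policy()	-> REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
 *				   REQ_FSEQ_POSTFLUSH
 *	PREFLUSH		-> the request waits on
 *				   q->flush_queue[q->flush_pending_idx];
 *				   blk_kick_flush() issues one flush for all
 *				   waiters and toggles pending_idx (C1)
 *	DATA			-> the request is dispatched as a normal
 *				   write; its bio is updated but not finished
 *				   because REQ_FLUSH_SEQ is set
 *	POSTFLUSH		-> the request waits again and may share the
 *				   flush with other waiters; issuing is
 *				   deferred while DATA is in flight (C2), but
 *				   at most for FLUSH_PENDING_TIMEOUT (C3)
 *	REQ_FSEQ_DONE		-> the request and its bio are completed
 */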

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
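
/*
 * Example (a sketch, not compiled here): what blk_flush_policy() returns
 * for a data-carrying REQ_FLUSH | REQ_FUA write, depending on the flush
 * capabilities the queue advertises in q->flush_flags:
 *
 *	unsigned int policy = blk_flush_policy(q->flush_flags, rq);
 *
 *	q->flush_flags == 0 (no writeback cache)
 *		policy == REQ_FSEQ_DATA
 *	q->flush_flags == REQ_FLUSH | REQ_FUA
 *		policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *	q->flush_flags == REQ_FLUSH (writeback cache, no FUA)
 *		policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
 */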

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
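
/*
 * Example (a sketch): rq->flush.seq accumulates the REQ_FSEQ_* bits that
 * have already been completed, so the lowest clear bit names the next
 * step:
 *
 *	rq->flush.seq == 0
 *		blk_flush_cur_seq(rq) == REQ_FSEQ_PREFLUSH
 *	rq->flush.seq == REQ_FSEQ_PREFLUSH
 *		blk_flush_cur_seq(rq) == REQ_FSEQ_DATA
 *	rq->flush.seq == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
 *		blk_flush_cur_seq(rq) == REQ_FSEQ_DONE
 */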

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;

	blk_clear_rq_complete(rq);
}

static void mq_flush_run(struct work_struct *work)
{
	struct request *rq;

	rq = container_of(work, struct request, mq_flush_work);

	memset(&rq->csd, 0, sizeof(rq->csd));
	blk_mq_run_request(rq, true, false);
}

static bool blk_flush_queue_rq(struct request *rq)
{
	if (rq->q->mq_ops) {
		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
		return false;
	} else {
		list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence; record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_io(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}
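
/*
 * Example (a sketch): blk_insert_flush() seeds a new sequence by passing
 * the steps @policy does not need as if they were already complete.  For
 * a FUA-capable device handling a REQ_FLUSH | REQ_FUA write with data:
 *
 *	policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0)
 *		rq->flush.seq == REQ_FSEQ_POSTFLUSH, next step is PREFLUSH,
 *		so rq is queued on the pending flush queue
 *	flush_end_io()
 *		rq->flush.seq |= REQ_FSEQ_PREFLUSH, next step is DATA,
 *		so rq is moved to flush_data_in_flight and dispatched
 *	flush_data_end_io() / mq_flush_data_end_io()
 *		rq->flush.seq |= REQ_FSEQ_DATA, next step is REQ_FSEQ_DONE,
 *		so rq is restored and ended
 */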

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops)
		spin_lock_irqsave(&q->mq_flush_lock, flags);

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 * the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request
	 * finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush-related state of @q has changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if a flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	if (q->mq_ops) {
		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

		blk_mq_rq_init(hctx, q->flush_rq);
		q->flush_rq->mq_ctx = ctx;

		/*
		 * Reuse the tag value from the first waiting request;
		 * with blk-mq the tag is generated during request
		 * allocation and drivers can rely on it being inside
		 * the range they asked for.
		 */
		q->flush_rq->tag = first_rq->tag;
	} else {
		blk_rq_init(q, q->flush_rq);
	}

	q->flush_rq->cmd_type = REQ_TYPE_FS;
	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq->rq_disk = first_rq->rq_disk;
	q->flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(q->flush_rq);
}
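
/*
 * Example (a sketch): the single, recycled q->flush_rq as set up by
 * blk_kick_flush() above carries no data, only the flush intent:
 *
 *	q->flush_rq->cmd_type  == REQ_TYPE_FS
 *	q->flush_rq->cmd_flags == WRITE_FLUSH | REQ_FLUSH_SEQ
 *	q->flush_rq->end_io    == flush_end_io
 *	q->flush_rq->tag       == first_rq->tag	(blk-mq only: the tag is
 *						 reused so it stays within
 *						 the range the driver asked
 *						 for)
 */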

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and REQ_FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_io(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but a flush is not necessary, the request can be
	 * processed directly without going through the flush machinery.
	 * Queue for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_run_request(rq, false, true);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through the flush machinery.  Mark it part of a
	 * flush sequence and submit it for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
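
/*
 * Example (a sketch, assuming the WRITE_FLUSH_FUA helper from
 * <linux/fs.h>): a submitter issuing a data-carrying flush+FUA bio.  The
 * request built from it is routed to blk_insert_flush() and sequenced as
 * described above.  The bio setup is abbreviated; bdev, sector and page
 * are placeholders.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE_FLUSH_FUA, bio);
 */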

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  If the WAIT flag is not passed, the caller may
 *    only check whether the request was pushed onto some internal queue
 *    for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. a loop device without a backing file), and issuing a flush
	 * here will panic.  Ensure the queue has a make_request function
	 * before issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
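
/*
 * Example (a sketch; the caller is hypothetical): draining a device's
 * volatile write cache with the helper above, as an fsync-style path
 * might do:
 *
 *	static int example_flush_device(struct block_device *bdev)
 *	{
 *		return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	}
 */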

void blk_mq_init_flush(struct request_queue *q)
{
	spin_lock_init(&q->mq_flush_lock);
}