/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
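 *
 * For example, on a device with a writeback cache but without FUA support,
 * a REQ_FLUSH|REQ_FUA write with data is sequenced as PREFLUSH -> DATA ->
 * POSTFLUSH, while on a FUA capable device the same request becomes
 * PREFLUSH -> DATA with REQ_FUA left set on the data request.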
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

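/*
 * blk_flush_policy - compute which flush sequence steps @rq requires
 * @fflags: flush flags advertised by the queue (REQ_FLUSH/REQ_FUA support)
 * @rq: request being sequenced
 *
 * A request with data always needs the DATA step.  If the queue has a
 * writeback cache (REQ_FLUSH in @fflags), a REQ_FLUSH request also needs
 * PREFLUSH, and a REQ_FUA request needs POSTFLUSH when the hardware can't
 * do FUA natively.
 */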
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

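/*
 * blk_flush_cur_seq - return the next pending step for @rq
 *
 * rq->flush.seq accumulates the REQ_FSEQ_* bits that have already
 * completed, so the lowest clear bit is the step to execute next.
 */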
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

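/*
 * blk_flush_restore_request - undo flush sequencing adjustments to @rq
 *
 * Called when the whole sequence is done so that @rq can be completed
 * through the normal path.
 */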
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

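/*
 * blk_flush_queue_rq - (re)queue a flush sequence request for execution
 * @rq: request to queue
 * @add_front: queue at the head instead of the tail
 *
 * For blk-mq the request goes through the requeue list and %false is
 * returned because dispatch happens asynchronously.  For the legacy path
 * the request is put on q->queue_head directly and %true tells the caller
 * that the queue needs to be run.
 */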
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

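/*
 * flush_end_io - completion handler for the flush request itself
 * @flush_rq: the PREFLUSH/POSTFLUSH request that just finished
 * @error: completion error, if any
 *
 * Toggles flush_running_idx to retire the flush that was in flight and
 * walks the requests that were waiting on it, advancing each to its next
 * sequence step.
 */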
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to an empty queue_head may stall the
	 * queue.
	 * 2. When the flush request is running in a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request is
	 * finished to avoid stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

269
270/**
271 * blk_kick_flush - consider issuing flush request
272 * @q: request_queue being kicked
273 * @fq: flush queue
274 *
275 * Flush related states of @q have changed, consider issuing flush request.
276 * Please read the comment at the top of this file for more info.
277 *
278 * CONTEXT:
279 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
280 *
281 * RETURNS:
282 * %true if flush was issued, %false otherwise.
283 */
284static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
285{
286	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
287	struct request *first_rq =
288		list_first_entry(pending, struct request, flush.list);
289	struct request *flush_rq = fq->flush_rq;
290
291	/* C1 described at the top of this file */
292	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
293		return false;
294
295	/* C2 and C3 */
296	if (!list_empty(&fq->flush_data_in_flight) &&
297	    time_before(jiffies,
298			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
299		return false;
300
301	/*
302	 * Issue flush and toggle pending_idx.  This makes pending_idx
303	 * different from running_idx, which means flush is in flight.
304	 */
305	fq->flush_pending_idx ^= 1;
306
307	blk_rq_init(q, flush_rq);
308
309	/*
310	 * Borrow tag from the first request since they can't
311	 * be in flight at the same time.
312	 */
313	if (q->mq_ops) {
314		flush_rq->mq_ctx = first_rq->mq_ctx;
315		flush_rq->tag = first_rq->tag;
316	}
317
318	flush_rq->cmd_type = REQ_TYPE_FS;
319	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
320	flush_rq->rq_disk = first_rq->rq_disk;
321	flush_rq->end_io = flush_end_io;
322
323	return blk_flush_queue_rq(flush_rq, false);
324}
325
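/*
 * flush_data_end_io - completion handler for the data step (legacy path)
 *
 * Marks REQ_FSEQ_DATA as done for @rq and, if the sequencing code queued
 * more work, kicks the queue asynchronously.
 */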
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

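/*
 * mq_flush_data_end_io - completion handler for the data step (blk-mq path)
 *
 * Same as flush_data_end_io() but advances the sequence under
 * fq->mq_flush_lock and runs the hardware queue that maps to the request's
 * software context.
 */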
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() when dispatching a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  Caller can supply room for storing the error offset in
 *    case of a flush error, if they wish to.
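 *
 *    A minimal sketch of a hypothetical caller (the variable names are
 *    illustrative, not taken from existing code):
 *
 *	sector_t err_sector = 0;
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, &err_sector);
 *	if (err)
 *		pr_warn("cache flush failed near sector %llu\n",
 *			(unsigned long long)err_sector);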
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_iter.bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

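/*
 * blk_alloc_flush_queue - allocate and initialize flush machinery state
 * @q: owning request_queue
 * @node: NUMA node to allocate from
 * @cmd_size: extra per-request driver payload, used for blk-mq flush requests
 *
 * Allocates the blk_flush_queue together with its preallocated flush
 * request.  Returns %NULL on allocation failure.
 */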
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

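/*
 * blk_free_flush_queue - free state allocated by blk_alloc_flush_queue()
 */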
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}