blk-flush.c revision 0bae352da54a95435f721705d3670a6eaefdcf87
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
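 *
 * For example, a REQ_FLUSH|REQ_FUA write with data ends up with policy
 * PREFLUSH + DATA + POSTFLUSH on a device that has a writeback cache but
 * no FUA support, PREFLUSH + DATA (with REQ_FUA passed through) on a
 * device that supports FUA, and plain DATA on a device without a
 * writeback cache.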
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & REQ_FLUSH) {
                if (rq->cmd_flags & REQ_FLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->cmd_flags &= ~REQ_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
        if (rq->q->mq_ops) {
                struct request_queue *q = rq->q;

                blk_mq_add_to_requeue_list(rq, add_front);
                blk_mq_kick_requeue_list(q);
                return false;
        } else {
                if (add_front)
                        list_add(&rq->queuelist, &rq->q->queue_head);
                else
                        list_add_tail(&rq->queuelist, &rq->q->queue_head);
                return true;
        }
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
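 *
 * blk_insert_flush() first calls this with the steps that @rq skips
 * (%REQ_FSEQ_ACTIONS & ~policy); each later call marks one more step as
 * done, and blk_flush_cur_seq() picks the lowest bit still clear in
 * @rq->flush.seq as the next step, ending with %REQ_FSEQ_DONE once all
 * action bits are set (or immediately on error).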
 */
static bool blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, int error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        bool queued = false, kicked;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                queued = blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                if (q->mq_ops)
                        blk_mq_end_request(rq, error);
                else
                        __blk_end_request_all(rq, error);
                break;

        default:
                BUG();
        }

        kicked = blk_kick_flush(q, fq);
        return kicked | queued;
}

static void flush_end_io(struct request *flush_rq, int error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        bool queued = false;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q);

        if (q->mq_ops) {
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
                flush_rq->tag = -1;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        if (!q->mq_ops)
                elv_completed_request(q, flush_rq);

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                queued |= blk_flush_complete_seq(rq, fq, seq, error);
        }

        /*
         * Kick the queue to avoid stalls in two cases:
         * 1. Moving a request silently to an empty queue_head may stall
         *    the queue.
         * 2. When a flush request is running on a non-queueable queue, the
         *    queue is held.  Restart the queue after the flush request
         *    finishes to avoid a stall.
         * This function is called from the request completion path and
         * calling directly into request_fn may confuse the driver.  Always
         * use kblockd.
         */
        if (queued || fq->flush_queue_delayed) {
                WARN_ON(q->mq_ops);
                blk_run_queue_async(q);
        }
        fq->flush_queue_delayed = 0;
        if (q->mq_ops)
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
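 *
 * A flush is issued only if the pending list is non-empty and no other
 * flush is in flight (C1), and only if either no flush data requests are
 * in flight (C2) or the oldest pending request has waited longer than
 * %FLUSH_PENDING_TIMEOUT (C3).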
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return false;

        /* C2 and C3 */
        if (!list_empty(&fq->flush_data_in_flight) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return false;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);
        if (q->mq_ops)
                blk_mq_clone_flush_request(flush_rq, first_rq);

        flush_rq->cmd_type = REQ_TYPE_FS;
        flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;

        return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q);

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q);

        ctx = rq->mq_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_mq_run_hw_queue(hctx, true);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned int fflags = q->flush_flags;   /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_FLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_FLUSH;
        if (!(fflags & REQ_FUA))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                if (q->mq_ops)
                        blk_mq_end_request(rq, 0);
                else
                        __blk_end_bidi_request(rq, 0, 0, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops) {
                        blk_mq_insert_request(rq, false, false, true);
                } else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->cmd_flags |= REQ_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        if (q->mq_ops) {
                rq->end_io = mq_flush_data_end_io;

                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return;
        }
        rq->end_io = flush_data_end_io;

        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @error_sector:       error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller may only check
 *    whether the request was pushed onto some internal queue for later
 *    handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * Some block devices may not have their queue correctly set up here
         * (e.g. a loop device without a backing file), and issuing a flush
         * here will panic.  Ensure there is a request function before issuing
         * the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_bdev = bdev;

        ret = submit_bio_wait(WRITE_FLUSH, bio);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc(sizeof(*fq), GFP_KERNEL);
        if (!fq)
                goto fail;

        if (q->mq_ops) {
                spin_lock_init(&fq->mq_flush_lock);
                rq_sz = round_up(rq_sz + q->tag_set->cmd_size,
                                 cache_line_size());
        }

        fq->flush_rq = kzalloc(rq_sz, GFP_KERNEL);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio-based request queues have no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}
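For context, a minimal sketch of how an in-kernel caller (for example, a filesystem's fsync path) might drive the exported blkdev_issue_flush() above. The wrapper name sync_my_bdev() is a hypothetical illustration and not part of this file; the call matches the three-argument signature defined in this revision.

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical helper: force the device's volatile write cache to media. */
static int sync_my_bdev(struct block_device *bdev)
{
        /*
         * The error_sector argument is optional; pass NULL when the caller
         * does not care where a failed flush was detected.
         */
        return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}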