blk-flush.c revision 4fed947cb311e5aa51781d316cefca836352f6ce
/*
 * Functions to sequence FLUSH and FUA writes.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
	QUEUE_FSEQ_DONE		= (1 << 4),
};

static struct request *queue_next_fseq(struct request_queue *q);

unsigned blk_flush_cur_seq(struct request_queue *q)
{
	if (!q->flush_seq)
		return 0;
	return 1 << ffz(q->flush_seq);
}

static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
{
	struct request *next_rq = NULL;

	if (error && !q->flush_err)
		q->flush_err = error;

	BUG_ON(q->flush_seq & seq);
	q->flush_seq |= seq;

	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_fseq(q);
	} else {
		/* complete this flush request */
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_flush_rq = NULL;
		q->flush_seq = 0;

		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
	}
	return next_rq;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}

static void flush_data_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, struct request *rq,
			rq_end_io_fn *end_io)
{
	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_FLUSH;
	rq->rq_disk = q->orig_flush_rq->rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *orig_rq = q->orig_flush_rq;
	struct request *rq = &q->flush_rq;

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		queue_flush(q, rq, pre_flush_end_io);
		break;

	case QUEUE_FSEQ_DATA:
		/* initialize proxy request, inherit FLUSH/FUA and queue it */
		blk_rq_init(q, rq);
		init_request_from_bio(rq, orig_rq->bio);
		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
		rq->end_io = flush_data_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
		break;

	case QUEUE_FSEQ_POSTFLUSH:
		queue_flush(q, rq, post_flush_end_io);
		break;

	default:
		BUG();
	}
	return rq;
}
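/*
 * Worked example of the sequencing above, assuming a WRITE request that
 * carries REQ_FLUSH | REQ_FUA and has data attached.  blk_do_flush()
 * below decomposes it according to what the device advertises in
 * q->flush_flags:
 *
 *   device capability     resulting sequence
 *   FLUSH | FUA           PREFLUSH, DATA (REQ_FUA kept on the data write)
 *   FLUSH only            PREFLUSH, DATA, POSTFLUSH (FUA emulated)
 *   neither               DATA issued directly, FLUSH/FUA flags stripped
 *
 * blk_flush_cur_seq() yields the lowest bit still clear in q->flush_seq,
 * so each end_io callback above advances the state machine by marking
 * its own step done and queueing the next one.
 */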
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	unsigned int fflags = q->flush_flags; /* may change, cache it */
	bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
	bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
	bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
	unsigned skip = 0;

	/*
	 * Special case.  If there's data but a flush is not necessary,
	 * the request can be issued directly.
	 *
	 * A flush w/o data should be issuable directly too, but
	 * currently some drivers assume that rq->bio contains non-zero
	 * data if it isn't NULL, and empty FLUSH requests getting here
	 * usually have bios without data.
	 */
	if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
		rq->cmd_flags &= ~REQ_FLUSH;
		if (!has_fua)
			rq->cmd_flags &= ~REQ_FUA;
		return rq;
	}

	/*
	 * Sequenced flushes can't be processed in parallel.  If
	 * another one is already in progress, queue for later
	 * processing.
	 */
	if (q->flush_seq) {
		list_move_tail(&rq->queuelist, &q->pending_flushes);
		return NULL;
	}

	/*
	 * Start a new flush sequence.
	 */
	q->flush_err = 0;
	q->flush_seq |= QUEUE_FSEQ_STARTED;

	/* adjust FLUSH/FUA of the original request and stash it away */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!has_fua)
		rq->cmd_flags &= ~REQ_FUA;
	blk_dequeue_request(rq);
	q->orig_flush_rq = rq;

	/* skip unneeded sequences and return the first one */
	if (!do_preflush)
		skip |= QUEUE_FSEQ_PREFLUSH;
	if (!blk_rq_sectors(rq))
		skip |= QUEUE_FSEQ_DATA;
	if (!do_postflush)
		skip |= QUEUE_FSEQ_POSTFLUSH;
	return blk_flush_complete_seq(q, skip, 0);
}
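/*
 * Sketch of the dispatch side, under the assumption that a caller
 * outside this file (the elevator's next-request path) drives
 * blk_do_flush(); example_fetch_request() is a hypothetical name.  It
 * shows the contract visible in the code above: a non-NULL return is
 * the request to hand to the driver (possibly the proxy &q->flush_rq),
 * while NULL means the request was deferred to pending_flushes or
 * completed outright, and another request should be picked.
 */
#if 0
static struct request *example_fetch_request(struct request_queue *q)
{
	while (!list_empty(&q->queue_head)) {
		struct request *rq = list_entry_rq(q->queue_head.next);

		/* plain requests and the proxy flush go out as-is */
		if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
		    rq == &q->flush_rq)
			return rq;

		/* NULL: deferred or finished; look for another request */
		rq = blk_do_flush(q, rq);
		if (rq)
			return rq;
	}
	return NULL;
}
#endif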
static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: if non-NULL, storage for the sector where an error occurred
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  If the WAIT flag is not passed, the flush is
 *    merely queued, and the caller cannot assume it has completed by
 *    the time this function returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up
	 * here (e.g. a loop device without a backing file), and issuing
	 * a flush here will panic.  Ensure there is a make_request
	 * function before issuing the barrier.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector,
		 * if it supports it.  For non-stacked drivers, this should
		 * be copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
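/*
 * Usage sketch for blkdev_issue_flush(), assuming the BLKDEV_IFL_WAIT
 * mask (the shifted form of the BLKDEV_WAIT bit tested above) from the
 * blkdev.h of this era; example_flush_and_wait() is a hypothetical
 * caller.  Treating -EOPNOTSUPP as success mirrors callers that regard
 * "no volatile write cache" as nothing to flush.
 */
#if 0
static int example_flush_and_wait(struct block_device *bdev)
{
	int ret;

	/* submit an empty barrier and block until it completes */
	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
	if (ret == -EOPNOTSUPP)
		ret = 0;	/* device has no cache to flush */
	return ret;
}
#endif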