blk-flush.c revision 337238be1bf52e1242f940fc6fe83fb395e55057
/*
 * Functions to sequence FLUSH and FUA writes.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
	QUEUE_FSEQ_DONE		= (1 << 4),
};

static struct request *queue_next_fseq(struct request_queue *q);

unsigned blk_flush_cur_seq(struct request_queue *q)
{
	if (!q->flush_seq)
		return 0;
	return 1 << ffz(q->flush_seq);
}

static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
{
	struct request *next_rq = NULL;

	if (error && !q->flush_err)
		q->flush_err = error;

	BUG_ON(q->flush_seq & seq);
	q->flush_seq |= seq;

	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_fseq(q);
	} else {
		/* complete this flush request */
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_flush_rq = NULL;
		q->flush_seq = 0;

		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
	}
	return next_rq;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}

static void flush_data_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}

static void init_flush_request(struct request *rq, struct gendisk *disk)
{
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = WRITE_FLUSH;
	rq->rq_disk = disk;
}

static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *orig_rq = q->orig_flush_rq;
	struct request *rq = &q->flush_rq;

	blk_rq_init(q, rq);

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		init_flush_request(rq, orig_rq->rq_disk);
		rq->end_io = pre_flush_end_io;
		break;
	case QUEUE_FSEQ_DATA:
		init_request_from_bio(rq, orig_rq->bio);
		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
		rq->end_io = flush_data_end_io;
		break;
	case QUEUE_FSEQ_POSTFLUSH:
		init_flush_request(rq, orig_rq->rq_disk);
		rq->end_io = post_flush_end_io;
		break;
	default:
		BUG();
	}

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	return rq;
}

struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	unsigned int fflags = q->flush_flags; /* may change, cache it */
	bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
	bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
	bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
	unsigned skip = 0;

	/*
	 * Special case.  If there's data but flush is not necessary,
	 * the request can be issued directly.
	 *
	 * Flush w/o data should be able to be issued directly too but
	 * currently some drivers assume that rq->bio contains
	 * non-zero data if it isn't NULL and empty FLUSH requests
	 * getting here usually have bios without data.
	 */
	if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
		rq->cmd_flags &= ~REQ_FLUSH;
		if (!has_fua)
			rq->cmd_flags &= ~REQ_FUA;
		return rq;
	}

	/*
	 * Sequenced flushes can't be processed in parallel.  If
	 * another one is already in progress, queue for later
	 * processing.
	 */
	if (q->flush_seq) {
		list_move_tail(&rq->queuelist, &q->pending_flushes);
		return NULL;
	}

	/*
	 * Start a new flush sequence
	 */
	q->flush_err = 0;
	q->flush_seq |= QUEUE_FSEQ_STARTED;

	/* adjust FLUSH/FUA of the original request and stash it away */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!has_fua)
		rq->cmd_flags &= ~REQ_FUA;
	blk_dequeue_request(rq);
	q->orig_flush_rq = rq;

	/* skip unneeded sequences and return the first one */
	if (!do_preflush)
		skip |= QUEUE_FSEQ_PREFLUSH;
	if (!blk_rq_sectors(rq))
		skip |= QUEUE_FSEQ_DATA;
	if (!do_postflush)
		skip |= QUEUE_FSEQ_POSTFLUSH;
	return blk_flush_complete_seq(q, skip, 0);
}

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:		blockdev to issue flush for
 * @gfp_mask:		memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:		BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller may only check
 *    that the request was pushed onto some internal queue for later
 *    handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the barrier.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it.  For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
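
/*
 * Illustrative caller sketch (not part of this file): a minimal example of
 * how an fsync-style path might use blkdev_issue_flush() above to drain a
 * device's volatile write cache and wait for completion.  It assumes the
 * BLKDEV_IFL_WAIT flag is available in this kernel revision; the function
 * name and error handling below are hypothetical example code, guarded by
 * "#if 0" so it is never compiled.
 */
#if 0	/* example only */
static int example_flush_device(struct block_device *bdev)
{
	int err;

	/* Issue an empty barrier and sleep until it completes. */
	err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);

	/* A device without a write cache has nothing to flush. */
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
#endif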