/block/
cmdline-parser.c
     42  char *next = strchr(++partdef, ')');  [local]
     44  if (!next) {
     50  length = min_t(int, next - partdef,
     55  partdef = ++next;
     92  char *next;  [local]
    104  next = strchr(bdevdef, ':');
    105  if (!next) {
    110  length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
    117  while (next && *(++next)) {
    168  char *next;  [local]
    [all...]
blk-merge.c
    302  struct bio *next = bio->bi_next;  [local]
    306  bio->bi_next = next;
    389  struct request *next)
    393  req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
    399  if (req_no_special_merge(req) || req_no_special_merge(next))
    405  if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
    409  total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
    410  if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
    413  if (next->nr_phys_segments == 1)
    414  next
    388  ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next)  [argument]
    479  attempt_merge(struct request_queue *q, struct request *req, struct request *next)  [argument]
    558  struct request *next = elv_latter_request(q, rq);  [local]
    576  blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next)  [argument]
    [all...]
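The ll_merge_requests_fn()/attempt_merge() hits above all feed one feasibility test: two back-to-back requests may merge only if their combined physical segment count, less any boundary pair that blk_phys_contig_segment() finds contiguous, still fits the queue limit. A minimal userspace sketch of that test follows; struct toy_req, toy_can_merge() and the limit of 4 are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for struct request and the queue's
 * max-segments limit; not the kernel's types. */
struct toy_req {
	unsigned int nr_segments;
	unsigned long head_start;	/* start address of first segment */
	unsigned long tail_end;		/* end address of last segment */
};

static bool toy_can_merge(const struct toy_req *a, const struct toy_req *b,
			  unsigned int max_segments)
{
	unsigned int total = a->nr_segments + b->nr_segments;

	/* If a's tail runs straight into b's head, the two boundary
	 * segments collapse into one, which is the decision
	 * blk_phys_contig_segment() makes in the real code. */
	if (a->tail_end == b->head_start)
		total--;

	return total <= max_segments;
}

int main(void)
{
	struct toy_req a = { .nr_segments = 3, .head_start = 0,    .tail_end = 4096 };
	struct toy_req b = { .nr_segments = 2, .head_start = 4096, .tail_end = 8192 };

	printf("merge ok: %d\n", toy_can_merge(&a, &b, 4));	/* 3+2-1 = 4: ok */
	return 0;
}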
noop-iosched.c
     16  struct request *next)
     18  list_del_init(&next->queuelist);
     27  rq = list_entry(nd->queue.next, struct request, queuelist);
     57  if (rq->queuelist.next == &nd->queue)
     59  return list_entry(rq->queuelist.next, struct request, queuelist);
     15  noop_merged_requests(struct request_queue *q, struct request *rq, struct request *next)  [argument]
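The noop-iosched.c hits (and the blk-softirq.c, blk-iopoll.c and blk-ioc.c ones further down) are all the same idiom: the list node lives inside the request, and list_entry(), i.e. container_of(), recovers the enclosing struct from a bare node pointer. A userspace sketch of the mechanism, assuming toy types in place of struct request:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct toy_request {
	int sector;
	struct list_head queuelist;	/* embedded node, as in struct request */
};

int main(void)
{
	struct toy_request rq = { .sector = 42 };
	struct list_head queue = { &rq.queuelist, &rq.queuelist };

	rq.queuelist.next = rq.queuelist.prev = &queue;

	/* list_entry() is just container_of() on the first node. */
	struct toy_request *first =
		container_of(queue.next, struct toy_request, queuelist);
	printf("first sector: %d\n", first->sector);	/* 42 */
	return 0;
}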
blk-softirq.c
     33  rq = list_entry(local_list.next, struct request, ipi_list);
     50  if (list->next == &rq->ipi_list)
    147  if (list->next == &req->ipi_list)
blk-timeout.c
    133  unsigned long flags, next = 0;  [local]
    140  blk_rq_check_expired(rq, &next, &next_set);
    143  mod_timer(&q->timeout, round_jiffies_up(next));
    212  * than an existing one, modify the timer. Round up to next nearest
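Here blk_rq_check_expired() fills 'next' with the soonest still-pending deadline seen during the scan, and the single queue timer is then re-armed for that instant via mod_timer(). The accumulation step, reduced to a toy loop (the deadline values and next_set flag handling are illustrative):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long deadlines[] = { 700, 350, 920 };
	unsigned long next = 0;
	bool next_set = false;

	/* Keep the earliest deadline seen, as the &next/&next_set
	 * out-parameters accumulate it in the real scan. */
	for (int i = 0; i < 3; i++) {
		if (!next_set || deadlines[i] < next) {
			next = deadlines[i];
			next_set = true;
		}
	}
	if (next_set)
		printf("re-arm timer for %lu\n", next);	/* 350 */
	return 0;
}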
t10-pi.c
     86  goto next;
    100  goto next;
    114  next:
deadline-iosched.c
     38  * next in sort order. read, write or both are NULL
    170  struct request *next)
    173  * if next expires before rq, assign its expire time to rq
    174  * and move into next position (next will be deleted) in fifo
    176  if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
    177  if (time_before(next->fifo_time, req->fifo_time)) {
    178  list_move(&req->queuelist, &next->queuelist);
    179  req->fifo_time = next->fifo_time;
    184  * kill knowledge of next, thi
    169  deadline_merged_requests(struct request_queue *q, struct request *req, struct request *next)  [argument]
    [all...]
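The merge path keeps whichever fifo_time expires first, compared with time_before(), which stays correct across jiffies wraparound by looking at the sign of the difference rather than the raw magnitudes. A sketch: the macro body mirrors the kernel's definition, while the harness values are made up.

#include <stdio.h>

#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;	/* just before wraparound */
	unsigned long after_wrap = 5;			/* just after it */

	/* Signed difference gets the order right across the wrap... */
	printf("%d\n", time_before(near_wrap, after_wrap));	/* 1 */
	/* ...where a plain unsigned compare would invert it. */
	printf("%d\n", after_wrap < near_wrap);			/* 1: misleading */
	return 0;
}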
blk-integrity.c
    190  struct request *next)
    192  if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
    195  if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
    199  bio_integrity(next->bio)->bip_flags)
    202  if (req->nr_integrity_segments + next->nr_integrity_segments >
    214  struct bio *next = bio->bi_next;  [local]
    227  bio->bi_next = next;
    189  blk_integrity_merge_rq(struct request_queue *q, struct request *req, struct request *next)  [argument]
blk.h
    123  rq = list_entry_rq(q->queue_head.next);
    130  * finished. Even we don't do this, driver can't dispatch next
    188  struct request *next);
blk-mq.c
    444  struct request *rq, *next;  [local]
    451  list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
    461  rq = list_entry(rq_list.next, struct request, queuelist);
    522  unsigned long next;  [member in struct blk_mq_timeout_data]
    573  } else if (!data->next_set || time_after(data->next, rq->deadline)) {
    574  data->next = rq->deadline;
    583  .next = 0,
    601  data.next = blk_rq_timeout(round_jiffies_up(data.next));
    602  mod_timer(&q->timeout, data.next);
    [all...]
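The hit at line 451 uses list_for_each_entry_safe(rq, next, ...): the successor is fetched before the loop body runs, so the current entry can be unlinked or freed mid-walk. The same idea in plain C over a toy singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static void dispatch_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* save the successor first... */
		printf("dispatch %d\n", cur->id);
		free(cur);		/* ...so freeing cur is safe */
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;
	for (int i = 3; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}
	dispatch_all(&head);	/* dispatch 1, 2, 3 */
	return 0;
}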
blk-iopoll.c
    104  iop = list_entry(list->next, struct blk_iopoll, list);
genhd.c
    119  * disk_part_iter_next - proceed iterator to the next partition and return it
    122  * Proceed @piter to the next partition and return it.
    153  /* iterate to the next partition */
    244  struct blk_major_name *next;  [member in struct blk_major_name]
    262  for (dp = major_names[offset]; dp; dp = dp->next)
    317  p->next = NULL;
    320  for (n = &major_names[index]; *n; n = &(*n)->next) {
    348  for (n = &major_names[index]; *n; n = &(*n)->next)
    355  *n = p->next;
    873  .next
    [all...]
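The register/unregister hits around lines 317-355 walk the major_names chain through a pointer-to-pointer cursor, so appending at the tail and unlinking an entry need no head-of-list special case: removal is just *n = p->next. A userspace sketch of the idiom with toy types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_ent { char name[16]; struct name_ent *next; };

static void chain_add(struct name_ent **head, const char *name)
{
	struct name_ent **n, *p = calloc(1, sizeof(*p));

	strncpy(p->name, name, sizeof(p->name) - 1);
	for (n = head; *n; n = &(*n)->next)
		;			/* walk to the terminating NULL slot */
	*n = p;
}

static void chain_del(struct name_ent **head, const char *name)
{
	struct name_ent **n;

	for (n = head; *n; n = &(*n)->next) {
		if (!strcmp((*n)->name, name)) {
			struct name_ent *victim = *n;
			*n = victim->next;	/* unlink, head or not */
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct name_ent *head = NULL;

	chain_add(&head, "sd");
	chain_add(&head, "md");
	chain_del(&head, "sd");	/* removing the head needs no special case */
	printf("%s\n", head ? head->name : "(empty)");	/* md */
	return 0;
}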
elevator.c
    279  struct hlist_node *next;  [local]
    282  hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
    515  struct request *next)
    518  const int next_sorted = next->cmd_flags & REQ_SORTED;
    521  e->type->ops.elevator_merge_req_fn(q, rq, next);
    526  elv_rqhash_del(q, next);
    514  elv_merge_requests(struct request_queue *q, struct request *rq, struct request *next)  [argument]
cfq-iosched.c
    114  /* if fifo isn't expired, next request to serve */
    417  CFQ_CFQQ_FLAG_wait_busy,  /* Waiting for next request */
   1213  struct request *next = NULL, *prev = NULL;  [local]
   1221  next = rb_entry_rq(rbnext);
   1225  next = rb_entry_rq(rbnext);
   1228  return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
   2046  * the next service time further away in the tree.
   2257  * check if this request is a better next-serve candidate
   2371  struct request *next)
   2377  * reposition in fifo if next i
   2370  cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next)  [argument]
   3398  struct cfq_queue *__cfqq, *next;  [local]
    [all...]
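cfq_choose_req() at line 1228 picks the next request from the rb-tree neighbours (successor and predecessor) of the last dispatched one. A deliberately simplified chooser, keeping only the "prefer forward seeks, otherwise the nearer sector" core and dropping the real function's back_max and penalty weighting:

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t choose_next(sector_t last, sector_t next, sector_t prev)
{
	/* Both candidates ahead of the head: take the nearer one. */
	if (next >= last && prev >= last)
		return next - last <= prev - last ? next : prev;
	/* Otherwise prefer whichever avoids seeking backwards. */
	if (next >= last)
		return next;
	if (prev >= last)
		return prev;
	/* Both behind the head: take the one closer to it. */
	return last - next <= last - prev ? next : prev;
}

int main(void)
{
	printf("%llu\n", choose_next(100, 120, 90));	/* 120: forward wins */
	printf("%llu\n", choose_next(100, 60, 90));	/* 90: nearer behind */
	return 0;
}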
blk-ioc.c
    224  struct io_cq *icq = list_entry(q->icq_list.next,
blk-cgroup.c
    410  * The next function used by blk_queue_for_each_rl(). It's a bit tricky
    433  /* walk to the next list_head, skip root blkcg */
    434  ent = ent->next;
    436  ent = ent->next;
bio.c
   1703  struct bio *next = bio->bi_private;  [local]
   1708  bio = next;
blk-core.c
   2054  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
   2386  * If @req has leftover, sets it up for the next range of segments.
   2650  * If @rq has leftover, sets it up for the next range of segments.
   2702  * blk_end_request_err - Finish a request till the next failure boundary.
   2703  * @rq: the request to finish till the next failure boundary for
   2707  * Complete @rq till the next failure boundary.
   2780  * __blk_end_request_err - Finish a request till the next failure boundary.
   2781  * @rq: the request to finish till the next failure boundary for
   2785  * Complete @rq till the next failure boundary. Must be called
   3111  rq = list_entry_rq(list.next);
    [all...]
/block/partitions/
msdos.c
    114  * table start). The second is a pointer to the next logical partition
    139  if (state->next == state->limit)
    152  * the 2nd entry is the next extended partition, or empty,
    163  sector_t offs, size, next;  [local]
    172  next = this_sector + offs;
    176  if (next < first_sector)
    178  if (next + size > first_sector + first_size)
    182  put_partition(state, state->next, next, size);
    183  set_info(state, state->next, disksi
    [all...]
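The hits at lines 172-178 are the sanity checks on the extended-partition chain: the derived start sector of each logical partition must lie inside the enclosing extended partition, or the walk stops. A sketch of just those bounds checks; the parameter names follow the excerpt, but the harness values are made up.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

static bool logical_in_bounds(sector_t this_sector, sector_t offs,
			      sector_t size, sector_t first_sector,
			      sector_t first_size)
{
	sector_t next = this_sector + offs;

	if (next < first_sector)			/* starts before container */
		return false;
	if (next + size > first_sector + first_size)	/* runs past its end */
		return false;
	return true;
}

int main(void)
{
	/* container: sectors [2048, 2048 + 20480) */
	printf("%d\n", logical_in_bounds(2048, 63, 8192, 2048, 20480));	/* 1 */
	printf("%d\n", logical_in_bounds(2048, 63, 40960, 2048, 20480));	/* 0 */
	return 0;
}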
check.h
     19  int next;  [member in struct parsed_partitions]
acorn.c
    177  * with pointer to next 'drive'.
    180  * next partition relative to the start of this one - I'm assuming
    223  /* RISCiX - we don't know how to find the next one. */
    511  * 2. The start address of the next entry.
    535  sector_t next;  [local]
    540  next = le32_to_cpu(p->start);
    542  put_partition(state, slot++, start, next - start);
    543  start = next;
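The hits at lines 540-543 derive each partition's size from the gap to the next entry's start address rather than from a stored length field. The delta computation in isolation, with a made-up table:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	/* start sectors of consecutive entries; last value marks the end */
	sector_t starts[] = { 0, 4096, 20480, 61440 };
	int n = sizeof(starts) / sizeof(starts[0]);

	for (int i = 0; i + 1 < n; i++) {
		sector_t start = starts[i];
		sector_t next = starts[i + 1];

		printf("slot %d: start=%llu size=%llu\n", i, start, next - start);
	}
	return 0;
}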