Lines Matching refs:sector

341 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
369 peer_req->i.sector = sector;
1373 sector_t sector = peer_req->i.sector;
1391 sector, data_size >> 9, GFP_NOIO))
1417 /* > peer_req->i.sector, unless this is the first bio */
1418 bio->bi_iter.bi_sector = sector;
1450 sector += len >> 9;
1587 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1627 if (sector + (data_size>>9) > capacity) {
1629 "capacity: %llus < sector: %llus + size: %u\n",
1631 (unsigned long long)sector, data_size);
1638 peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO);
1668 (unsigned long long)sector, data_size);
1706 sector_t sector, int data_size)
1729 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
1763 sector_t sector = peer_req->i.sector;
1769 drbd_set_in_sync(device, sector, peer_req->i.size);
1773 drbd_rs_failed_io(device, sector, peer_req->i.size);
1782 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
1788 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
1823 sector_t sector, bool missing_ok, const char *func)
1829 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1832 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
1833 (unsigned long)id, (unsigned long long)sector);
1843 sector_t sector;
1852 sector = be64_to_cpu(p->sector);
1855 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
1863 err = recv_dless_read(peer_device, req, sector, pi->size);
1877 sector_t sector;
1886 sector = be64_to_cpu(p->sector);
1893 err = recv_resync_read(peer_device, sector, pi);
1909 sector_t sector, int size)
1914 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
1936 sector_t sector = peer_req->i.sector;
1947 drbd_set_in_sync(device, sector, peer_req->i.size);
1963 restart_conflicting_writes(device, sector, peer_req->i.size);
2045 if (overlaps(peer_req->i.sector, peer_req->i.size,
2046 rs_req->i.sector, rs_req->i.size)) {
2136 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2142 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2166 sector_t sector = peer_req->i.sector;
2179 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2197 equal = i->sector == sector && i->size == size;
2205 bool superseded = i->sector <= sector && i->sector +
2206 (i->size >> 9) >= sector + (size >> 9);
2212 (unsigned long long)i->sector, i->size,
2213 (unsigned long long)sector, size,
2230 (unsigned long long)i->sector, i->size,
2231 (unsigned long long)sector, size);
2249 fail_postponed_requests(device, sector, size);
2275 sector_t sector;
2306 sector = be64_to_cpu(p->sector);
2307 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2403 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2442 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2452 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2516 sector_t sector;
2530 sector = be64_to_cpu(p->sector);
2534 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2535 (unsigned long long)sector, size);
2538 if (sector + (size>>9) > capacity) {
2539 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2540 (unsigned long long)sector, size);
2558 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2574 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2592 /* used in the sector offset progress display */
2593 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2615 /* used in the sector offset progress display */
2616 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2635 device->ov_start_sector = sector;
2636 device->ov_position = sector;
2637 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2643 drbd_info(device, "Online Verify start sector: %llu\n",
2644 (unsigned long long)sector);
2687 && drbd_rs_should_slow_down(device, sector, false))
2690 if (drbd_rs_begin_io(device, sector))
4094 /* explicit verify finished notification, stop sector reached. */
4558 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
5165 sector_t sector = be64_to_cpu(p->sector);
5178 drbd_rs_complete_io(device, sector);
5179 drbd_set_in_sync(device, sector, blksize);
5191 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
5199 req = find_request(device, root, id, sector, missing_ok, func);
5217 sector_t sector = be64_to_cpu(p->sector);
5229 drbd_set_in_sync(device, sector, blksize);
5253 return validate_req_change_req_state(device, p->block_id, sector,
5263 sector_t sector = be64_to_cpu(p->sector);
5276 drbd_rs_failed_io(device, sector, size);
5280 err = validate_req_change_req_state(device, p->block_id, sector,
5289 drbd_set_out_of_sync(device, sector, size);
5299 sector_t sector = be64_to_cpu(p->sector);
5309 (unsigned long long)sector, be32_to_cpu(p->blksize));
5311 return validate_req_change_req_state(device, p->block_id, sector,
5320 sector_t sector;
5329 sector = be64_to_cpu(p->sector);
5337 drbd_rs_complete_io(device, sector);
5340 drbd_rs_failed_io(device, sector, size);
5382 sector_t sector;
5390 sector = be64_to_cpu(p->sector);
5396 drbd_ov_out_of_sync_found(device, sector, size);
5403 drbd_rs_complete_io(device, sector);
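
A pattern recurs throughout the matches above: sectors are 512-byte units, so byte
counts are converted with a right shift by 9 (e.g. "data_size >> 9",
"sector += len >> 9"), incoming requests are bounds-checked against the device
capacity ("sector + (size>>9) > capacity"), and conflicting writes are detected
with an interval-overlap test (the overlaps(...) call near line 2045). The sketch
below is a minimal, self-contained illustration of that arithmetic only; it is not
DRBD code, and the type alias, macro, and helper names are assumptions made for
the example.

    /* Illustrative sketch of the sector arithmetic seen in the listing above.
     * Not DRBD code; names here (request_fits, extents_overlap) are hypothetical. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;      /* index of a 512-byte sector */

    #define SECTOR_SHIFT 9          /* bytes -> sectors: size >> 9 */

    /* Reject a request that would run past the end of the device
     * (compare the "sector + (size>>9) > capacity" checks above). */
    static bool request_fits(sector_t sector, unsigned int size_bytes,
                             sector_t capacity)
    {
            return sector + (size_bytes >> SECTOR_SHIFT) <= capacity;
    }

    /* True if two extents, given as (start sector, size in bytes), overlap
     * (compare the overlaps(peer_req->i.sector, ...) test above). */
    static bool extents_overlap(sector_t s1, unsigned int sz1,
                                sector_t s2, unsigned int sz2)
    {
            return s1 + (sz1 >> SECTOR_SHIFT) > s2 &&
                   s2 + (sz2 >> SECTOR_SHIFT) > s1;
    }

    int main(void)
    {
            sector_t capacity = 1 << 20;        /* 2^20 sectors = 512 MiB device */
            sector_t sector = 8;
            unsigned int size = 4096;           /* one 4 KiB block = 8 sectors */

            printf("fits: %d\n", request_fits(sector, size, capacity));
            printf("overlaps: %d\n", extents_overlap(8, 4096, 12, 4096));

            /* advance past the block just handled, as in "sector += len >> 9" */
            sector += size >> SECTOR_SHIFT;
            printf("next sector: %llu\n", (unsigned long long)sector);
            return 0;
    }

Under these assumptions, request_fits() mirrors the capacity checks around lines
1627 and 2538, and extents_overlap() mirrors the write-conflict detection used
when walking device->write_requests.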