Searched refs:sector (Results 1 - 25 of 79) sorted by relevance

/drivers/block/drbd/
drbd_interval.c
25 sector_t max = node->sector + (node->size >> 9);
50 sector_t this_end = this->sector + (this->size >> 9);
61 if (this->sector < here->sector)
63 else if (this->sector > here->sector)
81 * @sector: start sector of @interval
84 * Returns if the tree contains the node @interval with start sector @start.
87 * sector numbe
90 drbd_contains_interval(struct rb_root *root, sector_t sector, struct drbd_interval *interval) argument
134 drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) argument
164 drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size) argument
[all...]
drbd_interval.h
9 sector_t sector; /* start sector of the interval */ member in struct:drbd_interval
37 #define drbd_for_each_overlap(i, root, sector, size) \
38 for (i = drbd_find_overlap(root, sector, size); \
40 i = drbd_next_overlap(i, sector, size))
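
The drbd_interval hits above pair a start sector with a byte size, so the exclusive end of an interval is `sector + (size >> 9)` (512-byte sectors), and two intervals overlap when each one starts before the other ends. A minimal user-space sketch of that arithmetic, with `sector_t` modeled as a 64-bit integer and the rb-tree left out; this is not the kernel code itself:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's sector_t (512-byte units). */
typedef uint64_t sector_t;

struct interval {
	sector_t sector;     /* start sector of the interval */
	unsigned int size;   /* size in bytes */
};

/* Exclusive end sector: byte size converted to sectors with >> 9. */
static sector_t interval_end(const struct interval *i)
{
	return i->sector + (i->size >> 9);
}

/* Two intervals overlap if each one starts before the other ends. */
static bool intervals_overlap(const struct interval *a, const struct interval *b)
{
	return a->sector < interval_end(b) && b->sector < interval_end(a);
}

int main(void)
{
	struct interval a = { .sector = 100, .size = 4096 };  /* sectors 100..107 */
	struct interval b = { .sector = 106, .size = 1024 };  /* sectors 106..107 */

	printf("end(a)=%llu overlap=%d\n",
	       (unsigned long long)interval_end(&a), intervals_overlap(&a, &b));
	return 0;
}
```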
drbd_actlog.c
140 sector_t sector, int rw)
156 bio->bi_iter.bi_sector = sector;
191 sector_t sector, int rw)
200 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
203 if (sector < drbd_md_first_sector(bdev) ||
204 sector + 7 > drbd_md_last_sector(bdev))
207 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
209 err = _drbd_md_sync_page_io(device, bdev, sector, rw);
212 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
256 unsigned first = i->sector >> (AL_EXTENT_SHIF
138 _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, sector_t sector, int rw) argument
190 drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, sector_t sector, int rw) argument
463 sector_t sector; local
829 __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, enum update_sync_bits_mode mode, const char *file, const unsigned int line) argument
944 drbd_rs_begin_io(struct drbd_device *device, sector_t sector) argument
996 drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector) argument
1118 drbd_rs_complete_io(struct drbd_device *device, sector_t sector) argument
[all...]
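
The range check in drbd_md_sync_page_io() above (`sector < drbd_md_first_sector(bdev) || sector + 7 > drbd_md_last_sector(bdev)`) rejects I/O that would leave the metadata area; the `+ 7` matches a 4KiB metadata block, which spans eight 512-byte sectors. A stand-alone, hedged illustration of that bounds check; `md_io_in_range()` and the inclusive `[first, last]` bounds are stand-ins for the DRBD helpers, not the real API:

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* One 4KiB metadata block spans 4096 / 512 = 8 sectors. */
#define MD_BLOCK_SECTORS (4096u >> 9)

/*
 * A 4KiB metadata I/O starting at `sector` covers sectors
 * sector .. sector + 7; both ends must stay inside the
 * metadata area given by the inclusive bounds [first, last].
 */
bool md_io_in_range(sector_t sector, sector_t first, sector_t last)
{
	return sector >= first && sector + (MD_BLOCK_SECTORS - 1) <= last;
}
```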
drbd_worker.c
158 drbd_rs_complete_io(device, i.sector);
185 (unsigned long long)peer_req->i.sector);
190 (unsigned long long)peer_req->i.sector);
360 sector_t sector = peer_req->i.sector; local
371 err = drbd_send_drequest_csum(peer_device, sector, size,
391 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) argument
401 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
591 sector_t sector; local
650 sector
758 sector_t sector; local
1174 sector_t sector = peer_req->i.sector; local
1215 drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) argument
1233 sector_t sector = peer_req->i.sector; local
[all...]
drbd_receiver.c
341 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
369 peer_req->i.sector = sector;
1373 sector_t sector = peer_req->i.sector; local
1391 sector, data_size >> 9, GFP_NOIO))
1417 /* > peer_req->i.sector, unless this is the first bio */
1418 bio->bi_iter.bi_sector = sector;
1450 sector += len >> 9;
1587 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1705 recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req, sector_t sector, int data_size) argument
1763 sector_t sector = peer_req->i.sector; local
1822 find_request(struct drbd_device *device, struct rb_root *root, u64 id, sector_t sector, bool missing_ok, const char *func) argument
1843 sector_t sector; local
1877 sector_t sector; local
1908 restart_conflicting_writes(struct drbd_device *device, sector_t sector, int size) argument
1936 sector_t sector = peer_req->i.sector; local
2136 fail_postponed_requests(struct drbd_device *device, sector_t sector, unsigned int size) argument
2166 sector_t sector = peer_req->i.sector; local
2275 sector_t sector; local
2442 drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, bool throttle_if_app_is_waiting) argument
2516 sector_t sector; local
5165 sector_t sector = be64_to_cpu(p->sector); local
5191 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector, struct rb_root *root, const char *func, enum drbd_req_event what, bool missing_ok) argument
5217 sector_t sector = be64_to_cpu(p->sector); local
5263 sector_t sector = be64_to_cpu(p->sector); local
5299 sector_t sector = be64_to_cpu(p->sector); local
5320 sector_t sector; local
5382 sector_t sector; local
[all...]
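
The receive path above walks a payload in pieces: each bio gets `bi_iter.bi_sector = sector`, and after queuing `len` bytes the cursor advances with `sector += len >> 9`. A small sketch of that cursor arithmetic, assuming 512-byte sectors and a made-up `submit_segment()` callback in place of the real bio plumbing:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Hypothetical stand-in for building and submitting one bio. */
static void submit_segment(sector_t sector, unsigned int len)
{
	printf("segment at sector %llu, %u bytes\n",
	       (unsigned long long)sector, len);
}

/* Split `total` bytes starting at `sector` into segments of at most
 * `max_seg` bytes; the sector cursor advances by len >> 9 each time
 * (assumes every segment length is a multiple of 512). */
static void submit_in_segments(sector_t sector, unsigned int total,
			       unsigned int max_seg)
{
	while (total) {
		unsigned int len = total < max_seg ? total : max_seg;

		submit_segment(sector, len);
		sector += len >> 9;   /* bytes to 512-byte sectors */
		total -= len;
	}
}

int main(void)
{
	submit_in_segments(2048, 12288, 4096);  /* three 4KiB segments */
	return 0;
}
```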
drbd_req.c
34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
80 req->i.sector = bio_src->bi_iter.bi_sector;
147 s, (unsigned long long)req->i.sector, req->i.size);
166 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
169 drbd_set_in_sync(device, req->i.sector, req->i.size);
189 (unsigned long long) req->i.sector, req->i.size);
558 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
560 (unsigned long long)req->i.sector,
654 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
924 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, in argument
944 remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, enum drbd_read_balancing rbm) argument
989 sector_t sector = req->i.sector; local
[all...]
drbd_protocol.h
121 u64 sector; /* 64 bits sector number */ member in struct:p_data
141 u64 sector; member in struct:p_block_ack
148 u64 sector; member in struct:p_block_req
271 u64 sector; member in struct:p_block_desc
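
The protocol structs above carry sector numbers as 64-bit big-endian fields on the wire, which the receiver converts with `be64_to_cpu(p->sector)` (visible in the drbd_receiver.c hits). A hedged user-space equivalent of that decode, done byte-wise rather than with the kernel's be64_to_cpu macro:

```c
#include <stdint.h>

/* Decode a big-endian 64-bit on-wire sector number into host order,
 * byte by byte so it works regardless of host endianness. */
uint64_t wire_sector_to_cpu(const unsigned char buf[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | buf[i];
	return v;
}
```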
/drivers/block/
brd.c
52 * Look up and return a brd's page for a given sector.
55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) argument
72 idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
82 * Look up and return a brd's page for a given sector.
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) argument
92 page = brd_lookup_page(brd, sector);
119 idx = sector >> PAGE_SECTORS_SHIFT;
134 static void brd_free_page(struct brd_device *brd, sector_t sector) argument
140 idx = sector >> PAGE_SECTORS_SHIF
147 brd_zero_page(struct brd_device *brd, sector_t sector) argument
196 copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) argument
212 discard_from_brd(struct brd_device *brd, sector_t sector, size_t n) argument
233 copy_to_brd(struct brd_device *brd, const void *src, sector_t sector, size_t n) argument
265 copy_from_brd(void *dst, struct brd_device *brd, sector_t sector, size_t n) argument
299 brd_do_bvec(struct brd_device *brd, struct page *page, unsigned int len, unsigned int off, int rw, sector_t sector) argument
332 sector_t sector; local
363 brd_rw_page(struct block_device *bdev, sector_t sector, struct page *page, int rw) argument
373 brd_direct_access(struct block_device *bdev, sector_t sector, void **kaddr, unsigned long *pfn) argument
[all...]
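
brd.c above maps a sector to its backing page with `idx = sector >> PAGE_SECTORS_SHIFT`, i.e. it drops the low bits that address the 512-byte sectors inside one page. A sketch of that index/offset split, assuming 4KiB pages so PAGE_SECTORS_SHIFT is 3 (eight sectors per page); the constants are recomputed locally, not taken from kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define SECTOR_SHIFT       9                        /* 512-byte sectors */
#define PAGE_SHIFT_ASSUMED 12                       /* assume 4KiB pages */
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT_ASSUMED - SECTOR_SHIFT)
#define PAGE_SECTORS       (1u << PAGE_SECTORS_SHIFT)

int main(void)
{
	sector_t sector = 21;

	uint64_t idx = sector >> PAGE_SECTORS_SHIFT;                   /* page index  */
	unsigned off = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;  /* byte offset */

	/* sector 21 -> page 2, byte offset 5 * 512 = 2560 within that page */
	printf("page %llu, offset %u\n", (unsigned long long)idx, off);
	return 0;
}
```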
/drivers/scsi/
sr_vendor.c
91 is followed by a read for the same sector - aeb */
161 unsigned long sector; local
173 sector = 0; /* the multisession sector offset goes here */
199 sector = buffer[11] + (buffer[10] << 8) +
202 /* ignore sector offsets from first track */
203 sector = 0;
231 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
259 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
260 if (sector)
[all...]
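
The sr_vendor.c hits convert a CD address given as minutes/seconds/frames (MSF) into an absolute sector (frame) number: `sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame`, with 60 seconds per minute and 75 frames per second. A worked example of that formula, with the constants written out locally:

```c
#include <stdio.h>

#define CD_SECS   60   /* seconds per minute */
#define CD_FRAMES 75   /* frames per second  */

/* MSF (minute, second, frame) to absolute frame/sector number. */
static unsigned long msf_to_sector(unsigned min, unsigned sec, unsigned frame)
{
	return (unsigned long)min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
}

int main(void)
{
	/* 2 minutes, 30 seconds, 12 frames -> 2*4500 + 30*75 + 12 = 11262 */
	printf("%lu\n", msf_to_sector(2, 30, 12));
	return 0;
}
```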
/drivers/mtd/nand/
atmel_nand_ecc.h
120 #define pmecc_readb_ecc_relaxed(addr, sector, n) \
121 readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
123 #define pmecc_readl_rem_relaxed(addr, sector, n) \
124 readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
sh_flctl.c
298 "reading empty sector %d, ecc error ignored\n",
448 (struct sh_flctl *flctl, uint8_t *buff, int sector)
454 res = wait_recfifo_ready(flctl , sector);
584 int sector, page_sectors; local
600 for (sector = 0; sector < page_sectors; sector++) {
601 read_fiforeg(flctl, 512, 512 * sector);
604 &flctl->done_buff[mtd->writesize + 16 * sector],
605 sector);
447 read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector) argument
655 int sector, page_sectors; local
681 int sector, page_sectors; local
[all...]
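
sh_flctl.c above processes a NAND page as a sequence of 512-byte ECC sectors: `page_sectors` is derived from the page size, and each iteration reads 512 bytes at offset `512 * sector` before handling that sector's ECC. A rough sketch of that per-sector loop over a page buffer; the buffer layout, handler, and page size here are assumptions, not the driver's:

```c
#include <stddef.h>
#include <stdio.h>

#define ECC_SECTOR_SIZE 512

/* Hypothetical per-sector handler standing in for the FIFO read + ECC check. */
static void handle_sector(const unsigned char *data, int sector)
{
	printf("sector %d starts at page offset %d\n",
	       sector, sector * ECC_SECTOR_SIZE);
	(void)data;
}

/* Walk one NAND page in 512-byte ECC sectors. */
static void read_page_by_sectors(const unsigned char *page, size_t writesize)
{
	int page_sectors = (int)(writesize / ECC_SECTOR_SIZE);

	for (int sector = 0; sector < page_sectors; sector++)
		handle_sector(page + (size_t)sector * ECC_SECTOR_SIZE, sector);
}

int main(void)
{
	unsigned char page[2048] = { 0 };   /* assume a 2KiB page: 4 sectors */

	read_page_by_sectors(page, sizeof(page));
	return 0;
}
```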
/drivers/md/
raid0.c
270 * now since we have the hard sector sizes, we can make sure
271 * chunk size is a multiple of that sector size
309 sector_t sector = *sectorp; local
312 if (sector < z[i].zone_end) {
314 *sectorp = sector - z[i-1].zone_end;
325 sector_t sector, sector_t *sector_offset)
335 /* find the sector offset inside the chunk */
336 sect_in_chunk = sector & (chunk_sects - 1);
337 sector >>= chunksect_bits;
343 sect_in_chunk = sector_div(sector, chunk_sect
324 map_sector(struct mddev *mddev, struct strip_zone *zone, sector_t sector, sector_t *sector_offset) argument
371 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); local
507 sector_t sector = bio->bi_iter.bi_sector; local
525 sector_t sector = bio->bi_iter.bi_sector; local
[all...]
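
map_sector() in raid0.c splits a sector into its offset inside a chunk and the chunk-relative position: for power-of-two chunk sizes it uses `sect_in_chunk = sector & (chunk_sects - 1)` and a right shift by `chunksect_bits`, otherwise it falls back to a division via `sector_div()`. A small sketch of both paths, with `sector_div` emulated by plain division and the shift count taken from a GCC builtin:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Split `sector` into (chunk index, offset inside chunk). The fast path
 * requires chunk_sects to be a power of two; the slow path works for any
 * size (the kernel's sector_div() divides in place and returns the
 * remainder, emulated here with / and %). */
static void split_chunk(sector_t sector, unsigned int chunk_sects,
			sector_t *chunk, unsigned int *in_chunk)
{
	if ((chunk_sects & (chunk_sects - 1)) == 0) {       /* power of two */
		*in_chunk = (unsigned int)(sector & (chunk_sects - 1));
		*chunk = sector >> __builtin_ctz(chunk_sects);
	} else {
		*in_chunk = (unsigned int)(sector % chunk_sects);
		*chunk = sector / chunk_sects;
	}
}

int main(void)
{
	sector_t chunk;
	unsigned int in_chunk;

	split_chunk(1000, 128, &chunk, &in_chunk);  /* 128-sector (64KiB) chunks */
	printf("chunk %llu, offset %u\n", (unsigned long long)chunk, in_chunk);
	return 0;
}
```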
raid5.c
134 * This function is used to determine the 'next' bio in the list, given the sector
137 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) argument
140 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
451 (unsigned long long)sh->sector);
458 struct hlist_head *hp = stripe_hash(conf, sh->sector);
461 (unsigned long long)sh->sector);
523 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) argument
533 (unsigned long long)sector);
538 sh->sector = sector;
562 __find_stripe(struct r5conf *conf, sector_t sector, short generation) argument
658 get_active_stripe(struct r5conf *conf, sector_t sector, int previous, int noblock, int noquiesce) argument
946 async_copy_data(int frombio, struct bio *bio, struct page **page, sector_t sector, struct dma_async_tx_descriptor *tx, struct stripe_head *sh) argument
2671 sector_t sector = sh->dev[dd_idx].sector; local
4111 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); local
4131 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); local
5088 sector_t sector, logical_sector, last_sector; local
[all...]
dm-exception-store.h
197 sector_t sector)
199 return sector >> store->chunk_shift;
196 sector_to_chunk(struct dm_exception_store *store, sector_t sector) argument
raid1.h
125 sector_t sector; member in struct:r1bio
raid10.h
96 sector_t sector; /* virtual sector number */ member in struct:r10bio
dm-stripe.c
71 * Parse a single <dev> <sector> pair
211 static void stripe_map_sector(struct stripe_c *sc, sector_t sector, argument
214 sector_t chunk = dm_target_offset(sc->ti, sector);
239 static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, argument
244 stripe_map_sector(sc, sector, &stripe, result);
249 sector = *result;
251 *result -= sector_div(sector, sc->chunk_size);
253 *result = sector & ~(sector_t)(sc->chunk_size - 1);
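
dm-stripe rounds a sector down to the start of its chunk in two ways, depending on whether the chunk size is a power of two: `sector & ~(chunk_size - 1)` in the fast case, or subtracting the remainder returned by `sector_div()` otherwise. Both give the same answer; a brief user-space check of that equivalence (sizes chosen for illustration):

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Round `sector` down to the first sector of its chunk. */
static sector_t chunk_start(sector_t sector, uint32_t chunk_size)
{
	if ((chunk_size & (chunk_size - 1)) == 0)           /* power of two */
		return sector & ~(sector_t)(chunk_size - 1);
	return sector - sector % chunk_size;                /* general case */
}

int main(void)
{
	/* With a power-of-two chunk the mask and the remainder agree. */
	assert(chunk_start(1000, 128) == 1000 - 1000 % 128);
	assert(chunk_start(1000, 128) == 896);
	return 0;
}
```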
linear.c
30 static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) argument
46 if (sector < conf->disks[mid].end_sector)
70 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); local
75 dev0 = which_dev(mddev, sector);
76 maxsectors = dev0->end_sector - sector;
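
linear.c's which_dev() locates the member disk holding a sector by binary-searching an array of cumulative end sectors (the `sector < conf->disks[mid].end_sector` test above). A compact sketch of that lookup over a hypothetical table of cumulative end sectors:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Cumulative exclusive end sector of each member device (made-up sizes). */
static const sector_t end_sector[] = { 1000, 2500, 4000 };

/* Return the index of the first device whose end_sector is past `sector`. */
static int which_dev(sector_t sector)
{
	int lo = 0, hi = (int)(sizeof(end_sector) / sizeof(end_sector[0])) - 1;

	while (lo < hi) {
		int mid = (lo + hi) / 2;

		if (sector < end_sector[mid])
			hi = mid;
		else
			lo = mid + 1;
	}
	return lo;
}

int main(void)
{
	/* sector 1500 lies in the second device (which ends at 2500) */
	printf("%d\n", which_dev(1500));
	return 0;
}
```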
/drivers/char/
ps3flash.c
39 int tag; /* Start sector of buffer, -1 if invalid */
110 u64 size, sector, offset; local
130 sector = *pos / dev->bounce_size * priv->chunk_sectors;
140 res = ps3flash_fetch(dev, sector);
163 sector += priv->chunk_sectors;
179 u64 size, sector, offset; local
199 sector = *pos / dev->bounce_size * priv->chunk_sectors;
210 res = ps3flash_fetch(dev, sector);
211 else if (sector != priv->tag)
231 priv->tag = sector;
[all...]
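
ps3flash.c maps a byte file position to the start sector of its bounce-buffer chunk with `sector = *pos / dev->bounce_size * priv->chunk_sectors`, then compares that sector against the cached chunk's tag to decide whether a fetch is needed. A quick illustration of that mapping, with made-up sizes in place of the device's:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bounce_size   = 256 * 1024;        /* assumed chunk size: 256 KiB */
	uint64_t chunk_sectors = bounce_size >> 9;  /* 512 sectors per chunk       */
	uint64_t pos           = 1000000;           /* byte offset into the flash  */

	/* Integer division truncates to the chunk containing `pos`; the chunk
	 * index is then scaled back up to that chunk's starting sector. */
	uint64_t sector = pos / bounce_size * chunk_sectors;
	uint64_t offset = pos % bounce_size;        /* byte offset inside the chunk */

	printf("chunk start sector %llu, offset %llu bytes\n",
	       (unsigned long long)sector, (unsigned long long)offset);
	return 0;
}
```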
/drivers/usb/storage/
jumpshot.c
105 unsigned long sectors; /* total sector count */
106 unsigned long ssize; /* sector size in bytes */
164 u32 sector,
180 if (sector > 0x0FFFFFFF)
202 command[2] = sector & 0xFF;
203 command[3] = (sector >> 8) & 0xFF;
204 command[4] = (sector >> 16) & 0xFF;
206 command[5] = 0xE0 | ((sector >> 24) & 0x0F);
226 sector += thistime;
241 u32 sector,
162 jumpshot_read_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) argument
239 jumpshot_write_data(struct us_data *us, struct jumpshot_info *info, u32 sector, u32 sectors) argument
[all...]
datafab.c
68 unsigned long sectors; /* total sector count */
69 unsigned long ssize; /* sector size in bytes */
145 u32 sector,
190 command[2] = sector & 0xFF;
191 command[3] = (sector >> 8) & 0xFF;
192 command[4] = (sector >> 16) & 0xFF;
195 command[5] |= (sector >> 24) & 0x0F;
213 sector += thistime;
228 u32 sector,
278 command[2] = sector
143 datafab_read_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) argument
226 datafab_write_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) argument
[all...]
shuttle_usbat.c
104 #define USBAT_ATA_SECCNT 0x12 /* sector count (R/W) */
105 #define USBAT_ATA_SECNUM 0x13 /* sector number (R/W) */
143 unsigned long sectors; /* total sector count */
144 unsigned long ssize; /* sector size in bytes */
208 u32 sector, unsigned char cmd)
212 buf[2] = sector & 0xFF;
213 buf[3] = (sector >> 8) & 0xFF;
214 buf[4] = (sector >> 16) & 0xFF;
215 buf[5] = 0xE0 | ((sector >> 24) & 0x0F);
1116 u32 sector,
206 usbat_pack_ata_sector_cmd(unsigned char *buf, unsigned char thistime, u32 sector, unsigned char cmd) argument
1114 usbat_flash_read_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) argument
1205 usbat_flash_write_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) argument
1303 unsigned int sector; local
[all...]
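
jumpshot.c, datafab.c, and shuttle_usbat.c all pack a 28-bit LBA into the ATA taskfile bytes of a vendor command: the low three bytes go into command bytes 2-4 and the top nibble is OR'ed with 0xE0 (LBA mode plus drive-select bits) into byte 5, which is why jumpshot.c rejects sectors above 0x0FFFFFFF. A stand-alone sketch of that packing (the helper name is made up):

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a 28-bit LBA into ATA taskfile bytes the way the usb-storage
 * flash drivers above do; returns -1 if the LBA does not fit. */
static int pack_lba28(unsigned char cmd[8], uint32_t sector)
{
	if (sector > 0x0FFFFFFF)                   /* only 28 address bits */
		return -1;

	cmd[2] = sector & 0xFF;                    /* LBA  7:0  */
	cmd[3] = (sector >> 8) & 0xFF;             /* LBA 15:8  */
	cmd[4] = (sector >> 16) & 0xFF;            /* LBA 23:16 */
	cmd[5] = 0xE0 | ((sector >> 24) & 0x0F);   /* LBA 27:24 + mode bits */
	return 0;
}

int main(void)
{
	unsigned char cmd[8] = { 0 };

	pack_lba28(cmd, 0x01234567);
	printf("%02x %02x %02x %02x\n", cmd[2], cmd[3], cmd[4], cmd[5]);
	return 0;
}
```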
/drivers/target/
target_core_sbc.c
182 * Use 8-bit sector value. SBC-3 says:
832 * Currently enforce COMPARE_AND_WRITE for a single sector
1085 sector_t sector = cmd->t_task_lba; local
1106 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
1109 pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
1111 (unsigned long long)sector, sdt->guard_tag,
1114 sector++;
1125 const void *p, sector_t sector, unsigned int ei_lba)
1133 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
1134 " csum 0x%04x\n", (unsigned long long)sector,
1124 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, const void *p, sector_t sector, unsigned int ei_lba) argument
1206 sector_t sector = start; local
1261 sector_t sector = start; local
[all...]
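
The target_core_sbc.c hits fill T10 DIF protection tuples: the reference tag of each protected block is the low 32 bits of its LBA, stored big-endian and incremented block by block (`sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); ... sector++`). A user-space sketch of that ref-tag sequence, with the guard and application tags left out and `put_be32()` standing in for the cpu_to_be32 store:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Store a 32-bit value big-endian, as the on-disk DIF tuple expects. */
static void put_be32(unsigned char *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* Fill one big-endian reference tag per block, starting at `lba`:
 * each tag is the low 32 bits of that block's LBA. */
static void fill_ref_tags(unsigned char *tags, sector_t lba, unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++, lba++)
		put_be32(tags + 4 * i, (uint32_t)(lba & 0xffffffff));
}

int main(void)
{
	unsigned char tags[4 * 3];

	fill_ref_tags(tags, 0x100000000ULL + 5, 3);  /* tags wrap to 5, 6, 7 */
	for (int i = 0; i < 3; i++)
		printf("ref_tag[%d] = %u\n", i,
		       (unsigned)(((unsigned)tags[4*i] << 24) |
				  ((unsigned)tags[4*i+1] << 16) |
				  ((unsigned)tags[4*i+2] << 8) | tags[4*i+3]));
	return 0;
}
```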
/drivers/mtd/devices/
docg3.c
407 * @sector: the sector
409 static void doc_setup_addr_sector(struct docg3 *docg3, int sector) argument
412 doc_flash_address(docg3, sector & 0xff);
413 doc_flash_address(docg3, (sector >> 8) & 0xff);
414 doc_flash_address(docg3, (sector >> 16) & 0xff);
421 * @sector: the sector
424 static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) argument
429 doc_flash_address(docg3, sector
450 int sector, ret = 0; local
499 int ret = 0, sector; local
826 uint sector, pages_biblock; local
1160 int ret, sector; local
[all...]
/drivers/mtd/
ftl.c
788 u_long sector, u_long nblocks)
796 part, sector, nblocks);
804 if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
808 log_addr = part->VirtualBlockMap[sector+i];
902 u_long sector, u_long nblocks)
910 part, sector, nblocks);
924 virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
961 old_addr = part->VirtualBlockMap[sector+i];
963 part->VirtualBlockMap[sector+i] = 0xffffffff;
972 part->VirtualBlockMap[sector
787 ftl_read(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks) argument
901 ftl_write(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks) argument
1008 ftl_discardsect(struct mtd_blktrans_dev *dev, unsigned long sector, unsigned nr_sects) argument
[all...]
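
ftl.c translates block-device sectors into flash addresses through VirtualBlockMap: a request is first checked against the formatted size (`(sector + i) * SECTOR_SIZE >= FormattedSize`), then each logical sector's entry is looked up, with 0xffffffff marking an entry that has no current mapping. A simplified sketch of that table lookup; the table, sizes, and `ftl_read_sketch()` here are invented for illustration, not the driver's code:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512u
#define UNMAPPED    0xffffffffu

/* Toy logical-to-physical map: one 32-bit flash address per logical sector. */
static uint32_t VirtualBlockMap[8] = {
	0x1000, UNMAPPED, 0x3000, UNMAPPED, 0x5000, 0x6000, UNMAPPED, 0x8000,
};

/* Read `nblocks` sectors starting at `sector`: mapped sectors would be read
 * from their flash address, unmapped ones are returned as zeroes. */
static int ftl_read_sketch(unsigned char *buf, unsigned long sector,
			   unsigned long nblocks, uint32_t formatted_size)
{
	for (unsigned long i = 0; i < nblocks; i++) {
		if ((sector + i) * SECTOR_SIZE >= formatted_size)
			return -1;                        /* past the formatted area */

		uint32_t log_addr = VirtualBlockMap[sector + i];
		if (log_addr == UNMAPPED)
			memset(buf + i * SECTOR_SIZE, 0, SECTOR_SIZE);
		else
			printf("sector %lu -> flash address 0x%x\n",
			       sector + i, (unsigned)log_addr);  /* real code reads flash here */
	}
	return 0;
}

int main(void)
{
	unsigned char buf[4 * SECTOR_SIZE];

	return ftl_read_sketch(buf, 0, 4, 8 * SECTOR_SIZE);
}
```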

Completed in 327 milliseconds
