Lines Matching refs:brd

31 * the pages containing the block device's contents. A brd page's ->index is its position in the device, in PAGE_SIZE units.
52 * Look up and return a brd's page for a given sector.
55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
62 * device node -- brd pages will never be deleted under us, so we
73 page = radix_tree_lookup(&brd->brd_pages, idx);
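The radix tree is keyed by page index, not by sector: with 512-byte sectors and 4 KiB pages the driver shifts the sector right by PAGE_SECTORS_SHIFT bits to get the key, and the low bits become the byte offset within the page. A minimal user-space sketch of that mapping, assuming those sizes (the macro names mirror the driver's):

    #include <stdio.h>

    #define SECTOR_SHIFT        9                   /* 512-byte sectors */
    #define PAGE_SHIFT          12                  /* 4096-byte pages */
    #define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
    #define PAGE_SECTORS        (1u << PAGE_SECTORS_SHIFT)

    int main(void)
    {
        unsigned long long sector = 1001;   /* illustrative sector */

        /* Radix-tree key: which page of the ramdisk holds this sector. */
        unsigned long long idx = sector >> PAGE_SECTORS_SHIFT;
        /* Byte offset of the sector inside that page. */
        unsigned int off = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;

        printf("sector %llu -> page index %llu, offset %u\n", sector, idx, off);
        return 0;
    }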
82 * Look up and return a brd's page for a given sector; if one does not exist, allocate an empty page, insert it, and return it.
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
92 page = brd_lookup_page(brd, sector);
118 spin_lock(&brd->brd_lock);
121 if (radix_tree_insert(&brd->brd_pages, idx, page)) {
123 page = radix_tree_lookup(&brd->brd_pages, idx);
127 spin_unlock(&brd->brd_lock);
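The insert path allocates outside the lock and takes brd_lock only to publish the page; if another CPU inserted the same index first, the freshly allocated page is dropped and the existing one reused. A user-space model of that optimistic pattern, with a plain array and a mutex standing in for the radix tree and the spinlock (illustrative names, idx < NR_SLOTS assumed):

    #include <pthread.h>
    #include <stdlib.h>

    #define NR_SLOTS 1024

    static void *table[NR_SLOTS];   /* stand-in for brd->brd_pages */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;   /* brd->brd_lock */

    static void *insert_page(unsigned long idx)
    {
        void *page = table[idx];

        if (page)
            return page;              /* fast path, no lock, as in the driver */

        page = calloc(1, 4096);       /* allocate outside the lock */
        if (!page)
            return NULL;

        pthread_mutex_lock(&lock);
        if (table[idx]) {             /* raced: another thread inserted first */
            free(page);
            page = table[idx];
        } else {
            table[idx] = page;
        }
        pthread_mutex_unlock(&lock);
        return page;
    }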
134 static void brd_free_page(struct brd_device *brd, sector_t sector)
139 spin_lock(&brd->brd_lock);
141 page = radix_tree_delete(&brd->brd_pages, idx);
142 spin_unlock(&brd->brd_lock);
147 static void brd_zero_page(struct brd_device *brd, sector_t sector)
151 page = brd_lookup_page(brd, sector);
161 static void brd_free_pages(struct brd_device *brd)
170 nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
178 ret = radix_tree_delete(&brd->brd_pages, pos);
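Teardown pulls pages out of the tree in batches via the gang lookup and repeats until a batch comes back short. A user-space sketch of the same loop shape, with a linear scan over an array playing the gang lookup's role:

    #include <stdlib.h>

    #define NR_SLOTS   1024
    #define FREE_BATCH 16

    static void *table[NR_SLOTS];     /* stand-in for brd->brd_pages */

    static void free_all_pages(void)
    {
        unsigned long pos = 0;
        void *batch[FREE_BATCH];
        unsigned long idx[FREE_BATCH];
        int nr, i;

        do {
            /* "gang lookup": collect up to FREE_BATCH populated slots from pos */
            nr = 0;
            while (pos < NR_SLOTS && nr < FREE_BATCH) {
                if (table[pos]) {
                    batch[nr] = table[pos];
                    idx[nr++] = pos;
                }
                pos++;
            }
            for (i = 0; i < nr; i++) {
                table[idx[i]] = NULL; /* unlink first, then free */
                free(batch[i]);
            }
        } while (nr == FREE_BATCH);   /* a short batch means we're done */
    }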
196 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
202 if (!brd_insert_page(brd, sector))
206 if (!brd_insert_page(brd, sector))
212 static void discard_from_brd(struct brd_device *brd,
222 brd_free_page(brd, sector);
224 brd_zero_page(brd, sector);
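A discard is walked one full page at a time: each covered page is either dropped from the tree outright or merely zeroed when freeing is undesirable. A sketch of the walk, assuming a page-aligned range as the driver's callers arrange:

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define PAGE_SIZE    4096u

    int main(void)
    {
        unsigned long long sector = 0;    /* illustrative, page-aligned start */
        size_t n = 3 * PAGE_SIZE;         /* illustrative discard length */

        while (n >= PAGE_SIZE) {
            printf("free or zero the page at sector %llu\n", sector);
            sector += PAGE_SIZE >> SECTOR_SHIFT;   /* 8 sectors per page */
            n -= PAGE_SIZE;
        }
        return 0;
    }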
231 * Copy n bytes from src to the brd starting at sector. Does not sleep.
233 static void copy_to_brd(struct brd_device *brd, const void *src,
242 page = brd_lookup_page(brd, sector);
253 page = brd_lookup_page(brd, sector);
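The two lookups above are the page-boundary split: a copy that starts mid-page touches at most two pages, so the first memcpy takes what fits and the remainder lands at offset 0 of the next page. A user-space sketch, assuming n never exceeds PAGE_SIZE (which the callers guarantee per segment); pages[] is an illustrative dense array standing in for the radix tree:

    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Copy n bytes (n <= PAGE_SIZE) to byte offset byte_off of the store. */
    static void copy_to_pages(char pages[][PAGE_SIZE], unsigned long byte_off,
                              const void *src, size_t n)
    {
        unsigned long idx = byte_off / PAGE_SIZE;
        unsigned int off = byte_off % PAGE_SIZE;
        size_t copy = n < PAGE_SIZE - off ? n : PAGE_SIZE - off;

        memcpy(pages[idx] + off, src, copy);
        if (copy < n)   /* remainder starts the next page, as in copy_to_brd */
            memcpy(pages[idx + 1], (const char *)src + copy, n - copy);
    }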
263 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
265 static void copy_from_brd(void *dst, struct brd_device *brd,
274 page = brd_lookup_page(brd, sector);
286 page = brd_lookup_page(brd, sector);
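The read side differs in one respect: a failed lookup is a hole, and holes read back as zeroes rather than as an error. A sketch with a stub standing in for brd_lookup_page():

    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Stand-in for brd_lookup_page(); returning NULL models a hole. */
    static void *lookup_page(unsigned long idx)
    {
        (void)idx;
        return NULL;    /* nothing written yet: every page is a hole here */
    }

    static void read_page_range(void *dst, unsigned long idx,
                                unsigned int off, size_t len)
    {
        const char *page = lookup_page(idx);

        if (page)
            memcpy(dst, page + off, len);
        else
            memset(dst, 0, len);    /* unwritten ranges read back as zeroes */
    }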
299 static int brd_do_bvec(struct brd_device *brd, struct page *page,
307 err = copy_to_brd_setup(brd, sector, len);
314 copy_from_brd(mem + off, brd, sector, len);
318 copy_to_brd(brd, mem + off, sector, len);
329 struct brd_device *brd = bdev->bd_disk->private_data;
342 discard_from_brd(brd, sector, bio->bi_iter.bi_size);
352 err = brd_do_bvec(brd, bvec.bv_page, len,
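brd_make_request visits the request one segment at a time and advances the sector cursor by len >> SECTOR_SHIFT after each one. A user-space model of that walk; the seg struct is an illustrative stand-in for struct bio_vec:

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    struct seg { unsigned int len; };   /* stand-in for struct bio_vec */

    int main(void)
    {
        struct seg segs[] = { {4096}, {4096}, {2048} };   /* illustrative bio */
        unsigned long long sector = 0;
        size_t i;

        for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
            printf("brd_do_bvec at sector %llu, %u bytes\n", sector, segs[i].len);
            sector += segs[i].len >> SECTOR_SHIFT;   /* advance the cursor */
        }
        return 0;
    }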
366 struct brd_device *brd = bdev->bd_disk->private_data;
367 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
376 struct brd_device *brd = bdev->bd_disk->private_data;
379 if (!brd)
385 page = brd_insert_page(brd, sector);
399 struct brd_device *brd = bdev->bd_disk->private_data;
420 brd_free_pages(brd);
447 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
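For context, the parameter declaration this description pairs with; a kernel-style fragment that compiles only in-tree, and the S_IRUGO read-only permission here is an assumption about this vintage of the driver:

    #include <linux/module.h>

    static int rd_nr;
    module_param(rd_nr, int, S_IRUGO);   /* exposes rd_nr under /sys/module */
    MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");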
477 struct brd_device *brd;
480 brd = kzalloc(sizeof(*brd), GFP_KERNEL);
481 if (!brd)
483 brd->brd_number = i;
484 spin_lock_init(&brd->brd_lock);
485 INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
487 brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
488 if (!brd->brd_queue)
490 blk_queue_make_request(brd->brd_queue, brd_make_request);
491 blk_queue_max_hw_sectors(brd->brd_queue, 1024);
492 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
494 brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
495 brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
496 brd->brd_queue->limits.discard_zeroes_data = 1;
497 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
499 disk = brd->brd_disk = alloc_disk(1 << part_shift);
505 disk->private_data = brd;
506 disk->queue = brd->brd_queue;
512 return brd;
515 blk_cleanup_queue(brd->brd_queue);
517 kfree(brd);
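Allocation in brd_alloc proceeds device struct, then queue, then disk, and the cleanup lines above unwind in exactly the reverse order on failure. A user-space model of that goto-based unwind, with malloc standing in for the block-layer allocators:

    #include <stdlib.h>

    struct dev {
        void *queue;
        void *disk;
    };

    static struct dev *dev_alloc(void)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;

        d->queue = malloc(64);        /* stands in for blk_alloc_queue() */
        if (!d->queue)
            goto out_free_dev;

        d->disk = malloc(64);         /* stands in for alloc_disk() */
        if (!d->disk)
            goto out_free_queue;

        return d;

    out_free_queue:
        free(d->queue);               /* undo in reverse order of setup */
    out_free_dev:
        free(d);
        return NULL;
    }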
522 static void brd_free(struct brd_device *brd)
524 put_disk(brd->brd_disk);
525 blk_cleanup_queue(brd->brd_queue);
526 brd_free_pages(brd);
527 kfree(brd);
532 struct brd_device *brd;
534 list_for_each_entry(brd, &brd_devices, brd_list) {
535 if (brd->brd_number == i)
539 brd = brd_alloc(i);
540 if (brd) {
541 add_disk(brd->brd_disk);
542 list_add_tail(&brd->brd_list, &brd_devices);
545 return brd;
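brd_init_one is a find-or-create over the brd_devices list, keyed by device number. A user-space model with a singly linked list (the driver appends to the tail; prepending keeps the sketch short):

    #include <stdlib.h>

    struct node {
        int number;                   /* models brd->brd_number */
        struct node *next;
    };

    static struct node *head;         /* stand-in for the brd_devices list */

    static struct node *init_one(int i)
    {
        struct node *n;

        for (n = head; n; n = n->next)
            if (n->number == i)
                return n;             /* already instantiated */

        n = calloc(1, sizeof(*n));
        if (n) {
            n->number = i;            /* new device takes this number */
            n->next = head;
            head = n;
        }
        return n;
    }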
548 static void brd_del_one(struct brd_device *brd)
550 list_del(&brd->brd_list);
551 del_gendisk(brd->brd_disk);
552 brd_free(brd);
557 struct brd_device *brd;
561 brd = brd_init_one(MINOR(dev) >> part_shift);
562 kobj = brd ? get_disk(brd->brd_disk) : NULL;
573 struct brd_device *brd, *next;
576 * the brd module now has a feature to instantiate the underlying device
586 * extend brd devices by creating dev nodes themselves and have
623 brd = brd_alloc(i);
624 if (!brd)
626 list_add_tail(&brd->brd_list, &brd_devices);
631 list_for_each_entry(brd, &brd_devices, brd_list)
632 add_disk(brd->brd_disk);
637 printk(KERN_INFO "brd: module loaded\n");
641 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
642 list_del(&brd->brd_list);
643 brd_free(brd);
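Both this error path and brd_exit below tear the list down while walking it, which is why they use list_for_each_entry_safe: the next pointer must be saved before the current node is freed. The same idiom in plain C:

    #include <stdlib.h>

    struct node { struct node *next; };

    static void free_list(struct node *head)
    {
        struct node *n = head, *next;

        while (n) {
            next = n->next;           /* saved before n is freed */
            free(n);
            n = next;
        }
    }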
653 struct brd_device *brd, *next;
657 list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
658 brd_del_one(brd);