Lines Matching defs:md

57 struct mapped_device *md;
81 struct mapped_device *md;
115 * Bits for the md->flags field.
338 int dm_deleting_md(struct mapped_device *md)
340 return test_bit(DMF_DELETING, &md->flags);
345 struct mapped_device *md;
349 md = bdev->bd_disk->private_data;
350 if (!md)
353 if (test_bit(DMF_FREEING, &md->flags) ||
354 dm_deleting_md(md)) {
355 md = NULL;
359 dm_get(md);
360 atomic_inc(&md->open_count);
365 return md ? 0 : -ENXIO;
370 struct mapped_device *md = disk->private_data;
374 atomic_dec(&md->open_count);
375 dm_put(md);
382 int dm_open_count(struct mapped_device *md)
384 return atomic_read(&md->open_count);
390 int dm_lock_for_deletion(struct mapped_device *md)
396 if (dm_open_count(md))
399 set_bit(DMF_DELETING, &md->flags);
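
A minimal sketch, not taken from dm.c, of how a removal path is expected to combine dm_open_count()/dm_lock_for_deletion() with the flags checked in dm_blk_open(); the caller name is hypothetical and the -EBUSY return is an assumption about the elided else-branch:

/* Hypothetical caller: refuse to tear down a device that is still open. */
static int demo_remove(struct mapped_device *md)
{
	int r;

	r = dm_lock_for_deletion(md);	/* assumed -EBUSY while dm_open_count(md) != 0 */
	if (r)
		return r;

	/* DMF_DELETING is now set, so dm_blk_open() rejects new openers. */
	dm_destroy(md);			/* reference handling of the real ioctl path omitted */
	return 0;
}
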
408 struct mapped_device *md = bdev->bd_disk->private_data;
410 return dm_get_geometry(md, geo);
416 struct mapped_device *md = bdev->bd_disk->private_data;
417 struct dm_table *map = dm_get_live_table(md);
430 if (dm_suspended_md(md)) {
444 static struct dm_io *alloc_io(struct mapped_device *md)
446 return mempool_alloc(md->io_pool, GFP_NOIO);
449 static void free_io(struct mapped_device *md, struct dm_io *io)
451 mempool_free(io, md->io_pool);
454 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
456 mempool_free(tio, md->tio_pool);
459 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
462 return mempool_alloc(md->tio_pool, gfp_mask);
467 mempool_free(tio, tio->md->tio_pool);
470 static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
472 return mempool_alloc(md->io_pool, GFP_ATOMIC);
477 mempool_free(info, info->tio->md->io_pool);
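
alloc_io()/free_io() and friends are thin wrappers over per-device mempools. A generic sketch of the same setup and allocation pattern, with placeholder names; GFP_NOIO matters because these allocations sit on the I/O path and must not recurse into reclaim that issues more I/O:

#include <linux/slab.h>
#include <linux/mempool.h>

struct demo_io {
	int error;
};

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;		/* plays the role of md->io_pool */

static int demo_pool_init(void)
{
	demo_cache = kmem_cache_create("demo_io", sizeof(struct demo_io), 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	demo_pool = mempool_create_slab_pool(16, demo_cache);	/* 16 reserved objects */
	if (!demo_pool) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

/* With a sleeping mask such as GFP_NOIO, mempool_alloc() will not fail. */
static struct demo_io *demo_alloc_io(void)
{
	return mempool_alloc(demo_pool, GFP_NOIO);
}

static void demo_free_io(struct demo_io *io)
{
	mempool_free(io, demo_pool);
}
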
480 static int md_in_flight(struct mapped_device *md)
482 return atomic_read(&md->pending[READ]) +
483 atomic_read(&md->pending[WRITE]);
488 struct mapped_device *md = io->md;
495 part_round_stats(cpu, &dm_disk(md)->part0);
497 atomic_set(&dm_disk(md)->part0.in_flight[rw],
498 atomic_inc_return(&md->pending[rw]));
503 struct mapped_device *md = io->md;
510 part_round_stats(cpu, &dm_disk(md)->part0);
511 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
518 pending = atomic_dec_return(&md->pending[rw]);
519 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
520 pending += atomic_read(&md->pending[rw^0x1]);
524 wake_up(&md->wait);
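
start_io_acct()/end_io_acct() above keep per-direction in-flight counters and wake md->wait once both directions drain. The core of that pattern, reduced to generic primitives (the part_stat bookkeeping is omitted):

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_inflight {
	atomic_t pending[2];		/* indexed like md->pending[READ]/[WRITE] */
	wait_queue_head_t wait;
};

static void demo_inflight_init(struct demo_inflight *d)
{
	atomic_set(&d->pending[0], 0);
	atomic_set(&d->pending[1], 0);
	init_waitqueue_head(&d->wait);
}

static void demo_io_start(struct demo_inflight *d, int rw)
{
	atomic_inc(&d->pending[rw]);
}

static void demo_io_end(struct demo_inflight *d, int rw)
{
	int pending = atomic_dec_return(&d->pending[rw]);

	pending += atomic_read(&d->pending[rw ^ 0x1]);	/* the other direction */
	if (!pending)
		wake_up(&d->wait);	/* lets a dm_wait_for_completion()-style waiter proceed */
}
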
530 static void queue_io(struct mapped_device *md, struct bio *bio)
534 spin_lock_irqsave(&md->deferred_lock, flags);
535 bio_list_add(&md->deferred, bio);
536 spin_unlock_irqrestore(&md->deferred_lock, flags);
537 queue_work(md->wq, &md->work);
542 * function to access the md->map field, and make sure they call
545 struct dm_table *dm_get_live_table(struct mapped_device *md)
550 read_lock_irqsave(&md->map_lock, flags);
551 t = md->map;
554 read_unlock_irqrestore(&md->map_lock, flags);
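
Callers of dm_get_live_table() in this era get a referenced table that they must drop when done. A hedged usage sketch; dm_table_put() and dm_table_get_num_targets() do not appear in the lines above and are assumed from the device-mapper headers of the same period:

static unsigned demo_count_targets(struct mapped_device *md)
{
	struct dm_table *map = dm_get_live_table(md);
	unsigned num;

	if (!map)
		return 0;			/* no table bound yet */

	num = dm_table_get_num_targets(map);
	dm_table_put(map);			/* drop the reference taken under map_lock */

	return num;
}
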
562 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
564 *geo = md->geometry;
572 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
581 md->geometry = *geo;
595 static int __noflush_suspending(struct mapped_device *md)
597 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
609 struct mapped_device *md = io->md;
614 if (!(io->error > 0 && __noflush_suspending(md)))
624 spin_lock_irqsave(&md->deferred_lock, flags);
625 if (__noflush_suspending(md))
626 bio_list_add_head(&md->deferred, io->bio);
630 spin_unlock_irqrestore(&md->deferred_lock, flags);
636 free_io(md, io);
647 queue_io(md, bio);
650 trace_block_bio_complete(md->queue, bio, io_error);
661 struct mapped_device *md = tio->io->md;
685 * Store md for cleanup instead of tio which is about to get freed.
687 bio->bi_private = md->bs;
689 free_tio(md, tio);
745 * Don't touch any member of the md after calling this function because
746 * the md may be freed in dm_put() at the end of this function.
749 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
751 atomic_dec(&md->pending[rw]);
754 if (!md_in_flight(md))
755 wake_up(&md->wait);
758 blk_run_queue(md->queue);
763 dm_put(md);
782 struct mapped_device *md = tio->md;
800 rq_completed(md, rw, true);
820 struct mapped_device *md = tio->md;
831 rq_completed(md, rw, 0);
992 struct mapped_device *md;
1014 md = tio->io->md;
1020 clone->bi_private = md->bs;
1022 free_tio(md, tio);
1030 struct mapped_device *md;
1110 struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1132 clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1175 ci->md->bs);
1253 ci->md->bs);
1282 ci->md->bs);
1300 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1305 ci.map = dm_get_live_table(md);
1311 ci.md = md;
1312 ci.io = alloc_io(md);
1316 ci.io->md = md;
1323 ci.bio = &ci.md->flush_bio;
1346 struct mapped_device *md = q->queuedata;
1347 struct dm_table *map = dm_get_live_table(md);
1406 struct mapped_device *md = q->queuedata;
1409 down_read(&md->io_lock);
1412 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1413 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1417 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1418 up_read(&md->io_lock);
1421 queue_io(md, bio);
1427 __split_and_process_bio(md, bio);
1428 up_read(&md->io_lock);
1432 static int dm_request_based(struct mapped_device *md)
1434 return blk_queue_stackable(md->queue);
1439 struct mapped_device *md = q->queuedata;
1441 if (dm_request_based(md))
1464 struct mapped_device *md = info->tio->md;
1467 bio_free(bio, md->bs);
1474 struct mapped_device *md = tio->md;
1475 struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1494 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1509 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1515 tio = alloc_rq_tio(md, gfp_mask);
1519 tio->md = md;
1540 struct mapped_device *md = q->queuedata;
1548 clone = clone_rq(rq, md, GFP_ATOMIC);
1564 struct mapped_device *md)
1570 * Hold the md reference here for the in-flight I/O.
1576 dm_get(md);
1586 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1615 struct mapped_device *md = q->queuedata;
1616 struct dm_table *map = dm_get_live_table(md);
1645 atomic_inc(&md->pending[rq_data_dir(clone)]);
1648 if (map_request(ti, clone, md))
1678 struct mapped_device *md = q->queuedata;
1679 struct dm_table *map = dm_get_live_table(md);
1681 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1694 struct mapped_device *md = congested_data;
1697 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1698 map = dm_get_live_table(md);
1704 if (dm_request_based(md))
1705 r = md->queue->backing_dev_info.state &
1794 static void dm_init_md_queue(struct mapped_device *md)
1805 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1807 md->queue->queuedata = md;
1808 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1809 md->queue->backing_dev_info.congested_data = md;
1810 blk_queue_make_request(md->queue, dm_request);
1811 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1812 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
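
dm_init_md_queue() wires md->queue as a bio-based queue. A generic sketch of the same block-layer calls for a hypothetical driver; the make_request callback prototype changed across kernels of this period, so the void-returning form used here is an assumption:

#include <linux/blkdev.h>

static void demo_make_request(struct request_queue *q, struct bio *bio);

static struct request_queue *demo_init_queue(void *drvdata)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;

	q->queuedata = drvdata;				/* recovered via q->queuedata later */
	blk_queue_make_request(q, demo_make_request);	/* bio-based entry point */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	return q;
}
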
1821 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1824 if (!md) {
1840 md->type = DM_TYPE_NONE;
1841 init_rwsem(&md->io_lock);
1842 mutex_init(&md->suspend_lock);
1843 mutex_init(&md->type_lock);
1844 spin_lock_init(&md->deferred_lock);
1845 rwlock_init(&md->map_lock);
1846 atomic_set(&md->holders, 1);
1847 atomic_set(&md->open_count, 0);
1848 atomic_set(&md->event_nr, 0);
1849 atomic_set(&md->uevent_seq, 0);
1850 INIT_LIST_HEAD(&md->uevent_list);
1851 spin_lock_init(&md->uevent_lock);
1853 md->queue = blk_alloc_queue(GFP_KERNEL);
1854 if (!md->queue)
1857 dm_init_md_queue(md);
1859 md->disk = alloc_disk(1);
1860 if (!md->disk)
1863 atomic_set(&md->pending[0], 0);
1864 atomic_set(&md->pending[1], 0);
1865 init_waitqueue_head(&md->wait);
1866 INIT_WORK(&md->work, dm_wq_work);
1867 init_waitqueue_head(&md->eventq);
1869 md->disk->major = _major;
1870 md->disk->first_minor = minor;
1871 md->disk->fops = &dm_blk_dops;
1872 md->disk->queue = md->queue;
1873 md->disk->private_data = md;
1874 sprintf(md->disk->disk_name, "dm-%d", minor);
1875 add_disk(md->disk);
1876 format_dev_t(md->name, MKDEV(_major, minor));
1878 md->wq = alloc_workqueue("kdmflush",
1880 if (!md->wq)
1883 md->bdev = bdget_disk(md->disk, 0);
1884 if (!md->bdev)
1887 bio_init(&md->flush_bio);
1888 md->flush_bio.bi_bdev = md->bdev;
1889 md->flush_bio.bi_rw = WRITE_FLUSH;
1893 old_md = idr_replace(&_minor_idr, md, minor);
1898 return md;
1901 destroy_workqueue(md->wq);
1903 del_gendisk(md->disk);
1904 put_disk(md->disk);
1906 blk_cleanup_queue(md->queue);
1912 kfree(md);
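
The tail of alloc_dev() (lines 1901-1912 above) is the usual goto-based unwind, releasing resources in reverse order of allocation. A condensed sketch of the idiom with a reduced, hypothetical resource set:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_dev {
	struct request_queue *queue;
	struct gendisk *disk;
	struct workqueue_struct *wq;
};

static struct demo_dev *demo_alloc_dev(void)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	d->queue = blk_alloc_queue(GFP_KERNEL);
	if (!d->queue)
		goto bad_queue;

	d->disk = alloc_disk(1);
	if (!d->disk)
		goto bad_disk;

	d->wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!d->wq)
		goto bad_wq;

	return d;

bad_wq:
	put_disk(d->disk);	/* disk was never added, so no del_gendisk() here */
bad_disk:
	blk_cleanup_queue(d->queue);
bad_queue:
	kfree(d);
	return NULL;
}
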
1916 static void unlock_fs(struct mapped_device *md);
1918 static void free_dev(struct mapped_device *md)
1920 int minor = MINOR(disk_devt(md->disk));
1922 unlock_fs(md);
1923 bdput(md->bdev);
1924 destroy_workqueue(md->wq);
1925 if (md->tio_pool)
1926 mempool_destroy(md->tio_pool);
1927 if (md->io_pool)
1928 mempool_destroy(md->io_pool);
1929 if (md->bs)
1930 bioset_free(md->bs);
1931 blk_integrity_unregister(md->disk);
1932 del_gendisk(md->disk);
1936 md->disk->private_data = NULL;
1939 put_disk(md->disk);
1940 blk_cleanup_queue(md->queue);
1942 kfree(md);
1945 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1949 if (md->io_pool && md->tio_pool && md->bs)
1950 /* the md already has necessary mempools */
1954 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
1956 md->io_pool = p->io_pool;
1958 md->tio_pool = p->tio_pool;
1960 md->bs = p->bs;
1975 struct mapped_device *md = (struct mapped_device *) context;
1977 spin_lock_irqsave(&md->uevent_lock, flags);
1978 list_splice_init(&md->uevent_list, &uevents);
1979 spin_unlock_irqrestore(&md->uevent_lock, flags);
1981 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1983 atomic_inc(&md->event_nr);
1984 wake_up(&md->eventq);
1988 * Protected by md->suspend_lock obtained by dm_swap_table().
1990 static void __set_size(struct mapped_device *md, sector_t size)
1992 set_capacity(md->disk, size);
1994 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2053 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2057 struct request_queue *q = md->queue;
2067 if (size != get_capacity(md->disk))
2068 memset(&md->geometry, 0, sizeof(md->geometry));
2070 __set_size(md, size);
2072 dm_table_event_callback(t, event_callback, md);
2084 __bind_mempools(md, t);
2088 write_lock_irqsave(&md->map_lock, flags);
2089 old_map = md->map;
2090 md->map = t;
2091 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2095 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2097 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2098 write_unlock_irqrestore(&md->map_lock, flags);
2106 static struct dm_table *__unbind(struct mapped_device *md)
2108 struct dm_table *map = md->map;
2115 write_lock_irqsave(&md->map_lock, flags);
2116 md->map = NULL;
2117 write_unlock_irqrestore(&md->map_lock, flags);
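
__bind()/__unbind() publish and retire md->map under map_lock while dm_get_live_table() reads it. The pointer-swap pattern in generic form; the table reference counting the real code performs under the read lock is noted but not reproduced:

#include <linux/spinlock.h>

struct demo_map_holder {
	rwlock_t lock;
	void *map;			/* stands in for md->map */
};

/* Writer: publish a new map, hand the old one back for teardown outside the lock. */
static void *demo_swap_map(struct demo_map_holder *h, void *new_map)
{
	unsigned long flags;
	void *old;

	write_lock_irqsave(&h->lock, flags);
	old = h->map;
	h->map = new_map;
	write_unlock_irqrestore(&h->lock, flags);

	return old;
}

/* Reader: take a stable snapshot of the pointer (dm also grabs a table reference here). */
static void *demo_get_map(struct demo_map_holder *h)
{
	unsigned long flags;
	void *map;

	read_lock_irqsave(&h->lock, flags);
	map = h->map;
	read_unlock_irqrestore(&h->lock, flags);

	return map;
}
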
2127 struct mapped_device *md;
2129 md = alloc_dev(minor);
2130 if (!md)
2133 dm_sysfs_init(md);
2135 *result = md;
2140 * Functions to manage md->type.
2141 * All are required to hold md->type_lock.
2143 void dm_lock_md_type(struct mapped_device *md)
2145 mutex_lock(&md->type_lock);
2148 void dm_unlock_md_type(struct mapped_device *md)
2150 mutex_unlock(&md->type_lock);
2153 void dm_set_md_type(struct mapped_device *md, unsigned type)
2155 md->type = type;
2158 unsigned dm_get_md_type(struct mapped_device *md)
2160 return md->type;
2163 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2165 return md->immutable_target_type;
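
The md->type accessors exist so the first table load can fix the device type under type_lock. A hedged sketch of how a load path is expected to use them; the -EINVAL on a type change is an assumption modeled on the ioctl layer, not taken from the lines above:

static int demo_settle_type(struct mapped_device *md, unsigned table_type)
{
	int r = 0;

	dm_lock_md_type(md);

	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, table_type);		/* first table decides the type */
	else if (dm_get_md_type(md) != table_type)
		r = -EINVAL;				/* type may not change afterwards */

	dm_unlock_md_type(md);

	return r;
}
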
2171 static int dm_init_request_based_queue(struct mapped_device *md)
2175 if (md->queue->elevator)
2179 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2183 md->queue = q;
2184 dm_init_md_queue(md);
2185 blk_queue_softirq_done(md->queue, dm_softirq_done);
2186 blk_queue_prep_rq(md->queue, dm_prep_fn);
2187 blk_queue_lld_busy(md->queue, dm_lld_busy);
2189 elv_register_queue(md->queue);
2195 * Setup the DM device's queue based on md's type
2197 int dm_setup_md_queue(struct mapped_device *md)
2199 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2200 !dm_init_request_based_queue(md)) {
2210 struct mapped_device *md;
2218 md = idr_find(&_minor_idr, minor);
2219 if (md && (md == MINOR_ALLOCED ||
2220 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2221 dm_deleting_md(md) ||
2222 test_bit(DMF_FREEING, &md->flags))) {
2223 md = NULL;
2230 return md;
2235 struct mapped_device *md = dm_find_md(dev);
2237 if (md)
2238 dm_get(md);
2240 return md;
2244 void *dm_get_mdptr(struct mapped_device *md)
2246 return md->interface_ptr;
2249 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2251 md->interface_ptr = ptr;
2254 void dm_get(struct mapped_device *md)
2256 atomic_inc(&md->holders);
2257 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2260 const char *dm_device_name(struct mapped_device *md)
2262 return md->name;
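
md->holders (dm_get()/dm_put()) pins the mapped_device itself and is independent of the block-device open_count. A short usage sketch around dm_get_md(); the -ENXIO return is an assumption:

static int demo_with_device(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* NULL if absent or DMF_FREEING */

	if (!md)
		return -ENXIO;

	/*
	 * md->holders is elevated here, so the device cannot be freed
	 * underneath us until the matching dm_put().
	 */
	pr_info("operating on %s\n", dm_device_name(md));

	dm_put(md);

	return 0;
}
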
2266 static void __dm_destroy(struct mapped_device *md, bool wait)
2273 map = dm_get_live_table(md);
2274 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2275 set_bit(DMF_FREEING, &md->flags);
2278 if (!dm_suspended_md(md)) {
2290 while (atomic_read(&md->holders))
2292 else if (atomic_read(&md->holders))
2294 dm_device_name(md), atomic_read(&md->holders));
2296 dm_sysfs_exit(md);
2298 dm_table_destroy(__unbind(md));
2299 free_dev(md);
2302 void dm_destroy(struct mapped_device *md)
2304 __dm_destroy(md, true);
2307 void dm_destroy_immediate(struct mapped_device *md)
2309 __dm_destroy(md, false);
2312 void dm_put(struct mapped_device *md)
2314 atomic_dec(&md->holders);
2318 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2323 add_wait_queue(&md->wait, &wait);
2328 if (!md_in_flight(md))
2341 remove_wait_queue(&md->wait, &wait);
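
dm_wait_for_completion() open-codes a wait on md->wait until md_in_flight() reports the device drained. A sketch of the generic shape of that loop; the -EINTR handling is reconstructed from the surrounding context and should be treated as an assumption:

static int demo_wait_for_drain(struct mapped_device *md, int task_state)
{
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(task_state);

		if (!md_in_flight(md))		/* pending[READ] + pending[WRITE] == 0 */
			break;

		if (task_state == TASK_INTERRUPTIBLE && signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
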
2351 struct mapped_device *md = container_of(work, struct mapped_device,
2355 down_read(&md->io_lock);
2357 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2358 spin_lock_irq(&md->deferred_lock);
2359 c = bio_list_pop(&md->deferred);
2360 spin_unlock_irq(&md->deferred_lock);
2365 up_read(&md->io_lock);
2367 if (dm_request_based(md))
2370 __split_and_process_bio(md, c);
2372 down_read(&md->io_lock);
2375 up_read(&md->io_lock);
2378 static void dm_queue_flush(struct mapped_device *md)
2380 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2382 queue_work(md->wq, &md->work);
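
queue_io(), dm_wq_work() and dm_queue_flush() implement a park-and-replay scheme for bios that arrive while DMF_BLOCK_IO_FOR_SUSPEND is set. The same pattern in generic form; generic_make_request() stands in for the bio-based/request-based resubmission split of the real worker:

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_deferral {
	spinlock_t lock;
	struct bio_list deferred;
	struct work_struct work;
	struct workqueue_struct *wq;
};

/* Producer side: park a bio and kick the worker (cf. queue_io()). */
static void demo_defer_bio(struct demo_deferral *d, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	bio_list_add(&d->deferred, bio);
	spin_unlock_irqrestore(&d->lock, flags);

	queue_work(d->wq, &d->work);
}

/* Worker side: drain the list one bio at a time (cf. dm_wq_work()). */
static void demo_drain(struct work_struct *work)
{
	struct demo_deferral *d = container_of(work, struct demo_deferral, work);
	struct bio *bio;

	while (1) {
		spin_lock_irq(&d->lock);
		bio = bio_list_pop(&d->deferred);
		spin_unlock_irq(&d->lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}
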
2388 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2394 mutex_lock(&md->suspend_lock);
2397 if (!dm_suspended_md(md))
2406 map = __bind(md, table, &limits);
2409 mutex_unlock(&md->suspend_lock);
2417 static int lock_fs(struct mapped_device *md)
2421 WARN_ON(md->frozen_sb);
2423 md->frozen_sb = freeze_bdev(md->bdev);
2424 if (IS_ERR(md->frozen_sb)) {
2425 r = PTR_ERR(md->frozen_sb);
2426 md->frozen_sb = NULL;
2430 set_bit(DMF_FROZEN, &md->flags);
2435 static void unlock_fs(struct mapped_device *md)
2437 if (!test_bit(DMF_FROZEN, &md->flags))
2440 thaw_bdev(md->bdev, md->frozen_sb);
2441 md->frozen_sb = NULL;
2442 clear_bit(DMF_FROZEN, &md->flags);
2461 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2468 mutex_lock(&md->suspend_lock);
2470 if (dm_suspended_md(md)) {
2475 map = dm_get_live_table(md);
2482 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2494 r = lock_fs(md);
2509 * flush_workqueue(md->wq).
2511 down_write(&md->io_lock);
2512 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2513 up_write(&md->io_lock);
2516 * Stop md->queue before flushing md->wq in case request-based
2517 * dm defers requests to md->wq from md->queue.
2519 if (dm_request_based(md))
2520 stop_queue(md->queue);
2522 flush_workqueue(md->wq);
2529 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2531 down_write(&md->io_lock);
2533 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2534 up_write(&md->io_lock);
2538 dm_queue_flush(md);
2540 if (dm_request_based(md))
2541 start_queue(md->queue);
2543 unlock_fs(md);
2550 * requests are being added to md->deferred list.
2553 set_bit(DMF_SUSPENDED, &md->flags);
2561 mutex_unlock(&md->suspend_lock);
2565 int dm_resume(struct mapped_device *md)
2570 mutex_lock(&md->suspend_lock);
2571 if (!dm_suspended_md(md))
2574 map = dm_get_live_table(md);
2582 dm_queue_flush(md);
2589 if (dm_request_based(md))
2590 start_queue(md->queue);
2592 unlock_fs(md);
2594 clear_bit(DMF_SUSPENDED, &md->flags);
2599 mutex_unlock(&md->suspend_lock);
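
dm_suspend() and dm_resume() bracket a table swap done with dm_swap_table(), which insists on a suspended device. A hedged sketch of the expected caller pattern; DM_SUSPEND_LOCKFS_FLAG and the ERR_PTR convention of dm_swap_table() are assumptions drawn from the dm headers and ioctl layer of this period:

static int demo_reload_table(struct mapped_device *md, struct dm_table *new_table)
{
	struct dm_table *old;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* drains I/O, freezes the fs via lock_fs() */
	if (r)
		return r;

	old = dm_swap_table(md, new_table);		/* rejects a device that is not suspended */
	if (IS_ERR(old)) {
		dm_resume(md);
		return PTR_ERR(old);
	}

	if (old)
		dm_table_destroy(old);			/* old map torn down outside suspend_lock */

	return dm_resume(md);
}
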
2607 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2614 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2618 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2623 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2625 return atomic_add_return(1, &md->uevent_seq);
2628 uint32_t dm_get_event_nr(struct mapped_device *md)
2630 return atomic_read(&md->event_nr);
2633 int dm_wait_event(struct mapped_device *md, int event_nr)
2635 return wait_event_interruptible(md->eventq,
2636 (event_nr != atomic_read(&md->event_nr)));
2639 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2643 spin_lock_irqsave(&md->uevent_lock, flags);
2644 list_add(elist, &md->uevent_list);
2645 spin_unlock_irqrestore(&md->uevent_lock, flags);
2650 * count on 'md'.
2652 struct gendisk *dm_disk(struct mapped_device *md)
2654 return md->disk;
2657 struct kobject *dm_kobject(struct mapped_device *md)
2659 return &md->kobj;
2664 * so use this check to verify that kobj is part of md structure
2668 struct mapped_device *md;
2670 md = container_of(kobj, struct mapped_device, kobj);
2671 if (&md->kobj != kobj)
2674 if (test_bit(DMF_FREEING, &md->flags) ||
2675 dm_deleting_md(md))
2678 dm_get(md);
2679 return md;
2682 int dm_suspended_md(struct mapped_device *md)
2684 return test_bit(DMF_SUSPENDED, &md->flags);