Lines Matching refs: ms

42 	struct mirror_set *ms;
88 struct mirror_set *ms = context;
90 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
95 struct mirror_set *ms = (struct mirror_set *) data;
97 clear_bit(0, &ms->timer_pending);
98 wakeup_mirrord(ms);
101 static void delayed_wake(struct mirror_set *ms)
103 if (test_and_set_bit(0, &ms->timer_pending))
106 ms->timer.expires = jiffies + HZ / 5;
107 ms->timer.data = (unsigned long) ms;
108 ms->timer.function = delayed_wake_fn;
109 add_timer(&ms->timer);
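
The fragments at lines 95-109 above cover the deferred-wakeup path: delayed_wake() arms a one-shot timer for jiffies + HZ/5 only if bit 0 of ms->timer_pending was not already set, and the timer handler clears that bit and wakes kmirrord. A minimal userspace model of that wakeup-coalescing pattern, using a C11 atomic_flag in place of test_and_set_bit()/clear_bit() (all names below are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag timer_pending = ATOMIC_FLAG_INIT;

/* Model of delayed_wake_fn(): the timer fires, clears the flag,
 * and wakes the worker so queued bios get serviced. */
static void delayed_wake_fn(void)
{
	atomic_flag_clear(&timer_pending);
	puts("wakeup_mirrord()");      /* stands in for queue_work() */
}

/* Model of delayed_wake(): arm the timer only once; callers that
 * arrive while it is pending return immediately, coalescing wakeups. */
static bool delayed_wake(void)
{
	if (atomic_flag_test_and_set(&timer_pending))
		return false;          /* timer already armed */
	puts("arm timer for jiffies + HZ/5");
	return true;
}

int main(void)
{
	delayed_wake();      /* arms the timer */
	delayed_wake();      /* coalesced: no second timer */
	delayed_wake_fn();   /* timer expiry: wake kmirrord */
	delayed_wake();      /* arms again */
	return 0;
}
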
117 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
123 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
124 spin_lock_irqsave(&ms->lock, flags);
127 spin_unlock_irqrestore(&ms->lock, flags);
130 wakeup_mirrord(ms);
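
Lines 117-130 show queue_bio() picking the reads or writes list by direction, appending the bio under ms->lock, and waking kmirrord. In the driver the wakeup is conditional on the list having been empty beforehand; that detail is not visible in the matched lines and is filled in below as an assumption. A userspace sketch with a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bio { struct bio *next; };
struct bio_list { struct bio *head, *tail; };

static struct bio_list reads, writes;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->next = NULL;
	if (bl->tail)
		bl->tail->next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

/* Model of queue_bio(): append to the per-direction list and wake the
 * worker only when the list goes from empty to non-empty. */
static void queue_bio(struct bio *bio, bool is_write)
{
	struct bio_list *bl = is_write ? &writes : &reads;
	bool should_wake;

	pthread_mutex_lock(&lock);          /* spin_lock_irqsave() in the driver */
	should_wake = (bl->head == NULL);   /* assumed: wake only on first entry */
	bio_list_add(bl, bio);
	pthread_mutex_unlock(&lock);

	if (should_wake)
		puts("wakeup_mirrord()");   /* queue_work(ms->kmirrord_wq, ...) */
}

int main(void)
{
	struct bio a, b;
	queue_bio(&a, true);   /* wakes the worker */
	queue_bio(&b, true);   /* coalesced: list already non-empty */
	return 0;
}
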
135 struct mirror_set *ms = context;
139 queue_bio(ms, bio, WRITE);
170 static struct mirror *get_default_mirror(struct mirror_set *ms)
172 return &ms->mirror[atomic_read(&ms->default_mirror)];
177 struct mirror_set *ms = m->ms;
178 struct mirror *m0 = &(ms->mirror[0]);
180 atomic_set(&ms->default_mirror, m - m0);
183 static struct mirror *get_valid_mirror(struct mirror_set *ms)
187 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
210 struct mirror_set *ms = m->ms;
213 ms->leg_failure = 1;
225 if (!errors_handled(ms))
228 if (m != get_default_mirror(ms))
231 if (!ms->in_sync) {
241 new = get_valid_mirror(ms);
248 schedule_work(&ms->trigger_event);
253 struct mirror_set *ms = ti->private;
257 struct dm_io_region io[ms->nr_mirrors];
263 .client = ms->io_client,
266 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
273 dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
275 for (i = 0; i < ms->nr_mirrors; i++)
277 fail_mirror(ms->mirror + i,
296 struct mirror_set *ms = dm_rh_region_context(reg);
302 fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
312 for (m = 0; m < ms->nr_mirrors; m++) {
313 if (&ms->mirror[m] == get_default_mirror(ms))
316 fail_mirror(ms->mirror + m,
325 static int recover(struct mirror_set *ms, struct dm_region *reg)
333 sector_t region_size = dm_rh_get_region_size(ms->rh);
336 m = get_default_mirror(ms);
338 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
339 if (key == (ms->nr_regions - 1)) {
344 from.count = ms->ti->len & (region_size - 1);
351 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
352 if (&ms->mirror[i] == get_default_mirror(ms))
355 m = ms->mirror + i;
357 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
363 if (!errors_handled(ms))
366 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
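
Lines 325-366 show recover() building the source region from the default leg (its offset plus dm_rh_region_to_sector(ms->rh, key)), truncating the final region to ms->ti->len & (region_size - 1), and fanning the copy out to every other leg via dm_kcopyd_copy(). A standalone sketch of just that sector arithmetic, assuming the region-to-sector mapping is key * region_size and that an exact multiple falls back to a full region (that fallback is an assumption, not visible in the matched lines):

#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long region_t;

struct region_io { sector_t sector; sector_t count; };

/* Model of the source-leg geometry computed in recover(): regions map to
 * sectors as key * region_size (a power of two), and the final region may
 * be short, so its length is the remainder ti_len & (region_size - 1). */
static struct region_io recovery_source(sector_t leg_offset, region_t key,
					sector_t region_size,
					region_t nr_regions, sector_t ti_len)
{
	struct region_io from;

	from.sector = leg_offset + key * region_size;  /* dm_rh_region_to_sector() */
	if (key == nr_regions - 1) {
		from.count = ti_len & (region_size - 1);   /* partial last region */
		if (!from.count)
			from.count = region_size;          /* exact multiple (assumed) */
	} else {
		from.count = region_size;
	}
	return from;
}

int main(void)
{
	/* Illustrative numbers: 2048-sector regions, 10000-sector target. */
	struct region_io r = recovery_source(0, 4, 2048, 5, 10000);
	printf("last region: sector %llu, count %llu\n", r.sector, r.count);
	return 0;
}
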
372 static void do_recovery(struct mirror_set *ms)
375 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
381 dm_rh_recovery_prepare(ms->rh);
386 while ((reg = dm_rh_recovery_start(ms->rh))) {
387 r = recover(ms, reg);
395 if (!ms->in_sync &&
396 (log->type->get_sync_count(log) == ms->nr_regions)) {
398 dm_table_event(ms->ti->table);
399 ms->in_sync = 1;
406 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
408 struct mirror *m = get_default_mirror(ms);
414 if (m-- == ms->mirror)
415 m += ms->nr_mirrors;
416 } while (m != get_default_mirror(ms));
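
Lines 406-416 show choose_mirror() starting at the default leg and stepping backwards through ms->mirror[], wrapping at element 0, until it either finds a usable leg or arrives back where it started. A runnable model of that round-robin fallback (the per-sector argument is dropped here, and the usability test is modelled as error_count == 0):

#include <stdio.h>

struct mirror { int error_count; };

/* Model of choose_mirror(): start at the default leg and walk backwards
 * through the array (wrapping past element 0) until a leg with no recorded
 * errors is found; give up after one full lap. */
static struct mirror *choose_mirror(struct mirror *mirrors, unsigned nr_mirrors,
				    unsigned default_idx)
{
	struct mirror *m = &mirrors[default_idx];

	do {
		if (!m->error_count)
			return m;
		if (m-- == mirrors)          /* step back, wrapping at the start */
			m += nr_mirrors;
	} while (m != &mirrors[default_idx]);

	return NULL;                         /* every leg has failed */
}

int main(void)
{
	struct mirror legs[3] = { { .error_count = 0 },
				  { .error_count = 1 },   /* failed default leg */
				  { .error_count = 0 } };
	struct mirror *m = choose_mirror(legs, 3, 1);

	if (m)
		printf("chose leg %td\n", m - legs);   /* prints: chose leg 0 */
	else
		puts("no usable leg");
	return 0;
}
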
423 struct mirror *default_mirror = get_default_mirror(m->ms);
428 static int mirror_available(struct mirror_set *ms, struct bio *bio)
430 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
431 region_t region = dm_rh_bio_to_region(ms->rh, bio);
434 return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
446 return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
463 static void hold_bio(struct mirror_set *ms, struct bio *bio)
469 spin_lock_irq(&ms->lock);
471 if (atomic_read(&ms->suspend)) {
472 spin_unlock_irq(&ms->lock);
477 if (dm_noflush_suspending(ms->ti))
487 bio_list_add(&ms->holds, bio);
488 spin_unlock_irq(&ms->lock);
509 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
513 queue_bio(m->ms, bio, bio_rw(bio));
532 .client = m->ms->io_client,
540 static inline int region_in_sync(struct mirror_set *ms, region_t region,
543 int state = dm_rh_get_state(ms->rh, region, may_block);
547 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
554 region = dm_rh_bio_to_region(ms->rh, bio);
555 m = get_default_mirror(ms);
560 if (likely(region_in_sync(ms, region, 1)))
561 m = choose_mirror(ms, bio->bi_sector);
588 struct mirror_set *ms;
592 ms = bio_get_m(bio)->ms;
606 for (i = 0; i < ms->nr_mirrors; i++)
608 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
615 spin_lock_irqsave(&ms->lock, flags);
616 if (!ms->failures.head)
618 bio_list_add(&ms->failures, bio);
619 spin_unlock_irqrestore(&ms->lock, flags);
621 wakeup_mirrord(ms);
624 static void do_write(struct mirror_set *ms, struct bio *bio)
627 struct dm_io_region io[ms->nr_mirrors], *dest = io;
635 .client = ms->io_client,
644 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
651 bio_set_m(bio, get_default_mirror(ms));
653 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
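
Lines 624-653 show do_write() filling one dm_io_region per leg, stashing the default mirror in the bio with bio_set_m() so the completion callback can find the mirror set again, and issuing a single asynchronous dm_io() across all ms->nr_mirrors regions. The sketch below models the per-leg mapping as leg offset plus the target-relative sector; the dm_target_offset() adjustment by ti->begin is omitted for brevity:

#include <stdio.h>

typedef unsigned long long sector_t;

struct leg { sector_t offset; const char *name; };
struct io_region { const char *dev; sector_t sector; sector_t count; };

/* Model of the fan-out in do_write(): one destination region per mirror
 * leg, all describing the same target-relative span, then a single async
 * request is issued across the whole array (dm_io() in the driver). */
static unsigned build_write_regions(const struct leg *legs, unsigned nr_legs,
				    sector_t bi_sector, sector_t nr_sectors,
				    struct io_region *dest)
{
	unsigned i;

	for (i = 0; i < nr_legs; i++) {
		dest[i].dev = legs[i].name;
		dest[i].sector = legs[i].offset + bi_sector;  /* map_region() */
		dest[i].count = nr_sectors;
	}
	return nr_legs;    /* number of regions handed to the async io */
}

int main(void)
{
	struct leg legs[2] = { { 0, "sda1" }, { 2048, "sdb1" } };
	struct io_region io[2];
	unsigned n = build_write_regions(legs, 2, 100, 8, io);

	for (unsigned i = 0; i < n; i++)
		printf("%s: sector %llu + %llu\n", io[i].dev, io[i].sector, io[i].count);
	return 0;
}
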
656 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
662 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
683 region = dm_rh_bio_to_region(ms->rh, bio);
691 state = dm_rh_get_state(ms->rh, region, 1);
715 spin_lock_irq(&ms->lock);
716 bio_list_merge(&ms->writes, &requeue);
717 spin_unlock_irq(&ms->lock);
718 delayed_wake(ms);
726 dm_rh_inc_pending(ms->rh, &sync);
727 dm_rh_inc_pending(ms->rh, &nosync);
734 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
739 if (unlikely(ms->log_failure) && errors_handled(ms)) {
740 spin_lock_irq(&ms->lock);
741 bio_list_merge(&ms->failures, &sync);
742 spin_unlock_irq(&ms->lock);
743 wakeup_mirrord(ms);
746 do_write(ms, bio);
749 dm_rh_delay(ms->rh, bio);
752 if (unlikely(ms->leg_failure) && errors_handled(ms)) {
753 spin_lock_irq(&ms->lock);
754 bio_list_add(&ms->failures, bio);
755 spin_unlock_irq(&ms->lock);
756 wakeup_mirrord(ms);
758 map_bio(get_default_mirror(ms), bio);
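
The surrounding do_writes() loop (lines 656-758) sorts each write by its region state before any I/O is issued: in-sync regions take the fully mirrored path, regions still being recovered are delayed behind kcopyd, and regions already marked out of sync are written to the default leg only. The bucket semantics below are inferred from the driver's structure rather than spelled out in the matched lines, so treat the mapping as an assumption:

#include <stdio.h>

enum region_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };
enum write_bucket { BUCKET_SYNC, BUCKET_NOSYNC, BUCKET_RECOVER };

/* Model of the triage in do_writes(): writes to in-sync regions go through
 * the mirrored path (do_write), writes to regions being recovered are
 * delayed behind the recovery, and writes to known out-of-sync regions go
 * straight to the default leg. */
static enum write_bucket classify_write(enum region_state state)
{
	switch (state) {
	case RH_CLEAN:
	case RH_DIRTY:
		return BUCKET_SYNC;       /* write every leg via dm_io() */
	case RH_RECOVERING:
		return BUCKET_RECOVER;    /* dm_rh_delay(): wait for kcopyd */
	case RH_NOSYNC:
	default:
		return BUCKET_NOSYNC;     /* map_bio() to the default leg only */
	}
}

int main(void)
{
	printf("%d %d %d\n", classify_write(RH_CLEAN),
	       classify_write(RH_RECOVERING), classify_write(RH_NOSYNC));
	return 0;
}
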
764 static void do_failures(struct mirror_set *ms, struct bio_list *failures)
789 if (!ms->log_failure) {
790 ms->in_sync = 0;
791 dm_rh_mark_nosync(ms->rh, bio);
802 if (!get_valid_mirror(ms))
804 else if (errors_handled(ms))
805 hold_bio(ms, bio);
813 struct mirror_set *ms =
816 dm_table_event(ms->ti->table);
824 struct mirror_set *ms = container_of(work, struct mirror_set,
829 spin_lock_irqsave(&ms->lock, flags);
830 reads = ms->reads;
831 writes = ms->writes;
832 failures = ms->failures;
833 bio_list_init(&ms->reads);
834 bio_list_init(&ms->writes);
835 bio_list_init(&ms->failures);
836 spin_unlock_irqrestore(&ms->lock, flags);
838 dm_rh_update_states(ms->rh, errors_handled(ms));
839 do_recovery(ms);
840 do_reads(ms, &reads);
841 do_writes(ms, &writes);
842 do_failures(ms, &failures);
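
Lines 824-842 show the kmirrord work function snatching the shared reads, writes and failures lists into local copies under ms->lock, re-initialising the shared lists, and only then doing the real work, so submitters can keep queueing while the worker runs. A userspace model of that snatch-and-process shape:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct bio { struct bio *next; int id; };
struct bio_list { struct bio *head, *tail; };

static struct bio_list reads, writes, failures;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of do_mirror(): move the shared lists into local copies under the
 * lock, reset the shared heads, then process the work without the lock so
 * new submissions can keep queueing while the worker runs. */
static void do_mirror(void)
{
	struct bio_list r, w, f;

	pthread_mutex_lock(&lock);
	r = reads;  w = writes;  f = failures;
	reads = writes = failures = (struct bio_list){ NULL, NULL };
	pthread_mutex_unlock(&lock);

	/* In the driver: dm_rh_update_states(), do_recovery(), then
	 * do_reads(&r), do_writes(&w), do_failures(&f). */
	for (struct bio *b = r.head; b; b = b->next)
		printf("read  bio %d\n", b->id);
	for (struct bio *b = w.head; b; b = b->next)
		printf("write bio %d\n", b->id);
	for (struct bio *b = f.head; b; b = b->next)
		printf("retry bio %d\n", b->id);
}

int main(void)
{
	static struct bio a = { NULL, 1 }, b = { NULL, 2 };

	writes.head = &a;  a.next = &b;  writes.tail = &b;
	do_mirror();
	return 0;
}
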
854 struct mirror_set *ms = NULL;
856 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
858 ms = kzalloc(len, GFP_KERNEL);
859 if (!ms) {
864 spin_lock_init(&ms->lock);
865 bio_list_init(&ms->reads);
866 bio_list_init(&ms->writes);
867 bio_list_init(&ms->failures);
868 bio_list_init(&ms->holds);
870 ms->ti = ti;
871 ms->nr_mirrors = nr_mirrors;
872 ms->nr_regions = dm_sector_div_up(ti->len, region_size);
873 ms->in_sync = 0;
874 ms->log_failure = 0;
875 ms->leg_failure = 0;
876 atomic_set(&ms->suspend, 0);
877 atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
879 ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
882 if (!ms->read_record_pool) {
884 kfree(ms);
888 ms->io_client = dm_io_client_create();
889 if (IS_ERR(ms->io_client)) {
891 mempool_destroy(ms->read_record_pool);
892 kfree(ms);
896 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
898 ms->ti->begin, MAX_RECOVERY,
899 dl, region_size, ms->nr_regions);
900 if (IS_ERR(ms->rh)) {
902 dm_io_client_destroy(ms->io_client);
903 mempool_destroy(ms->read_record_pool);
904 kfree(ms);
908 return ms;
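
Lines 854-908 show alloc_context() sizing a single allocation as sizeof(*ms) plus nr_mirrors trailing struct mirror slots, zeroing it, initialising the lock, bio lists and counters, and then creating the read-record mempool, io client and region hash, unwinding everything allocated so far whenever a step fails. A small model of the trailing-array allocation and the dm_sector_div_up() region count (struct layout and malloc stand-ins are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct mirror { unsigned long long offset; int error_count; };

struct mirror_set {
	unsigned nr_mirrors;
	unsigned long long nr_regions;
	/* ... lock, bio lists, io client, region hash ... */
	struct mirror mirror[];        /* trailing array, one slot per leg */
};

/* Model of alloc_context(): one allocation sized for the header plus
 * nr_mirrors trailing struct mirror slots, zeroed, with basic fields set.
 * The driver then creates the mempool, io client and region hash, freeing
 * everything allocated so far whenever a step fails. */
static struct mirror_set *alloc_context(unsigned nr_mirrors,
					unsigned long long ti_len,
					unsigned long long region_size)
{
	size_t len = sizeof(struct mirror_set) + nr_mirrors * sizeof(struct mirror);
	struct mirror_set *ms = calloc(1, len);   /* kzalloc(len, GFP_KERNEL) */

	if (!ms)
		return NULL;

	ms->nr_mirrors = nr_mirrors;
	/* dm_sector_div_up(): round the target length up to whole regions. */
	ms->nr_regions = (ti_len + region_size - 1) / region_size;
	return ms;
}

int main(void)
{
	struct mirror_set *ms = alloc_context(2, 10000, 2048);

	if (ms)
		printf("%u legs, %llu regions\n", ms->nr_mirrors, ms->nr_regions);
	free(ms);
	return 0;
}
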
911 static void free_context(struct mirror_set *ms, struct dm_target *ti,
915 dm_put_device(ti, ms->mirror[m].dev);
917 dm_io_client_destroy(ms->io_client);
918 dm_region_hash_destroy(ms->rh);
919 mempool_destroy(ms->read_record_pool);
920 kfree(ms);
923 static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
935 &ms->mirror[mirror].dev)) {
940 ms->mirror[mirror].ms = ms;
941 atomic_set(&(ms->mirror[mirror].error_count), 0);
942 ms->mirror[mirror].error_type = 0;
943 ms->mirror[mirror].offset = offset;
986 static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
990 struct dm_target *ti = ms->ti;
1013 ms->features |= DM_RAID1_HANDLE_ERRORS;
1040 struct mirror_set *ms;
1066 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1067 if (!ms) {
1074 r = get_mirror(ms, ti, m, argv);
1076 free_context(ms, ti, m);
1083 ti->private = ms;
1084 ti->split_io = dm_rh_get_region_size(ms->rh);
1088 ms->kmirrord_wq = alloc_workqueue("kmirrord",
1090 if (!ms->kmirrord_wq) {
1095 INIT_WORK(&ms->kmirrord_work, do_mirror);
1096 init_timer(&ms->timer);
1097 ms->timer_pending = 0;
1098 INIT_WORK(&ms->trigger_event, trigger_event);
1100 r = parse_features(ms, argc, argv, &args_used);
1122 ms->kcopyd_client = dm_kcopyd_client_create();
1123 if (IS_ERR(ms->kcopyd_client)) {
1124 r = PTR_ERR(ms->kcopyd_client);
1128 wakeup_mirrord(ms);
1132 destroy_workqueue(ms->kmirrord_wq);
1134 free_context(ms, ti, ms->nr_mirrors);
1140 struct mirror_set *ms = (struct mirror_set *) ti->private;
1142 del_timer_sync(&ms->timer);
1143 flush_workqueue(ms->kmirrord_wq);
1144 flush_work_sync(&ms->trigger_event);
1145 dm_kcopyd_client_destroy(ms->kcopyd_client);
1146 destroy_workqueue(ms->kmirrord_wq);
1147 free_context(ms, ti, ms->nr_mirrors);
1158 struct mirror_set *ms = ti->private;
1160 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1164 map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
1165 queue_bio(ms, bio, rw);
1169 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1180 queue_bio(ms, bio, rw);
1188 m = choose_mirror(ms, bio->bi_sector);
1192 read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
1208 struct mirror_set *ms = (struct mirror_set *) ti->private;
1218 dm_rh_dec(ms->rh, map_context->ll);
1250 if (default_ok(m) || mirror_available(ms, bio)) {
1254 mempool_free(read_record, ms->read_record_pool);
1256 queue_bio(ms, bio, rw);
1264 mempool_free(read_record, ms->read_record_pool);
1273 struct mirror_set *ms = (struct mirror_set *) ti->private;
1274 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1279 atomic_set(&ms->suspend, 1);
1284 * a chance to be added in the hold list because ms->suspend
1287 spin_lock_irq(&ms->lock);
1288 holds = ms->holds;
1289 bio_list_init(&ms->holds);
1290 spin_unlock_irq(&ms->lock);
1293 hold_bio(ms, bio);
1299 dm_rh_stop_recovery(ms->rh);
1302 !dm_rh_recovery_in_flight(ms->rh));
1314 flush_workqueue(ms->kmirrord_wq);
1319 struct mirror_set *ms = ti->private;
1320 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1329 struct mirror_set *ms = ti->private;
1330 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1332 atomic_set(&ms->suspend, 0);
1336 dm_rh_start_recovery(ms->rh);
1368 struct mirror_set *ms = (struct mirror_set *) ti->private;
1369 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1370 char buffer[ms->nr_mirrors + 1];
1374 DMEMIT("%d ", ms->nr_mirrors);
1375 for (m = 0; m < ms->nr_mirrors; m++) {
1376 DMEMIT("%s ", ms->mirror[m].dev->name);
1377 buffer[m] = device_status_char(&(ms->mirror[m]));
1383 (unsigned long long)ms->nr_regions, buffer);
1392 DMEMIT("%d", ms->nr_mirrors);
1393 for (m = 0; m < ms->nr_mirrors; m++)
1394 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1395 (unsigned long long)ms->mirror[m].offset);
1397 if (ms->features & DM_RAID1_HANDLE_ERRORS)
1407 struct mirror_set *ms = ti->private;
1411 for (i = 0; !ret && i < ms->nr_mirrors; i++)
1412 ret = fn(ti, ms->mirror[i].dev,
1413 ms->mirror[i].offset, ti->len, data);