Lines matching refs: msb (struct msb_data usage in the ms_block driver)

151 static int msb_validate_used_block_bitmap(struct msb_data *msb)
159 for (i = 0; i < msb->zone_count; i++)
160 total_free_blocks += msb->free_block_count[i];
162 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
163 msb->block_count) == total_free_blocks)
167 msb->read_only = true;
172 static void msb_mark_block_used(struct msb_data *msb, int pba)
176 if (test_bit(pba, msb->used_blocks_bitmap)) {
179 msb->read_only = true;
183 if (msb_validate_used_block_bitmap(msb))
187 __set_bit(pba, msb->used_blocks_bitmap);
188 msb->free_block_count[zone]--;
192 static void msb_mark_block_unused(struct msb_data *msb, int pba)
196 if (!test_bit(pba, msb->used_blocks_bitmap)) {
198 msb->read_only = true;
202 if (msb_validate_used_block_bitmap(msb))
206 __clear_bit(pba, msb->used_blocks_bitmap);
207 msb->free_block_count[zone]++;
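
The three helpers above keep the allocator's books: used_blocks_bitmap records which physical blocks are taken, free_block_count[] counts spares per zone, and msb_validate_used_block_bitmap cross-checks the two, switching the card read-only on any mismatch. A minimal userspace model of that invariant (the two-zone geometry and all names here are illustrative; the driver's real zone size is MS_BLOCKS_IN_ZONE):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLOCKS_IN_ZONE 512                   /* mirrors MS_BLOCKS_IN_ZONE */
    #define BLOCK_COUNT (2 * BLOCKS_IN_ZONE)     /* hypothetical two-zone card */

    static uint64_t used[BLOCK_COUNT / 64];      /* stand-in for used_blocks_bitmap */
    static int free_count[BLOCK_COUNT / BLOCKS_IN_ZONE];

    static void mark_used(int pba)               /* models msb_mark_block_used */
    {
        used[pba / 64] |= 1ULL << (pba % 64);
        free_count[pba / BLOCKS_IN_ZONE]--;
    }

    static bool bitmap_consistent(void)  /* models msb_validate_used_block_bitmap */
    {
        int total_free = 0, weight = 0;

        for (int z = 0; z < BLOCK_COUNT / BLOCKS_IN_ZONE; z++)
            total_free += free_count[z];
        for (unsigned i = 0; i < BLOCK_COUNT / 64; i++)
            weight += __builtin_popcountll(used[i]);
        /* same check as the driver: free == total - bitmap_weight(used) */
        return BLOCK_COUNT - weight == total_free;
    }

    int main(void)
    {
        for (int z = 0; z < BLOCK_COUNT / BLOCKS_IN_ZONE; z++)
            free_count[z] = BLOCKS_IN_ZONE;
        mark_used(3);
        printf("consistent: %d\n", bitmap_consistent()); /* 1 */
        free_count[0]++;                                 /* corrupt the books */
        printf("consistent: %d\n", bitmap_consistent()); /* 0 -> read_only */
        return 0;
    }
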
211 static void msb_invalidate_reg_window(struct msb_data *msb)
213 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
214 msb->reg_addr.w_length = sizeof(struct ms_id_register);
215 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
216 msb->reg_addr.r_length = sizeof(struct ms_id_register);
217 msb->addr_valid = false;
221 static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
224 struct memstick_dev *card = msb->card;
226 WARN_ON(msb->state != -1);
227 msb->int_polling = false;
228 msb->state = 0;
229 msb->exit_error = 0;
237 WARN_ON(msb->state != -1);
238 return msb->exit_error;
242 static int msb_exit_state_machine(struct msb_data *msb, int error)
244 WARN_ON(msb->state == -1);
246 msb->state = -1;
247 msb->exit_error = error;
248 msb->card->next_request = h_msb_default_bad;
252 msb_invalidate_reg_window(msb);
254 complete(&msb->card->mrq_complete);
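
msb_run_state_machine and msb_exit_state_machine bracket every card operation. The runner checks that the machine is idle (state == -1), arms it at state 0, and waits for mrq_complete; the handler advances msb->state across request completions until it calls msb_exit_state_machine, which records exit_error, points next_request at h_msb_default_bad, parks state back at the -1 sentinel, and completes the waiter. A stripped-down, synchronous sketch of that protocol (the driver's version advances on interrupt-driven request completions, not a loop):

    #include <stdio.h>

    #define STATE_IDLE (-1)

    struct machine {
        int state;          /* models msb->state; -1 means idle */
        int exit_error;     /* models msb->exit_error */
    };

    static int exit_state_machine(struct machine *m, int error)
    {
        m->state = STATE_IDLE;       /* always return to the idle sentinel */
        m->exit_error = error;
        /* the driver also completes &card->mrq_complete here */
        return 0;
    }

    /* One handler step: advance the state or finish the machine. */
    static int step(struct machine *m)
    {
        switch (m->state) {
        case 0: m->state = 1; return 1;             /* more work pending */
        case 1: return exit_state_machine(m, 0);    /* done, success */
        default: return exit_state_machine(m, -1);  /* unexpected state */
        }
    }

    static int run_state_machine(struct machine *m, int (*f)(struct machine *))
    {
        m->state = 0;               /* msb_run_state_machine starts at 0 */
        m->exit_error = 0;
        while (f(m))                /* the driver waits on mrq_complete instead */
            ;
        return m->exit_error;
    }

    int main(void)
    {
        struct machine m = { .state = STATE_IDLE };
        printf("result: %d\n", run_state_machine(&m, step));
        return 0;
    }
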
259 static int msb_read_int_reg(struct msb_data *msb, long timeout)
261 struct memstick_request *mrq = &msb->card->current_mrq;
263 WARN_ON(msb->state == -1);
265 if (!msb->int_polling) {
266 msb->int_timeout = jiffies +
268 msb->int_polling = true;
269 } else if (time_after(jiffies, msb->int_timeout)) {
274 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
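
msb_read_int_reg polls the INT register with a deadline: the first poll arms msb->int_timeout (jiffies plus the caller's timeout) and sets int_polling; subsequent polls bail out once time_after() says the deadline has passed. Hosts with MEMSTICK_CAP_AUTO_GET_INT can skip the explicit request because the INT value arrives with the preceding command. A loose userspace sketch of the same arm-then-check pattern, using a monotonic clock in place of jiffies (all names invented):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct poller {
        bool polling;                /* models msb->int_polling */
        struct timespec deadline;    /* models msb->int_timeout */
    };

    static bool time_after(struct timespec a, struct timespec b)
    {
        return a.tv_sec > b.tv_sec ||
               (a.tv_sec == b.tv_sec && a.tv_nsec > b.tv_nsec);
    }

    /* Returns false while polling may continue, true on timeout. */
    static bool poll_once(struct poller *p, long timeout_ms)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        if (!p->polling) {           /* first poll: arm the deadline */
            p->deadline = now;
            p->deadline.tv_sec += timeout_ms / 1000;
            p->deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
            if (p->deadline.tv_nsec >= 1000000000L) {
                p->deadline.tv_sec++;
                p->deadline.tv_nsec -= 1000000000L;
            }
            p->polling = true;
            return false;
        }
        return time_after(now, p->deadline);  /* time_after(jiffies, ...) */
    }

    int main(void)
    {
        struct poller p = { 0 };
        while (!poll_once(&p, 50))   /* spins until 50 ms elapse */
            ;
        puts("timed out");
        return 0;
    }
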
286 static int msb_read_regs(struct msb_data *msb, int offset, int len)
288 struct memstick_request *req = &msb->card->current_mrq;
290 if (msb->reg_addr.r_offset != offset ||
291 msb->reg_addr.r_length != len || !msb->addr_valid) {
293 msb->reg_addr.r_offset = offset;
294 msb->reg_addr.r_length = len;
295 msb->addr_valid = true;
298 &msb->reg_addr, sizeof(msb->reg_addr));
307 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
309 struct memstick_request *req = &msb->card->current_mrq;
311 if (msb->reg_addr.w_offset != offset ||
312 msb->reg_addr.w_length != len || !msb->addr_valid) {
314 msb->reg_addr.w_offset = offset;
315 msb->reg_addr.w_length = len;
316 msb->addr_valid = true;
319 &msb->reg_addr, sizeof(msb->reg_addr));
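
msb_read_regs and msb_write_regs cache the card's register window: the address-setting transfer (MS_TPC_SET_RW_REG_ADRS) is queued only when the requested offset/length differs from the cached msb->reg_addr, or when msb_invalidate_reg_window has cleared addr_valid. A compact model of that skip-if-unchanged pattern (types and the transfer counter are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct reg_window {
        int r_offset, r_length;     /* models msb->reg_addr */
        bool valid;                 /* models msb->addr_valid */
    };

    static int set_rw_reg_adrs_count;   /* counts transfers we'd queue */

    /* Returns true when a window-programming transfer had to be queued. */
    static bool select_read_window(struct reg_window *w, int offset, int len)
    {
        if (w->valid && w->r_offset == offset && w->r_length == len)
            return false;           /* window already programmed: skip */

        w->r_offset = offset;
        w->r_length = len;
        w->valid = true;
        set_rw_reg_adrs_count++;    /* driver queues MS_TPC_SET_RW_REG_ADRS */
        return true;
    }

    int main(void)
    {
        struct reg_window w = { 0 };

        select_read_window(&w, 2, 6);    /* programs the window */
        select_read_window(&w, 2, 6);    /* cached: no transfer */
        w.valid = false;                 /* models msb_invalidate_reg_window */
        select_read_window(&w, 2, 6);    /* reprogrammed */
        printf("transfers: %d\n", set_rw_reg_adrs_count);  /* 2 */
        return 0;
    }
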
336 * Writes output to msb->current_sg, takes sector address from msb->regs.param
342 struct msb_data *msb = memstick_get_drvdata(card);
349 return msb_exit_state_machine(msb, mrq->error);
352 switch (msb->state) {
357 if (!msb_write_regs(msb,
360 (unsigned char *)&msb->regs.param))
363 msb->state = MSB_RP_SEND_READ_COMMAND;
369 msb->state = MSB_RP_SEND_INT_REQ;
373 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
376 if (msb_read_int_reg(msb, -1))
382 msb->regs.status.interrupt = intreg;
385 return msb_exit_state_machine(msb, -EIO);
388 msb->state = MSB_RP_SEND_INT_REQ;
392 msb->int_polling = false;
393 msb->state = (intreg & MEMSTICK_INT_ERR) ?
399 if (!msb_read_regs(msb,
404 msb->state = MSB_RP_RECEIVE_STATUS_REG;
408 msb->regs.status = *(struct ms_status_register *)mrq->data;
409 msb->state = MSB_RP_SEND_OOB_READ;
413 if (!msb_read_regs(msb,
418 msb->state = MSB_RP_RECEIVE_OOB_READ;
422 msb->regs.extra_data =
424 msb->state = MSB_RP_SEND_READ_DATA;
429 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
430 msb->state = MSB_RP_RECEIVE_READ_DATA;
435 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
436 msb->current_sg_offset,
437 msb->page_size);
440 msb->state = MSB_RP_RECEIVE_READ_DATA;
444 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
445 msb->current_sg_offset += msb->page_size;
446 return msb_exit_state_machine(msb, 0);
449 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
451 return msb_exit_state_machine(msb, -EBADMSG);
454 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
456 msb->current_sg_offset += msb->page_size;
457 return msb_exit_state_machine(msb, -EUCLEAN);
460 return msb_exit_state_machine(msb, -EIO);
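
The tail of h_msb_read_page triages the status bits once the data transfer completes: no MEMSTICK_INT_ERR means a clean page, so the scatterlist offset advances and the machine exits with 0; MEMSTICK_UNCORR_ERROR exits with -EBADMSG without consuming the page; MEMSTICK_CORR_ERROR still advances the offset (ECC fixed the data) but reports -EUCLEAN; anything else is -EIO. A small standalone rendition of that mapping (the bit values below are placeholders; the real masks live in linux/memstick.h):

    #include <errno.h>
    #include <stdio.h>

    #define INT_ERR      0x40    /* illustrative stand-ins for MEMSTICK_* */
    #define UNCORR_ERROR 0x02
    #define CORR_ERROR   0x01

    /* Models the status triage at the end of h_msb_read_page. */
    static int classify_read(unsigned interrupt, unsigned status1,
                             int *offset, int page_size)
    {
        if (!(interrupt & INT_ERR)) {
            *offset += page_size;    /* clean page: consume it */
            return 0;
        }
        if (status1 & UNCORR_ERROR)
            return -EBADMSG;         /* data lost; offset not advanced */
        if (status1 & CORR_ERROR) {
            *offset += page_size;    /* page was fixed up by ECC */
            return -EUCLEAN;
        }
        return -EIO;                 /* some other device-side failure */
    }

    int main(void)
    {
        int off = 0;

        printf("%d (off=%d)\n", classify_read(0, 0, &off, 512), off);
        printf("%d (off=%d)\n",
               classify_read(INT_ERR, CORR_ERROR, &off, 512), off);
        return 0;
    }
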
469 * Takes address from msb->regs.param.
471 * from msb->regs.extra_data
478 struct msb_data *msb = memstick_get_drvdata(card);
484 return msb_exit_state_machine(msb, mrq->error);
487 switch (msb->state) {
496 if (!msb_write_regs(msb,
499 &msb->regs.param))
502 msb->state = MSB_WB_SEND_WRITE_OOB;
506 if (!msb_write_regs(msb,
509 &msb->regs.extra_data))
511 msb->state = MSB_WB_SEND_WRITE_COMMAND;
518 msb->state = MSB_WB_SEND_INT_REQ;
522 msb->state = MSB_WB_RECEIVE_INT_REQ;
523 if (msb_read_int_reg(msb, -1))
529 msb->regs.status.interrupt = intreg;
533 return msb_exit_state_machine(msb, -EIO);
536 return msb_exit_state_machine(msb, -EBADMSG);
540 if (msb->current_page == msb->pages_in_block) {
542 return msb_exit_state_machine(msb, 0);
543 msb->state = MSB_WB_SEND_INT_REQ;
550 msb->state = MSB_WB_SEND_INT_REQ;
554 msb->int_polling = false;
555 msb->state = MSB_WB_SEND_WRITE_DATA;
561 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
562 msb->current_sg_offset,
563 msb->page_size) < msb->page_size)
564 return msb_exit_state_machine(msb, -EIO);
568 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
572 msb->current_page++;
573 msb->current_sg_offset += msb->page_size;
574 msb->state = MSB_WB_SEND_INT_REQ;
590 struct msb_data *msb = memstick_get_drvdata(card);
596 return msb_exit_state_machine(msb, mrq->error);
599 switch (msb->state) {
603 if (!msb_write_regs(msb,
606 &msb->regs.param))
608 msb->state = MSB_SC_SEND_WRITE_OOB;
612 if (!msb->command_need_oob) {
613 msb->state = MSB_SC_SEND_COMMAND;
617 if (!msb_write_regs(msb,
620 &msb->regs.extra_data))
623 msb->state = MSB_SC_SEND_COMMAND;
627 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
628 msb->state = MSB_SC_SEND_INT_REQ;
632 msb->state = MSB_SC_RECEIVE_INT_REQ;
633 if (msb_read_int_reg(msb, -1))
641 return msb_exit_state_machine(msb, -EIO);
643 return msb_exit_state_machine(msb, -EBADMSG);
646 msb->state = MSB_SC_SEND_INT_REQ;
650 return msb_exit_state_machine(msb, 0);
661 struct msb_data *msb = memstick_get_drvdata(card);
665 return msb_exit_state_machine(msb, mrq->error);
667 switch (msb->state) {
671 msb->state = MSB_RS_CONFIRM;
674 return msb_exit_state_machine(msb, 0);
683 struct msb_data *msb = memstick_get_drvdata(card);
689 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
690 return msb_exit_state_machine(msb, mrq->error);
693 switch (msb->state) {
696 msb->regs.param.system |= MEMSTICK_SYS_PAM;
698 if (!msb_write_regs(msb,
701 (unsigned char *)&msb->regs.param))
704 msb->state = MSB_PS_SWICH_HOST;
712 msb->state = MSB_PS_CONFIRM;
716 return msb_exit_state_machine(msb, 0);
722 static int msb_switch_to_parallel(struct msb_data *msb);
725 static int msb_reset(struct msb_data *msb, bool full)
728 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
729 struct memstick_dev *card = msb->card;
734 msb->regs.param.system = MEMSTICK_SYS_BAMD;
742 msb_invalidate_reg_window(msb);
754 msb->read_only = true;
759 error = msb_run_state_machine(msb, h_msb_reset);
762 msb->read_only = true;
768 msb_switch_to_parallel(msb);
773 static int msb_switch_to_parallel(struct msb_data *msb)
777 error = msb_run_state_machine(msb, h_msb_parallel_switch);
780 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
781 msb_reset(msb, true);
785 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
790 static int msb_set_overwrite_flag(struct msb_data *msb,
793 if (msb->read_only)
796 msb->regs.param.block_address = cpu_to_be16(pba);
797 msb->regs.param.page_address = page;
798 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
799 msb->regs.extra_data.overwrite_flag = flag;
800 msb->command_value = MS_CMD_BLOCK_WRITE;
801 msb->command_need_oob = true;
805 return msb_run_state_machine(msb, h_msb_send_command);
808 static int msb_mark_bad(struct msb_data *msb, int pba)
811 msb_reset(msb, true);
813 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
816 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
819 msb_reset(msb, true);
820 return msb_set_overwrite_flag(msb,
825 static int msb_erase_block(struct msb_data *msb, u16 pba)
828 if (msb->read_only)
834 msb->regs.param.block_address = cpu_to_be16(pba);
835 msb->regs.param.page_address = 0;
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
837 msb->command_value = MS_CMD_BLOCK_ERASE;
838 msb->command_need_oob = false;
841 error = msb_run_state_machine(msb, h_msb_send_command);
842 if (!error || msb_reset(msb, true))
848 msb_mark_bad(msb, pba);
852 msb_mark_block_unused(msb, pba);
853 __set_bit(pba, msb->erased_blocks_bitmap);
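
msb_erase_block shows the recovery pattern used throughout the driver: retry the command a few times, resetting the card between attempts (the `!error || msb_reset(...)` break condition at line 842), and if every attempt fails, mark the physical block bad; on success the block returns to the free pool and is noted in erased_blocks_bitmap, which lets msb_write_block skip the read-back verification later. A condensed sketch of retry-then-mark-bad (the retry count and all helpers are stand-ins):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical device hooks standing in for the real TPC sequences. */
    static int send_erase_command(int pba) { (void)pba; return -EIO; }
    static int reset_card(void)            { return 0; }
    static void mark_block_bad(int pba)    { printf("pba %d marked bad\n", pba); }
    static void mark_block_unused(int pba) { (void)pba; }

    /* Models the retry/mark-bad shape of msb_erase_block. */
    static int erase_block(int pba, bool read_only)
    {
        int error = -EIO;

        if (read_only)
            return -EROFS;

        for (int try = 0; try < 3; try++) {   /* retry count illustrative */
            error = send_erase_command(pba);
            if (!error || reset_card())
                break;                        /* success, or reset failed */
        }
        if (error) {
            mark_block_bad(pba);              /* all retries exhausted */
            return error;
        }
        mark_block_unused(pba);   /* back to the free pool; the driver also
                                     sets the pba in erased_blocks_bitmap */
        return 0;
    }

    int main(void)
    {
        return erase_block(42, false) ? 1 : 0;
    }
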
858 static int msb_read_page(struct msb_data *msb,
867 size_t len = msb->page_size;
901 if (pba >= msb->block_count) {
907 msb->regs.param.block_address = cpu_to_be16(pba);
908 msb->regs.param.page_address = page;
909 msb->regs.param.cp = MEMSTICK_CP_PAGE;
911 msb->current_sg = sg;
912 msb->current_sg_offset = offset;
913 error = msb_run_state_machine(msb, h_msb_read_page);
923 *extra = msb->regs.extra_data;
925 if (!error || msb_reset(msb, true))
935 if (msb->regs.extra_data.overwrite_flag &
937 msb_mark_page_bad(msb, pba, page);
948 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
954 msb->regs.param.block_address = cpu_to_be16(pba);
955 msb->regs.param.page_address = page;
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
958 if (pba >= msb->block_count) {
963 error = msb_run_state_machine(msb, h_msb_read_page);
964 *extra = msb->regs.extra_data;
976 static int msb_verify_block(struct msb_data *msb, u16 pba,
982 sg_init_one(&sg, msb->block_buffer, msb->block_size);
984 while (page < msb->pages_in_block) {
986 error = msb_read_page(msb, pba, page,
987 NULL, &sg, page * msb->page_size);
994 msb->block_buffer, msb->block_size))
1000 static int msb_write_block(struct msb_data *msb,
1004 BUG_ON(sg->length < msb->page_size);
1006 if (msb->read_only)
1015 if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1026 if (pba == msb->boot_block_locations[0] ||
1027 pba == msb->boot_block_locations[1]) {
1034 if (msb->read_only)
1037 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1038 msb->regs.param.page_address = 0;
1039 msb->regs.param.block_address = cpu_to_be16(pba);
1041 msb->regs.extra_data.management_flag = 0xFF;
1042 msb->regs.extra_data.overwrite_flag = 0xF8;
1043 msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1045 msb->current_sg = sg;
1046 msb->current_sg_offset = offset;
1047 msb->current_page = 0;
1049 error = msb_run_state_machine(msb, h_msb_write_block);
1058 !test_bit(pba, msb->erased_blocks_bitmap)))
1059 error = msb_verify_block(msb, pba, sg, offset);
1064 if (current_try > 1 || msb_reset(msb, true))
1068 error = msb_erase_block(msb, pba);
1078 static u16 msb_get_free_block(struct msb_data *msb, int zone)
1086 if (!msb->free_block_count[zone]) {
1088 msb->read_only = true;
1092 pos %= msb->free_block_count[zone];
1095 msb->free_block_count[zone], pos);
1097 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1098 msb->block_count, pba);
1100 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1101 msb->block_count, pba + 1);
1105 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1107 msb->read_only = true;
1111 msb_mark_block_used(msb, pba);
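
msb_get_free_block spreads wear by randomization: pos is reduced modulo the zone's free count (the driver draws it from a random source), then find_next_zero_bit seeks to the zone's first free block and the loop advances pos more zero bits, landing on the pos-th free block of the zone. Running past the zone boundary means the bookkeeping is corrupt, so the card goes read-only. A self-contained model of "pick the N-th zero bit" (the 64-bit map and helper are simplified stand-ins for the kernel bitmap API):

    #include <stdint.h>
    #include <stdio.h>

    #define NBITS 64

    /* Next zero bit at or after 'from' (NBITS if none), a stand-in
     * for the kernel's find_next_zero_bit(). */
    static int next_zero_bit(uint64_t map, int from)
    {
        for (int i = from; i < NBITS; i++)
            if (!(map >> i & 1))
                return i;
        return NBITS;
    }

    /* Models msb_get_free_block: the pos-th free block at/after 'start'. */
    static int nth_free_block(uint64_t used, int start, int pos)
    {
        int pba = next_zero_bit(used, start);

        for (int i = 0; i < pos && pba < NBITS; i++)
            pba = next_zero_bit(used, pba + 1);
        return pba;   /* caller must check it is still inside the zone */
    }

    int main(void)
    {
        uint64_t used = 0x0F;                        /* blocks 0..3 used */
        printf("%d\n", nth_free_block(used, 0, 0));  /* 4 */
        printf("%d\n", nth_free_block(used, 0, 2));  /* 6 */
        return 0;
    }
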
1115 static int msb_update_block(struct msb_data *msb, u16 lba,
1121 pba = msb->lba_to_pba_table[lba];
1126 msb_set_overwrite_flag(msb, pba, 0,
1131 new_pba = msb_get_free_block(msb,
1141 error = msb_write_block(msb, new_pba, lba, sg, offset);
1143 msb_mark_bad(msb, new_pba);
1151 msb_erase_block(msb, pba);
1152 msb->lba_to_pba_table[lba] = new_pba;
1158 msb->read_only = true;
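
msb_update_block is the FTL's copy-on-write path: the old physical block keeps the data while a fresh block from the same zone receives the new version; only after a successful write is the old block erased and lba_to_pba_table repointed, and a failed write marks the new block bad and retries with another. An interrupted update therefore leaves two blocks claiming one LBA, which msb_ftl_scan's duplicate handling (lines 1457-1487 below) resolves at mount. A sketch of the sequencing (every helper here is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #define BLOCK_INVALID 0xFFFF

    static unsigned short lba_to_pba[496];   /* models lba_to_pba_table */

    /* Hypothetical primitives standing in for the driver's helpers. */
    static int  get_free_block(int zone)      { (void)zone; return 7; }
    static int  write_block(int pba, int lba) { (void)pba; (void)lba; return 0; }
    static void erase_block(int pba)          { (void)pba; }
    static void mark_bad(int pba)             { (void)pba; }

    /* Models the copy-on-write core of msb_update_block. */
    static int update_block(int lba, int zone)
    {
        int old_pba = lba_to_pba[lba];

        for (int try = 0; try < 3; try++) {   /* retry count illustrative */
            int new_pba = get_free_block(zone);

            if (new_pba == BLOCK_INVALID)
                return -EIO;
            if (write_block(new_pba, lba)) {
                mark_bad(new_pba);            /* bad media: try another */
                continue;
            }
            if (old_pba != BLOCK_INVALID)
                erase_block(old_pba);         /* old copy released... */
            lba_to_pba[lba] = new_pba;        /* ...then the map repointed */
            return 0;
        }
        return -EIO;
    }

    int main(void)
    {
        lba_to_pba[5] = 3;
        return update_block(5, 0);
    }
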
1190 static int msb_read_boot_blocks(struct msb_data *msb)
1197 msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1198 msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1199 msb->boot_block_count = 0;
1203 if (!msb->boot_page) {
1208 msb->boot_page = page;
1210 page = msb->boot_page;
1212 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1217 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1234 msb->boot_block_locations[msb->boot_block_count] = pba;
1237 msb->boot_block_count++;
1239 if (msb->boot_block_count == 2)
1243 if (!msb->boot_block_count) {
1252 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1263 boot_block = &msb->boot_page[block_nr];
1264 pba = msb->boot_block_locations[block_nr];
1266 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1275 page = data_offset / msb->page_size;
1276 page_offset = data_offset % msb->page_size;
1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
1279 msb->page_size;
1292 error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1297 offset += msb->page_size;
1299 if (page == msb->pages_in_block) {
1311 if (bad_block >= msb->block_count) {
1317 if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1324 msb_mark_block_used(msb, bad_block);
1331 static int msb_ftl_initialize(struct msb_data *msb)
1335 if (msb->ftl_initialized)
1338 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1339 msb->logical_block_count = msb->zone_count * 496 - 2;
1341 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1342 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
1343 msb->lba_to_pba_table =
1344 kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
1346 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1347 !msb->erased_blocks_bitmap) {
1348 kfree(msb->used_blocks_bitmap);
1349 kfree(msb->lba_to_pba_table);
1350 kfree(msb->erased_blocks_bitmap);
1354 for (i = 0; i < msb->zone_count; i++)
1355 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1357 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1358 msb->logical_block_count * sizeof(u16));
1361 msb->zone_count, msb->logical_block_count);
1363 msb->ftl_initialized = true;
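
The FTL geometry falls out of two constants: each 512-block zone exports 496 logical blocks (the remaining 16 per zone stay as spares for bad-block replacement), minus 2 card-wide, matching the two boot blocks the driver tracks. The used/erased bitmaps take one bit per physical block (block_count / 8 bytes) and the lba-to-pba map two bytes per logical block. A quick worked computation for a hypothetical one-zone card:

    #include <stdio.h>

    int main(void)
    {
        int block_count = 512;               /* hypothetical 1-zone card */
        int zone_count = block_count / 512;  /* MS_BLOCKS_IN_ZONE */
        int logical = zone_count * 496 - 2;  /* as in msb_ftl_initialize */

        printf("zones=%d logical=%d\n", zone_count, logical); /* 1, 494 */
        printf("bitmap bytes=%d table bytes=%d\n",
               block_count / 8, logical * (int)sizeof(short)); /* 64, 988 */
        return 0;
    }
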
1367 static int msb_ftl_scan(struct msb_data *msb)
1373 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1379 for (pba = 0; pba < msb->block_count; pba++) {
1381 if (pba == msb->boot_block_locations[0] ||
1382 pba == msb->boot_block_locations[1]) {
1384 msb_mark_block_used(msb, pba);
1388 if (test_bit(pba, msb->used_blocks_bitmap)) {
1394 error = msb_read_oob(msb, pba, 0, &extra);
1400 msb_mark_block_used(msb, pba);
1401 msb_erase_block(msb, pba);
1419 msb_mark_block_used(msb, pba);
1428 msb_mark_block_used(msb, pba);
1436 msb_mark_block_used(msb, pba);
1437 msb_erase_block(msb, pba);
1446 msb_mark_block_used(msb, pba);
1452 msb_erase_block(msb, pba);
1457 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1459 msb->lba_to_pba_table[lba] = pba;
1463 other_block = msb->lba_to_pba_table[lba];
1471 msb_erase_block(msb, other_block);
1472 msb->lba_to_pba_table[lba] = pba;
1479 msb_erase_block(msb, pba);
1486 msb_erase_block(msb, other_block);
1487 msb->lba_to_pba_table[lba] = pba;
1497 struct msb_data *msb = (struct msb_data *)data;
1498 msb->need_flush_cache = true;
1499 queue_work(msb->io_queue, &msb->io_work);
1503 static void msb_cache_discard(struct msb_data *msb)
1505 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1508 del_timer_sync(&msb->cache_flush_timer);
1511 msb->cache_block_lba = MS_BLOCK_INVALID;
1512 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1515 static int msb_cache_init(struct msb_data *msb)
1517 setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
1518 (unsigned long)msb);
1520 if (!msb->cache)
1521 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1522 if (!msb->cache)
1525 msb_cache_discard(msb);
1529 static int msb_cache_flush(struct msb_data *msb)
1536 if (msb->read_only)
1539 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1542 lba = msb->cache_block_lba;
1543 pba = msb->lba_to_pba_table[lba];
1546 pba, msb->cache_block_lba);
1548 sg_init_one(&sg, msb->cache, msb->block_size);
1551 for (page = 0; page < msb->pages_in_block; page++) {
1553 if (test_bit(page, &msb->valid_cache_bitmap))
1556 offset = page * msb->page_size;
1560 error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1577 set_bit(page, &msb->valid_cache_bitmap);
1581 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1582 pba = msb->lba_to_pba_table[msb->cache_block_lba];
1586 for (page = 0; page < msb->pages_in_block; page++) {
1588 if (test_bit(page, &msb->valid_cache_bitmap))
1593 msb_set_overwrite_flag(msb,
1598 msb_cache_discard(msb);
1602 static int msb_cache_write(struct msb_data *msb, int lba,
1608 if (msb->read_only)
1611 if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1612 lba != msb->cache_block_lba)
1617 if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1618 lba != msb->cache_block_lba) {
1620 error = msb_cache_flush(msb);
1625 if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1626 msb->cache_block_lba = lba;
1627 mod_timer(&msb->cache_flush_timer,
1634 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1637 msb->cache + page * msb->page_size, msb->page_size);
1639 set_bit(page, &msb->valid_cache_bitmap);
1643 static int msb_cache_read(struct msb_data *msb, int lba,
1646 int pba = msb->lba_to_pba_table[lba];
1650 if (lba == msb->cache_block_lba &&
1651 test_bit(page, &msb->valid_cache_bitmap)) {
1658 offset, msb->page_size);
1660 msb->cache + msb->page_size * page,
1661 msb->page_size);
1666 error = msb_read_page(msb, pba, page, NULL, sg, offset);
1670 msb_cache_write(msb, lba, page, true, sg, offset);
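
msb_cache_* is a one-block write-back cache: msb->cache buffers a single logical block, valid_cache_bitmap marks which pages it actually holds, and cache_flush_timer kicks the io workqueue to write the block back. msb_cache_flush first fills in the pages the caller never wrote by reading them from flash, then pushes the whole block through msb_update_block; msb_cache_read serves hits from the buffer and, on a miss, reads the medium and opportunistically populates the cache. A minimal single-block model of the hit/miss path (sizes and names invented):

    #include <stdio.h>
    #include <string.h>

    #define PAGES     4
    #define PAGE_SIZE 8
    #define LBA_NONE  (-1)

    static char cache[PAGES * PAGE_SIZE];
    static unsigned valid;               /* models valid_cache_bitmap */
    static int cache_lba = LBA_NONE;     /* models cache_block_lba */

    static void flash_read(int lba, int page, char *dst)
    {
        (void)lba;
        memset(dst, 'a' + page, PAGE_SIZE);   /* pretend media content */
    }

    static void cache_write(int lba, int page, const char *src)
    {
        if (cache_lba != LBA_NONE && cache_lba != lba) {
            /* a different block is cached: the driver flushes it first */
            valid = 0;
            cache_lba = LBA_NONE;
        }
        cache_lba = lba;
        memcpy(cache + page * PAGE_SIZE, src, PAGE_SIZE);
        valid |= 1u << page;             /* page is now cached */
    }

    static void cache_read(int lba, int page, char *dst)
    {
        if (lba == cache_lba && (valid & 1u << page)) {
            memcpy(dst, cache + page * PAGE_SIZE, PAGE_SIZE);  /* hit */
            return;
        }
        flash_read(lba, page, dst);      /* miss: go to the medium... */
        cache_write(lba, page, dst);     /* ...and populate the cache */
    }

    int main(void)
    {
        char buf[PAGE_SIZE];

        cache_read(3, 1, buf);           /* miss, fills the cache */
        cache_read(3, 1, buf);           /* hit */
        printf("%.8s\n", buf);
        return 0;
    }
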
1695 struct msb_data *msb = memstick_get_drvdata(card);
1700 msb->caps = 0;
1704 msb->read_only = true;
1706 msb->state = -1;
1707 error = msb_reset(msb, false);
1715 msb_switch_to_parallel(msb);
1717 msb->page_size = sizeof(struct ms_boot_page);
1720 error = msb_read_boot_blocks(msb);
1724 boot_block = &msb->boot_page[0];
1727 msb->block_count = boot_block->attr.number_of_blocks;
1728 msb->page_size = boot_block->attr.page_size;
1730 msb->pages_in_block = boot_block->attr.block_size * 2;
1731 msb->block_size = msb->page_size * msb->pages_in_block;
1733 if (msb->page_size > PAGE_SIZE) {
1735 dbg("device page %d size isn't supported", msb->page_size);
1739 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1740 if (!msb->block_buffer)
1743 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1750 msb->geometry.cylinders = chs_table[i].cyl;
1751 msb->geometry.heads = chs_table[i].head;
1752 msb->geometry.sectors = chs_table[i].sec;
1757 msb->caps |= MEMSTICK_CAP_PAR4;
1760 msb->read_only = true;
1762 dbg("Total block count = %d", msb->block_count);
1763 dbg("Each block consists of %d pages", msb->pages_in_block);
1764 dbg("Page size = %d bytes", msb->page_size);
1765 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1766 dbg("Read only: %d", msb->read_only);
1770 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1771 msb_switch_to_parallel(msb);
1774 error = msb_cache_init(msb);
1778 error = msb_ftl_initialize(msb);
1784 error = msb_read_bad_block_table(msb, 0);
1788 error = msb_read_bad_block_table(msb, 1);
1795 error = msb_ftl_scan(msb);
1805 static int msb_do_write_request(struct msb_data *msb, int lba,
1813 if (page == 0 && len - offset >= msb->block_size) {
1815 if (msb->cache_block_lba == lba)
1816 msb_cache_discard(msb);
1819 error = msb_update_block(msb, lba, sg, offset);
1823 offset += msb->block_size;
1824 *sucessfuly_written += msb->block_size;
1829 error = msb_cache_write(msb, lba, page, false, sg, offset);
1833 offset += msb->page_size;
1834 *sucessfuly_written += msb->page_size;
1837 if (page == msb->pages_in_block) {
1845 static int msb_do_read_request(struct msb_data *msb, int lba,
1854 error = msb_cache_read(msb, lba, page, sg, offset);
1858 offset += msb->page_size;
1859 *sucessfuly_read += msb->page_size;
1862 if (page == msb->pages_in_block) {
1872 struct msb_data *msb = container_of(work, struct msb_data, io_work);
1876 struct scatterlist *sg = msb->prealloc_sg;
1881 spin_lock_irqsave(&msb->q_lock, flags);
1883 if (msb->need_flush_cache) {
1884 msb->need_flush_cache = false;
1885 spin_unlock_irqrestore(&msb->q_lock, flags);
1886 msb_cache_flush(msb);
1890 if (!msb->req) {
1891 msb->req = blk_fetch_request(msb->queue);
1892 if (!msb->req) {
1894 spin_unlock_irqrestore(&msb->q_lock, flags);
1899 spin_unlock_irqrestore(&msb->q_lock, flags);
1902 if (!msb->req)
1907 blk_rq_map_sg(msb->queue, msb->req, sg);
1909 lba = blk_rq_pos(msb->req);
1911 sector_div(lba, msb->page_size / 512);
1912 page = do_div(lba, msb->pages_in_block);
1914 if (rq_data_dir(msb->req) == READ)
1915 error = msb_do_read_request(msb, lba, page, sg,
1916 blk_rq_bytes(msb->req), &len);
1918 error = msb_do_write_request(msb, lba, page, sg,
1919 blk_rq_bytes(msb->req), &len);
1921 spin_lock_irqsave(&msb->q_lock, flags);
1924 if (!__blk_end_request(msb->req, 0, len))
1925 msb->req = NULL;
1927 if (error && msb->req) {
1929 if (!__blk_end_request(msb->req, error, msb->page_size))
1930 msb->req = NULL;
1933 if (msb->req)
1936 spin_unlock_irqrestore(&msb->q_lock, flags);
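
Inside msb_io_work, the request's starting 512-byte sector is converted to FTL coordinates in two steps: dividing by page_size/512 yields an absolute page number, and do_div then splits it into a logical block and a page within it (do_div leaves the quotient in its argument and returns the remainder). For page_size = 2048 and pages_in_block = 32, sector 1000 becomes absolute page 250, i.e. lba 7, page 26. The same math in plain C (sector_div/do_div are kernel macros; ordinary division stands in here):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long sector = 1000;   /* blk_rq_pos(msb->req) */
        int page_size = 2048, pages_in_block = 32;

        /* sector_div(lba, msb->page_size / 512): sectors -> absolute page */
        unsigned long long lba = sector / (page_size / 512);

        /* page = do_div(lba, msb->pages_in_block): split block + page */
        int page = (int)(lba % pages_in_block);
        lba /= pages_in_block;

        printf("lba=%llu page=%d\n", lba, page);   /* lba=7 page=26 */
        return 0;
    }
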
1946 struct msb_data *msb = disk->private_data;
1952 if (msb && msb->card)
1953 msb->usage_count++;
1959 static void msb_data_clear(struct msb_data *msb)
1961 kfree(msb->boot_page);
1962 kfree(msb->used_blocks_bitmap);
1963 kfree(msb->lba_to_pba_table);
1964 kfree(msb->cache);
1965 msb->card = NULL;
1970 struct msb_data *msb = disk->private_data;
1975 if (msb) {
1976 if (msb->usage_count)
1977 msb->usage_count--;
1979 if (!msb->usage_count) {
1981 idr_remove(&msb_disk_idr, msb->disk_id);
1983 kfree(msb);
1998 struct msb_data *msb = bdev->bd_disk->private_data;
1999 *geo = msb->geometry;
2017 struct msb_data *msb = memstick_get_drvdata(card);
2022 if (msb->card_dead) {
2025 WARN_ON(!msb->io_queue_stopped);
2032 if (msb->req)
2035 if (!msb->io_queue_stopped)
2036 queue_work(msb->io_queue, &msb->io_work);
2041 struct msb_data *msb = memstick_get_drvdata(card);
2042 return (msb->card_dead == 0);
2047 struct msb_data *msb = memstick_get_drvdata(card);
2052 spin_lock_irqsave(&msb->q_lock, flags);
2053 blk_stop_queue(msb->queue);
2054 msb->io_queue_stopped = true;
2055 spin_unlock_irqrestore(&msb->q_lock, flags);
2057 del_timer_sync(&msb->cache_flush_timer);
2058 flush_workqueue(msb->io_queue);
2060 if (msb->req) {
2061 spin_lock_irqsave(&msb->q_lock, flags);
2062 blk_requeue_request(msb->queue, msb->req);
2063 msb->req = NULL;
2064 spin_unlock_irqrestore(&msb->q_lock, flags);
2071 struct msb_data *msb = memstick_get_drvdata(card);
2076 msb_invalidate_reg_window(msb);
2078 spin_lock_irqsave(&msb->q_lock, flags);
2079 if (!msb->io_queue_stopped || msb->card_dead) {
2080 spin_unlock_irqrestore(&msb->q_lock, flags);
2083 spin_unlock_irqrestore(&msb->q_lock, flags);
2086 msb->need_flush_cache = true;
2087 msb->io_queue_stopped = false;
2089 spin_lock_irqsave(&msb->q_lock, flags);
2090 blk_start_queue(msb->queue);
2091 spin_unlock_irqrestore(&msb->q_lock, flags);
2093 queue_work(msb->io_queue, &msb->io_work);
2107 struct msb_data *msb = memstick_get_drvdata(card);
2117 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2120 if (msb->disk_id < 0)
2121 return msb->disk_id;
2123 msb->disk = alloc_disk(0);
2124 if (!msb->disk) {
2129 msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
2130 if (!msb->queue) {
2135 msb->queue->queuedata = card;
2136 blk_queue_prep_rq(msb->queue, msb_prepare_req);
2138 blk_queue_bounce_limit(msb->queue, limit);
2139 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
2140 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
2141 blk_queue_max_segment_size(msb->queue,
2142 MS_BLOCK_MAX_PAGES * msb->page_size);
2143 blk_queue_logical_block_size(msb->queue, msb->page_size);
2145 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2146 msb->disk->fops = &msb_bdops;
2147 msb->disk->private_data = msb;
2148 msb->disk->queue = msb->queue;
2149 msb->disk->driverfs_dev = &card->dev;
2150 msb->disk->flags |= GENHD_FL_EXT_DEVT;
2152 capacity = msb->pages_in_block * msb->logical_block_count;
2153 capacity *= (msb->page_size / 512);
2154 set_capacity(msb->disk, capacity);
2157 msb->usage_count = 1;
2158 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2159 INIT_WORK(&msb->io_work, msb_io_work);
2160 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2162 if (msb->read_only)
2163 set_disk_ro(msb->disk, 1);
2166 add_disk(msb->disk);
2171 put_disk(msb->disk);
2174 idr_remove(&msb_disk_idr, msb->disk_id);
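
For reference, the capacity set at lines 2152-2154 is expressed in 512-byte sectors: pages_in_block * logical_block_count * (page_size / 512). Reusing the illustrative geometry from the sketches above (32 pages of 2048 bytes per block, 494 logical blocks), that is 32 * 494 * 4 = 63232 sectors, about 30.9 MiB of exported disk.
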
2181 struct msb_data *msb;
2184 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2185 if (!msb)
2187 memstick_set_drvdata(card, msb);
2188 msb->card = card;
2189 spin_lock_init(&msb->q_lock);
2204 msb_data_clear(msb);
2205 kfree(msb);
2211 struct msb_data *msb = memstick_get_drvdata(card);
2214 if (!msb->io_queue_stopped)
2220 spin_lock_irqsave(&msb->q_lock, flags);
2221 msb->card_dead = true;
2222 blk_start_queue(msb->queue);
2223 spin_unlock_irqrestore(&msb->q_lock, flags);
2226 del_gendisk(msb->disk);
2227 blk_cleanup_queue(msb->queue);
2228 msb->queue = NULL;
2231 msb_data_clear(msb);
2234 msb_disk_release(msb->disk);
2248 struct msb_data *msb = memstick_get_drvdata(card);
2253 msb->card_dead = true;
2265 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2270 if (msb->block_size != new_msb->block_size)
2273 if (memcmp(msb->boot_page, new_msb->boot_page,
2277 if (msb->logical_block_count != new_msb->logical_block_count ||
2278 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2279 msb->logical_block_count))
2282 if (msb->block_count != new_msb->block_count ||
2283 memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2284 msb->block_count / 8))
2292 msb->card_dead = card_dead;
2293 memstick_set_drvdata(card, msb);