Lines matching refs:host — identifier cross-reference over the mg_disk block driver; each entry below is the source line number followed by the matching line.

149 static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
151 if (__blk_end_request(host->req, err, nr_bytes))
154 host->req = NULL;
158 static bool mg_end_request_cur(struct mg_host *host, int err)
160 return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
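The two helpers above wrap the pre-blk-mq partial-completion API: a chunk of the current request is completed, and host->req is dropped only once __blk_end_request() reports the whole request finished. A minimal sketch assembling the matched lines, assuming the __blk_end_request()/blk_rq_cur_bytes() signatures of that era's <linux/blkdev.h>:

static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
{
        /* complete nr_bytes; true means the request still has bytes left */
        if (__blk_end_request(host->req, err, nr_bytes))
                return true;

        /* fully completed: let the request function fetch the next one */
        host->req = NULL;
        return false;
}

static bool mg_end_request_cur(struct mg_host *host, int err)
{
        /* complete just the current segment of the request */
        return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
}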
164 struct mg_host *host)
168 if (host->req)
169 name = host->req->rq_disk->disk_name;
188 host->error = 0;
190 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
192 host->error & 0xff);
193 if (host->error & ATA_BBK)
195 if (host->error & ATA_UNC)
197 if (host->error & ATA_IDNF)
199 if (host->error & ATA_ABORTED)
201 if (host->error & ATA_AMNF)
204 if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
205 if (host->req)
207 (unsigned int)blk_rq_pos(host->req));
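mg_dump_status() latches the ATA error register into host->error and decodes the standard error bits. A hedged reconstruction built around the matched lines, assuming the ATA_* definitions from <linux/ata.h>; the printk strings are illustrative, not quotes of the driver:

static void mg_dump_status(const char *msg, unsigned int stat,
                           struct mg_host *host)
{
        char *name = MG_DISK_NAME;

        if (host->req)
                name = host->req->rq_disk->disk_name;

        printk(KERN_ERR "%s: %s: status=0x%02x\n", name, msg, stat & 0xff);

        host->error = 0;
        if (stat & ATA_ERR) {
                host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
                printk(KERN_ERR "%s: error=0x%02x\n", name, host->error & 0xff);
                if (host->error & ATA_BBK)
                        printk(KERN_CONT "BadSector ");
                if (host->error & ATA_UNC)
                        printk(KERN_CONT "UncorrectableError ");
                if (host->error & ATA_IDNF)
                        printk(KERN_CONT "SectorIdNotFound ");
                if (host->error & ATA_ABORTED)
                        printk(KERN_CONT "DriveStatusError ");
                if (host->error & ATA_AMNF)
                        printk(KERN_CONT "AddrMarkNotFound ");
                /* media-related errors: report which sector failed */
                if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
                        if (host->req)
                                printk(KERN_CONT "sector=%u\n",
                                       (unsigned int)blk_rq_pos(host->req));
                }
        }
}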
213 static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
217 struct mg_drv_data *prv_data = host->dev->platform_data;
219 host->error = MG_ERR_NONE;
228 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
229 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
232 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
242 mg_dump_status("mg_wait", status, host);
255 mg_dump_status("not ready", status, host);
259 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
263 host->error = MG_ERR_TIMEOUT;
265 return host->error;
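mg_wait() is the central busy-wait: poll MG_REG_STATUS until the expected bits appear or the millisecond budget runs out, decoding errors on the way and latching MG_ERR_TIMEOUT on expiry. A condensed sketch of that pattern, assuming jiffies-based timing; the real routine also adapts its polling to prv_data->use_polling, which is omitted here:

static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
        unsigned long expire = jiffies + msecs_to_jiffies(msec);
        u8 status;

        host->error = MG_ERR_NONE;

        do {
                status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

                /* waiting for BUSY to assert (e.g. right after a reset) */
                if (expect == ATA_BUSY && (status & ATA_BUSY))
                        return MG_ERR_NONE;

                /* otherwise wait for BUSY to clear and the expected bits */
                if (!(status & ATA_BUSY) && (status & expect) == expect)
                        return MG_ERR_NONE;

                /* device flagged an error: mg_dump_status() fills host->error */
                if (status & ATA_ERR) {
                        mg_dump_status("mg_wait", status, host);
                        return host->error;
                }

                cpu_relax();
        } while (time_before(jiffies, expire));

        mg_dump_status("not ready", status, host);
        host->error = MG_ERR_TIMEOUT;
        return host->error;
}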
282 static void mg_unexpected_intr(struct mg_host *host)
284 u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
286 mg_dump_status("mg_unexpected_intr", status, host);
291 struct mg_host *host = dev_id;
292 void (*handler)(struct mg_host *) = host->mg_do_intr;
294 spin_lock(&host->lock);
296 host->mg_do_intr = NULL;
297 del_timer(&host->timer);
300 handler(host);
302 spin_unlock(&host->lock);
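mg_irq() dispatches to whichever stage handler mg_out() armed in host->mg_do_intr, under host->lock, and cancels the watchdog; a stray interrupt with no handler pending falls through to mg_unexpected_intr(), which just dumps the status. A sketch assuming the standard irqreturn_t convention:

static irqreturn_t mg_irq(int irq, void *dev_id)
{
        struct mg_host *host = dev_id;
        void (*handler)(struct mg_host *) = host->mg_do_intr;

        spin_lock(&host->lock);

        host->mg_do_intr = NULL;        /* one-shot: consumed by this IRQ */
        del_timer(&host->timer);        /* the command did not time out */

        if (!handler)
                handler = mg_unexpected_intr;
        handler(host);

        spin_unlock(&host->lock);

        return IRQ_HANDLED;
}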
343 static int mg_get_disk_id(struct mg_host *host)
347 const u16 *id = host->id;
348 struct mg_drv_data *prv_data = host->dev->platform_data;
354 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
356 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
357 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
362 host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
365 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
366 err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
373 host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
374 host->cyls = id[ATA_ID_CYLS];
375 host->heads = id[ATA_ID_HEADS];
376 host->sectors = id[ATA_ID_SECTORS];
378 if (MG_RES_SEC && host->heads && host->sectors) {
380 host->cyls = (host->n_sectors - MG_RES_SEC) /
381 host->heads / host->sectors;
382 host->nres_sectors = host->n_sectors - host->cyls *
383 host->heads * host->sectors;
384 host->n_sectors -= host->nres_sectors;
394 host->n_sectors, host->nres_sectors);
397 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
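After the IDENTIFY data has been read in, mg_get_disk_id() derives the usable geometry, shrinking the cylinder count so that MG_RES_SEC sectors stay reserved at the end of the media. An annotated sketch of that arithmetic, assuming ata_id_u32() and the ATA_ID_* word indices from <linux/ata.h>:

        /* capacity and native CHS straight from the IDENTIFY words */
        host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        host->cyls = id[ATA_ID_CYLS];
        host->heads = id[ATA_ID_HEADS];
        host->sectors = id[ATA_ID_SECTORS];

        if (MG_RES_SEC && host->heads && host->sectors) {
                /* recompute cylinders so MG_RES_SEC sectors remain reserved */
                host->cyls = (host->n_sectors - MG_RES_SEC) /
                        host->heads / host->sectors;
                host->nres_sectors = host->n_sectors - host->cyls *
                        host->heads * host->sectors;
                host->n_sectors -= host->nres_sectors;
        }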
403 static int mg_disk_init(struct mg_host *host)
405 struct mg_drv_data *prv_data = host->dev->platform_data;
410 gpio_set_value(host->rst, 0);
411 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
416 gpio_set_value(host->rst, 1);
417 err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
423 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
424 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
430 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
431 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
435 init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;
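mg_disk_init() resets the card twice: a hardware reset through the RST GPIO, then an ATA soft reset through MG_REG_DRV_CTRL, waiting on the expected status transition after each step. A hedged sketch of the sequence; the operands written at the soft-reset steps are cut off in the matches, so ATA_SRST | ATA_NIEN and ATA_NIEN are assumptions here:

        /* assert then release the hardware reset line */
        gpio_set_value(host->rst, 0);
        err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;

        gpio_set_value(host->rst, 1);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
        if (err)
                return err;

        /* ATA soft reset: set SRST, wait BUSY, clear SRST, wait READY */
        outb(ATA_SRST | ATA_NIEN,               /* assumed operand */
             (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
        err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;

        outb(ATA_NIEN,                          /* assumed operand */
             (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
        if (err)
                return err;

        /* sanity check: the low status nibble should not read back all-ones */
        init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;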
443 static void mg_bad_rw_intr(struct mg_host *host)
445 if (host->req)
446 if (++host->req->errors >= MG_MAX_ERRORS ||
447 host->error == MG_ERR_TIMEOUT)
448 mg_end_request_cur(host, -EIO);
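mg_bad_rw_intr() is the retry policy: a failed chunk is left on host->req so it gets retried, and only after MG_MAX_ERRORS attempts, or on a hard timeout, is the current chunk failed with -EIO. Spelled out:

static void mg_bad_rw_intr(struct mg_host *host)
{
        /* retry by default; give up after MG_MAX_ERRORS or a timeout */
        if (host->req)
                if (++host->req->errors >= MG_MAX_ERRORS ||
                    host->error == MG_ERR_TIMEOUT)
                        mg_end_request_cur(host, -EIO);
}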
451 static unsigned int mg_out(struct mg_host *host,
457 struct mg_drv_data *prv_data = host->dev->platform_data;
459 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
460 return host->error;
463 host->mg_do_intr = intr_addr;
464 mod_timer(&host->timer, jiffies + 3 * HZ);
468 outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
469 outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
470 outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
472 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
475 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
476 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
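mg_out() programs one transfer: when running in interrupt mode it arms the stage handler and a 3-second watchdog, then writes the sector count, the 28-bit LBA spread across four registers, and finally the command byte. A sketch of the body between the READY wait and the command write; the cylinder-register names, the DRV_HEAD operand and the use_polling guard are assumptions, since those parts of the lines are cut off above:

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return host->error;

        if (!prv_data->use_polling) {           /* guard assumed */
                /* remember which stage handler the IRQ should run, and
                 * start a watchdog in case the interrupt never arrives */
                host->mg_do_intr = intr_addr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        }

        outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
        outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
        outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
             MG_REG_CYL_LOW);                   /* register name assumed */
        outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
             MG_REG_CYL_HIGH);                  /* register name assumed */
        outb((u8)((sect_num >> 24) | ATA_LBA),  /* operand assumed */
             (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
        outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);

        return MG_ERR_NONE;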
480 static void mg_read_one(struct mg_host *host, struct request *req)
486 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
492 struct mg_host *host = req->rq_disk->private_data;
494 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
496 mg_bad_rw_intr(host);
502 if (mg_wait(host, ATA_DRQ,
504 mg_bad_rw_intr(host);
508 mg_read_one(host, req);
510 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
512 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
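In polled mode the whole read happens inline: mg_out() issues MG_CMD_RD with no stage handler, then each sector is drained by mg_read_one(), acknowledged with MG_CMD_RD_CONF, and completed one MG_SECTOR_SIZE chunk at a time until mg_end_request() reports the request done. A sketch of both routines; treating bio_data(req->bio) as the PIO buffer is an assumption:

static void mg_read_one(struct mg_host *host, struct request *req)
{
        u16 *buff = (u16 *)bio_data(req->bio);  /* buffer source assumed */
        u32 i;

        /* drain one sector, one 16-bit word at a time, from the window */
        for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
                *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
                              (i << 1));
}

static void mg_read(struct request *req)
{
        struct mg_host *host = req->rq_disk->private_data;

        /* polled read: no interrupt-stage handler is armed */
        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
                   MG_CMD_RD, NULL) != MG_ERR_NONE) {
                mg_bad_rw_intr(host);
                return;
        }

        do {
                /* wait for the device to stage the next sector */
                if (mg_wait(host, ATA_DRQ,
                            MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return;
                }

                mg_read_one(host, req);

                /* acknowledge so the device stages the following sector */
                outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
                     MG_REG_COMMAND);
        } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}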
515 static void mg_write_one(struct mg_host *host, struct request *req)
521 outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
527 struct mg_host *host = req->rq_disk->private_data;
530 if (mg_out(host, blk_rq_pos(req), rem,
532 mg_bad_rw_intr(host);
539 if (mg_wait(host, ATA_DRQ,
541 mg_bad_rw_intr(host);
546 mg_write_one(host, req);
548 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
552 if (rem > 1 && mg_wait(host, ATA_DRQ,
554 mg_bad_rw_intr(host);
556 } else if (mg_wait(host, MG_STAT_READY,
558 mg_bad_rw_intr(host);
561 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
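The polled write mirrors the read loop with the data moving the other way: each sector is pushed by mg_write_one(), confirmed with MG_CMD_WR_CONF, and then the driver waits for another DRQ while sectors remain, or for READY on the last one. A compressed sketch of the loop tail, where rem is the remaining-sector count passed to mg_out() above; the control flow is simplified relative to the driver's exact if/else chain:

        do {
                mg_write_one(host, req);        /* push one sector over PIO */

                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                     MG_REG_COMMAND);

                rem--;
                if (rem > 1) {
                        /* more sectors follow: wait for the next DRQ */
                        if (mg_wait(host, ATA_DRQ,
                                    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
                                mg_bad_rw_intr(host);
                                return;
                        }
                } else if (mg_wait(host, MG_STAT_READY,
                                   MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
                        /* tail of the transfer: wait for READY */
                        mg_bad_rw_intr(host);
                        return;
                }
        } while (mg_end_request(host, 0, MG_SECTOR_SIZE));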
564 static void mg_read_intr(struct mg_host *host)
566 struct request *req = host->req;
571 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
579 mg_dump_status("mg_read_intr", i, host);
580 mg_bad_rw_intr(host);
581 mg_request(host->breq);
585 mg_read_one(host, req);
591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
595 host->mg_do_intr = mg_read_intr;
596 mod_timer(&host->timer, jiffies + 3 * HZ);
598 mg_request(host->breq);
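In interrupt mode each IRQ delivers one sector: mg_read_intr() checks the status register, drains the sector, re-arms itself plus the watchdog if the request still has bytes left, and otherwise kicks the queue again via mg_request(host->breq). A sketch of that continuation; the exact status test is an assumption, since the matched line only shows the inb():

static void mg_read_intr(struct mg_host *host)
{
        struct request *req = host->req;
        u32 i;

        /* the device should have data staged: DRQ set, no error */
        i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
        if (!(i & ATA_DRQ) || (i & ATA_ERR)) {  /* test assumed */
                mg_dump_status("mg_read_intr", i, host);
                mg_bad_rw_intr(host);
                mg_request(host->breq);
                return;
        }

        mg_read_one(host, req);

        /* acknowledge; the device will raise the next IRQ when ready */
        outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

        if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
                /* more sectors pending: stay in the read stage */
                host->mg_do_intr = mg_read_intr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        } else {
                /* request finished: pull the next one off the queue */
                mg_request(host->breq);
        }
}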
601 static void mg_write_intr(struct mg_host *host)
603 struct request *req = host->req;
609 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
617 mg_dump_status("mg_write_intr", i, host);
618 mg_bad_rw_intr(host);
619 mg_request(host->breq);
623 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
625 mg_write_one(host, req);
628 host->mg_do_intr = mg_write_intr;
629 mod_timer(&host->timer, jiffies + 3 * HZ);
633 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
636 mg_request(host->breq);
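mg_write_intr() is the mirror image: after the status check it completes the sector just transferred, and only if bytes remain does it stage the next sector, re-arm itself and the watchdog, and confirm with MG_CMD_WR_CONF. A sketch of that branch:

        if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
                /* more to write: stage the next sector before confirming */
                mg_write_one(host, req);
                host->mg_do_intr = mg_write_intr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
                outb(MG_CMD_WR_CONF,
                     (unsigned long)host->dev_base + MG_REG_COMMAND);
        } else {
                /* request finished: process the next queued request */
                mg_request(host->breq);
        }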
641 struct mg_host *host = (struct mg_host *)data;
644 spin_lock_irq(&host->lock);
646 if (!host->req)
649 host->mg_do_intr = NULL;
651 name = host->req->rq_disk->disk_name;
654 host->error = MG_ERR_TIMEOUT;
655 mg_bad_rw_intr(host);
658 mg_request(host->breq);
659 spin_unlock_irq(&host->lock);
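mg_times_out() is the watchdog armed alongside each interrupt-mode command: if no IRQ arrives within 3 seconds it drops the pending stage handler, records MG_ERR_TIMEOUT, fails the current chunk, and restarts the queue. A sketch assuming the pre-4.15 timer callback signature (unsigned long data) implied by host->timer.data in the probe matches; the log text is illustrative:

static void mg_times_out(unsigned long data)
{
        struct mg_host *host = (struct mg_host *)data;
        char *name;

        spin_lock_irq(&host->lock);

        if (!host->req)
                goto out_unlock;

        host->mg_do_intr = NULL;        /* the expected IRQ never came */

        name = host->req->rq_disk->disk_name;
        printk(KERN_DEBUG "%s: timeout\n", name);

        host->error = MG_ERR_TIMEOUT;
        mg_bad_rw_intr(host);

out_unlock:
        mg_request(host->breq);         /* restart queue processing */
        spin_unlock_irq(&host->lock);
}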
664 struct mg_host *host = q->queuedata;
667 if (!host->req) {
668 host->req = blk_fetch_request(q);
669 if (!host->req)
673 if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
674 mg_end_request_cur(host, -EIO);
678 if (rq_data_dir(host->req) == READ)
679 mg_read(host->req);
681 mg_write(host->req);
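mg_request_poll() is the request_fn used when the platform runs the card polled: fetch one request at a time, reject anything that is not a filesystem request, and service it synchronously with mg_read()/mg_write(). A sketch using the blk_fetch_request()-era queue API shown above:

static void mg_request_poll(struct request_queue *q)
{
        struct mg_host *host = q->queuedata;

        while (1) {
                if (!host->req) {
                        host->req = blk_fetch_request(q);
                        if (!host->req)
                                break;          /* queue drained */
                }

                /* only normal filesystem I/O is supported */
                if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
                        mg_end_request_cur(host, -EIO);
                        continue;
                }

                if (rq_data_dir(host->req) == READ)
                        mg_read(host->req);
                else
                        mg_write(host->req);
        }
}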
686 struct mg_host *host,
692 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
694 mg_bad_rw_intr(host);
695 return host->error;
700 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
701 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
703 mg_bad_rw_intr(host);
704 return host->error;
706 del_timer(&host->timer);
707 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
708 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
709 if (host->error) {
710 mg_bad_rw_intr(host);
711 return host->error;
713 mg_write_one(host, req);
714 mod_timer(&host->timer, jiffies + 3 * HZ);
715 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
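mg_issue_req() dispatches one interrupt-mode transfer: a read only has to arm mg_read_intr via mg_out(), while a write masks the IRQ with ATA_NIEN, waits for the first DRQ, primes the first sector with mg_write_one(), and only then confirms, so the first data interrupt finds data already staged. A sketch assembled from the matched lines; the read/write branch split is inferred from them:

static unsigned int mg_issue_req(struct request *req,
                                 struct mg_host *host,
                                 unsigned int sect_num,
                                 unsigned int sect_cnt)
{
        if (rq_data_dir(req) == READ) {
                if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
                    != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
        } else {
                /* mask the IRQ while the first sector is being primed */
                outb(ATA_NIEN,
                     (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
                if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
                    != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                del_timer(&host->timer);
                mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
                outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
                if (host->error) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                mg_write_one(host, req);
                mod_timer(&host->timer, jiffies + 3 * HZ);
                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                     MG_REG_COMMAND);
        }
        return MG_ERR_NONE;
}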
725 struct mg_host *host = q->queuedata;
730 if (!host->req) {
731 host->req = blk_fetch_request(q);
732 if (!host->req)
735 req = host->req;
738 if (host->mg_do_intr)
741 del_timer(&host->timer);
755 mg_end_request_cur(host, -EIO);
760 mg_end_request_cur(host, -EIO);
764 if (!mg_issue_req(req, host, sect_num, sect_cnt))
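mg_request() is the interrupt-mode request_fn: it refuses to start a new transfer while a stage handler is still pending, cancels the watchdog, validates the range against host->n_sectors, and hands sector number and count to mg_issue_req(). A sketch; the bounds check and the non-filesystem rejection are written out as assumptions where the matched lines cut them off:

static void mg_request(struct request_queue *q)
{
        struct mg_host *host = q->queuedata;
        struct request *req;
        u32 sect_num, sect_cnt;

        while (1) {
                if (!host->req) {
                        host->req = blk_fetch_request(q);
                        if (!host->req)
                                break;
                }
                req = host->req;

                /* a previous command is still waiting for its interrupt */
                if (host->mg_do_intr)
                        return;

                del_timer(&host->timer);

                sect_num = blk_rq_pos(req);
                sect_cnt = blk_rq_sectors(req);

                /* reject out-of-range or non-filesystem requests (assumed) */
                if (sect_num >= host->n_sectors ||
                    sect_num + sect_cnt > host->n_sectors) {
                        mg_end_request_cur(host, -EIO);
                        continue;
                }
                if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
                        mg_end_request_cur(host, -EIO);
                        continue;
                }

                if (!mg_issue_req(req, host, sect_num, sect_cnt))
                        return; /* transfer started; IRQ path finishes it */
        }
}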
771 struct mg_host *host = bdev->bd_disk->private_data;
773 geo->cylinders = (unsigned short)host->cyls;
774 geo->heads = (unsigned char)host->heads;
775 geo->sectors = (unsigned char)host->sectors;
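mg_getgeo() simply reports the CHS values computed in mg_get_disk_id(). Completed as a sketch, assuming the standard block_device_operations ->getgeo signature:

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mg_host *host = bdev->bd_disk->private_data;

        /* geometry derived from the IDENTIFY data at probe time */
        geo->cylinders = (unsigned short)host->cyls;
        geo->heads = (unsigned char)host->heads;
        geo->sectors = (unsigned char)host->sectors;

        return 0;
}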
787 struct mg_host *host = prv_data->host;
789 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
793 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
795 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
799 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
801 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
811 struct mg_host *host = prv_data->host;
813 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
816 outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
820 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
824 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
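The power-management hooks park and wake the card with MG_CMD_SLEEP / MG_CMD_WAKEUP, bracketing each step with mg_wait(MG_STAT_READY, MG_TMAX_CONF_TO_CMD) and restoring MG_REG_DRV_CTRL afterwards. A hedged sketch of the resume side (suspend is symmetric); the error codes, the mdelay() and the use_polling guard are assumptions:

static int mg_resume(struct platform_device *plat_dev)
{
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        struct mg_host *host = prv_data->host;

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return -EIO;                    /* error code assumed */

        outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
        mdelay(1);                              /* settle delay assumed */

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return -EIO;                    /* error code assumed */

        /* re-enable the interrupt line if the platform uses it */
        if (!prv_data->use_polling)             /* guard assumed */
                outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

        return 0;
}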
834 struct mg_host *host;
847 host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
848 if (!host) {
854 host->major = MG_DISK_MAJ;
857 prv_data->host = host;
858 host->dev = &plat_dev->dev;
868 host->dev_base = ioremap(rsc->start, resource_size(rsc));
869 if (!host->dev_base) {
875 MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);
886 host->rst = rsc->start;
889 err = gpio_request(host->rst, MG_RST_PIN);
892 gpio_direction_output(host->rst, 1);
909 host->rstout = rsc->start;
910 err = gpio_request(host->rstout, MG_RSTOUT_PIN);
913 gpio_direction_input(host->rstout);
919 err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
922 err = mg_disk_init(host);
933 host->irq = platform_get_irq(plat_dev, 0);
934 if (host->irq == -ENXIO) {
935 err = host->irq;
938 err = request_irq(host->irq, mg_irq,
940 MG_DEV_NAME, host);
950 err = mg_get_disk_id(host);
958 err = register_blkdev(host->major, MG_DISK_NAME);
964 if (!host->major)
965 host->major = err;
967 spin_lock_init(&host->lock);
970 host->breq = blk_init_queue(mg_request_poll, &host->lock);
972 host->breq = blk_init_queue(mg_request, &host->lock);
974 if (!host->breq) {
980 host->breq->queuedata = host;
983 err = elevator_change(host->breq, "noop");
989 blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
990 blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
992 init_timer(&host->timer);
993 host->timer.function = mg_times_out;
994 host->timer.data = (unsigned long)host;
996 host->gd = alloc_disk(MG_DISK_MAX_PART);
997 if (!host->gd) {
1003 host->gd->major = host->major;
1004 host->gd->first_minor = 0;
1005 host->gd->fops = &mg_disk_ops;
1006 host->gd->queue = host->breq;
1007 host->gd->private_data = host;
1008 sprintf(host->gd->disk_name, MG_DISK_NAME"a");
1010 set_capacity(host->gd, host->n_sectors);
1012 add_disk(host->gd);
1017 del_timer_sync(&host->timer);
1019 blk_cleanup_queue(host->breq);
1024 free_irq(host->irq, host);
1026 gpio_free(host->rstout);
1028 gpio_free(host->rst);
1030 iounmap(host->dev_base);
1032 kfree(host);
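mg_probe() builds the host in a fixed order (ioremap, GPIOs, reset, IRQ, IDENTIFY, block device, queue, timer, gendisk) and unwinds it in reverse on error. A condensed sketch of the queue/gendisk tail that the matches trace; the probe_err_* label names, the err handling, and the prv_data->use_polling test are assumptions not visible above:

        spin_lock_init(&host->lock);

        /* polled vs. interrupt-driven request function, per platform data */
        if (prv_data->use_polling)              /* guard assumed */
                host->breq = blk_init_queue(mg_request_poll, &host->lock);
        else
                host->breq = blk_init_queue(mg_request, &host->lock);
        if (!host->breq) {
                err = -ENOMEM;
                goto probe_err_5;               /* label name assumed */
        }
        host->breq->queuedata = host;

        /* a simple device: the noop elevator is enough */
        err = elevator_change(host->breq, "noop");
        if (err)
                goto probe_err_6;               /* label name assumed */

        blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
        blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);

        /* watchdog used by the interrupt path */
        init_timer(&host->timer);
        host->timer.function = mg_times_out;
        host->timer.data = (unsigned long)host;

        host->gd = alloc_disk(MG_DISK_MAX_PART);
        if (!host->gd) {
                err = -ENOMEM;
                goto probe_err_7;               /* label name assumed */
        }
        host->gd->major = host->major;
        host->gd->first_minor = 0;
        host->gd->fops = &mg_disk_ops;
        host->gd->queue = host->breq;
        host->gd->private_data = host;
        sprintf(host->gd->disk_name, MG_DISK_NAME"a");

        set_capacity(host->gd, host->n_sectors);
        add_disk(host->gd);

        return 0;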
1040 struct mg_host *host = prv_data->host;
1044 del_timer_sync(&host->timer);
1047 if (host->gd) {
1048 del_gendisk(host->gd);
1049 put_disk(host->gd);
1052 if (host->breq)
1053 blk_cleanup_queue(host->breq);
1056 unregister_blkdev(host->major, MG_DISK_NAME);
1060 free_irq(host->irq, host);
1064 gpio_free(host->rstout);
1067 if (host->rst)
1068 gpio_free(host->rst);
1071 if (host->dev_base)
1072 iounmap(host->dev_base);
1075 kfree(host);
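mg_remove() tears the host down in the reverse of probe order, guarding each step since probe can fail partway. A sketch of that ordering; the exact guards around free_irq() and the GPIO releases are assumptions where the matched lines omit them:

static int mg_remove(struct platform_device *plat_dev)
{
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        struct mg_host *host = prv_data->host;

        /* stop the watchdog before anything it might touch goes away */
        del_timer_sync(&host->timer);

        if (host->gd) {
                del_gendisk(host->gd);
                put_disk(host->gd);
        }
        if (host->breq)
                blk_cleanup_queue(host->breq);

        unregister_blkdev(host->major, MG_DISK_NAME);

        if (!prv_data->use_polling)             /* guard assumed */
                free_irq(host->irq, host);

        if (host->rstout)                       /* guard assumed */
                gpio_free(host->rstout);
        if (host->rst)
                gpio_free(host->rst);

        if (host->dev_base)
                iounmap(host->dev_base);

        kfree(host);
        return 0;
}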