Lines matching refs: hba in drivers/scsi/stex.c (Promise SuperTrak EX series SCSI driver)

373 static struct status_msg *stex_get_status(struct st_hba *hba)
375 struct status_msg *status = hba->status_buffer + hba->status_tail;
377 ++hba->status_tail;
378 hba->status_tail %= hba->sts_count+1;
394 static struct req_msg *stex_alloc_req(struct st_hba *hba)
396 struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
398 ++hba->req_head;
399 hba->req_head %= hba->rq_count+1;
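
Both stex_get_status and stex_alloc_req above advance an index and then wrap it modulo count+1, treating the shared DMA area as a ring of count+1 fixed-size slots (the handshake later reports rq_count+1 and sts_count+1 to the firmware). A minimal standalone sketch of the same indexing, with illustrative sizes and names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define RQ_COUNT 31        /* illustrative; the ring then has RQ_COUNT+1 slots */
#define RQ_SIZE  64        /* illustrative slot size in bytes */

static uint8_t dma_mem[(RQ_COUNT + 1) * RQ_SIZE];
static unsigned int req_head;

/* Hand out the next request slot, then advance and wrap the head index. */
static void *alloc_req(void)
{
	void *req = dma_mem + req_head * RQ_SIZE;

	++req_head;
	req_head %= RQ_COUNT + 1;
	return req;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("request %d at offset %td\n", i,
		       (uint8_t *)alloc_req() - dma_mem);
	return 0;
}
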
404 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
406 return (struct req_msg *)(hba->dma_mem +
407 hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
410 static int stex_map_sg(struct st_hba *hba,
427 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
442 static int stex_ss_map_sg(struct st_hba *hba,
459 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
475 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
480 p = hba->copy_buffer;
483 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
491 p->bus = hba->pdev->bus->number;
492 p->slot = hba->pdev->devfn;
494 p->irq_vec = hba->pdev->irq;
495 p->id = hba->pdev->vendor << 16 | hba->pdev->device;
497 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
503 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
507 hba->ccb[tag].req = req;
508 hba->out_req_cnt++;
510 writel(hba->req_head, hba->mmio_base + IMR0);
511 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
512 readl(hba->mmio_base + IDBL); /* flush */
516 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
524 hba->ccb[tag].req = req;
525 hba->out_req_cnt++;
527 cmd = hba->ccb[tag].cmd;
533 addr = hba->dma_handle + hba->req_head * hba->rq_size;
534 addr += (hba->ccb[tag].sg_count+4)/11;
537 ++hba->req_head;
538 hba->req_head %= hba->rq_count+1;
540 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
541 readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
542 writel(addr, hba->mmio_base + YH2I_REQ);
543 readl(hba->mmio_base + YH2I_REQ); /* flush */
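
stex_ss_send_cmd above posts a 64-bit bus address to the controller through two 32-bit registers, writing the high half first, with each writel followed by a readl to flush the posted write. The (addr >> 16) >> 16 idiom keeps the shift well defined even when dma_addr_t is only 32 bits wide. A standalone sketch of the split (the register variables and the sample address are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Split an address into high and low 32-bit halves for two register writes.
 * (addr >> 16) >> 16 mirrors the driver's idiom, which avoids a shift by 32
 * when the underlying address type is only 32 bits wide.
 */
static void post_addr(uint64_t addr, uint32_t *reg_hi, uint32_t *reg_lo)
{
	*reg_hi = (uint32_t)((addr >> 16) >> 16);
	*reg_lo = (uint32_t)addr;
}

int main(void)
{
	uint32_t hi, lo;

	post_addr(0x123456789abcULL, &hi, &lo);
	printf("hi=%#x lo=%#x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}
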
577 struct st_hba *hba;
586 hba = (struct st_hba *) &host->hostdata[0];
588 if (unlikely(hba->mu_status == MU_STATE_RESETTING))
614 if (hba->cardtype == st_shasta || id == host->max_id - 1) {
654 ver.host_no = hba->host->host_no;
673 req = hba->alloc_rq(hba);
688 hba->ccb[tag].cmd = cmd;
689 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
690 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
692 if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
693 hba->ccb[tag].sg_count = 0;
697 hba->send(hba, req, tag);
760 static void stex_check_cmd(struct st_hba *hba,
769 static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
771 void __iomem *base = hba->mmio_base;
781 hba->status_head = readl(base + OMR1);
782 if (unlikely(hba->status_head > hba->sts_count)) {
784 pci_name(hba->pdev));
796 if (unlikely(hba->out_req_cnt <= 0 ||
797 (hba->mu_status == MU_STATE_RESETTING &&
798 hba->cardtype != st_yosemite))) {
799 hba->status_tail = hba->status_head;
803 while (hba->status_tail != hba->status_head) {
804 resp = stex_get_status(hba);
806 if (unlikely(tag >= hba->host->can_queue)) {
808 "(%s): invalid tag\n", pci_name(hba->pdev));
812 hba->out_req_cnt--;
813 ccb = &hba->ccb[tag];
814 if (unlikely(hba->wait_ccb == ccb))
815 hba->wait_ccb = NULL;
818 "(%s): lagging req\n", pci_name(hba->pdev));
826 pci_name(hba->pdev));
838 if (hba->cardtype == st_yosemite)
839 stex_check_cmd(hba, ccb, resp);
843 stex_controller_info(hba, ccb);
852 writel(hba->status_head, base + IMR1);
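
In stex_mu_intr above, the handler reads the producer index from OMR1 into status_head, consumes entries via stex_get_status until status_tail catches up, skips entries whose tag is not below the host's can_queue, and finally writes status_head back to IMR1. A simplified standalone sketch of that consumer loop (the entry layout and limits are illustrative):

#include <stdint.h>
#include <stdio.h>

#define STS_COUNT 32          /* illustrative; the ring then has STS_COUNT+1 slots */
#define CAN_QUEUE 32          /* illustrative upper bound on valid tags */

struct status_entry {
	uint16_t tag;
};

static struct status_entry ring[STS_COUNT + 1];
static unsigned int tail;

/* Consume everything the producer has published; head is the producer index. */
static void drain(unsigned int head)
{
	while (tail != head) {
		struct status_entry *e = &ring[tail];

		++tail;
		tail %= STS_COUNT + 1;

		if (e->tag >= CAN_QUEUE) {
			fprintf(stderr, "invalid tag %u\n", (unsigned)e->tag);
			continue;
		}
		/* look up and complete the command tracked under e->tag here */
	}
}

int main(void)
{
	ring[0].tag = 3;
	ring[1].tag = 100;        /* out of range, reported and skipped */
	drain(2);
	return 0;
}
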
858 struct st_hba *hba = __hba;
859 void __iomem *base = hba->mmio_base;
863 spin_lock_irqsave(hba->host->host_lock, flags);
871 stex_mu_intr(hba, data);
872 spin_unlock_irqrestore(hba->host->host_lock, flags);
874 hba->cardtype == st_shasta))
875 queue_work(hba->work_q, &hba->reset_work);
879 spin_unlock_irqrestore(hba->host->host_lock, flags);
884 static void stex_ss_mu_intr(struct st_hba *hba)
894 if (unlikely(hba->out_req_cnt <= 0 ||
895 hba->mu_status == MU_STATE_RESETTING))
898 while (count < hba->sts_count) {
899 scratch = hba->scratch + hba->status_tail;
904 resp = hba->status_buffer + hba->status_tail;
907 ++hba->status_tail;
908 hba->status_tail %= hba->sts_count+1;
911 if (unlikely(tag >= hba->host->can_queue)) {
913 "(%s): invalid tag\n", pci_name(hba->pdev));
917 hba->out_req_cnt--;
918 ccb = &hba->ccb[tag];
919 if (unlikely(hba->wait_ccb == ccb))
920 hba->wait_ccb = NULL;
923 "(%s): lagging req\n", pci_name(hba->pdev));
939 pci_name(hba->pdev));
946 stex_check_cmd(hba, ccb, resp);
959 struct st_hba *hba = __hba;
960 void __iomem *base = hba->mmio_base;
964 spin_lock_irqsave(hba->host->host_lock, flags);
970 stex_ss_mu_intr(hba);
971 spin_unlock_irqrestore(hba->host->host_lock, flags);
973 queue_work(hba->work_q, &hba->reset_work);
977 spin_unlock_irqrestore(hba->host->host_lock, flags);
982 static int stex_common_handshake(struct st_hba *hba)
984 void __iomem *base = hba->mmio_base;
998 pci_name(hba->pdev));
1011 if (hba->host->can_queue > data) {
1012 hba->host->can_queue = data;
1013 hba->host->cmd_per_lun = data;
1017 h = (struct handshake_frame *)hba->status_buffer;
1018 h->rb_phy = cpu_to_le64(hba->dma_handle);
1019 h->req_sz = cpu_to_le16(hba->rq_size);
1020 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1022 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1025 if (hba->extra_offset) {
1026 h->extra_offset = cpu_to_le32(hba->extra_offset);
1027 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1031 status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
1048 pci_name(hba->pdev));
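
The handshake frame built in stex_common_handshake above hands the firmware the base bus address of the DMA area plus the queue geometry, with the counts sent as rq_count+1 and sts_count+1 and every multi-byte field stored little-endian. A hedged standalone sketch of packing such a frame (the struct layout and values here are illustrative, not the driver's handshake_frame, and htole16/htole64 stand in for cpu_to_le16/cpu_to_le64):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative handshake layout: base address of the ring area plus the
 * request/status geometry, all little-endian as the firmware expects.
 */
struct hs_frame {
	uint64_t rb_phy;        /* bus address of the request/status rings */
	uint16_t req_sz;        /* size of one request slot */
	uint16_t req_cnt;       /* number of request slots */
	uint16_t status_cnt;    /* number of status slots */
};

int main(void)
{
	struct hs_frame h = {
		.rb_phy     = htole64(0x1000000ULL),  /* illustrative DMA handle */
		.req_sz     = htole16(1048),          /* illustrative slot size */
		.req_cnt    = htole16(31 + 1),        /* rq_count+1 convention */
		.status_cnt = htole16(31 + 1),        /* sts_count+1 convention */
	};

	printf("frame ready, %zu bytes\n", sizeof(h));
	return 0;
}
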
1066 static int stex_ss_handshake(struct st_hba *hba)
1068 void __iomem *base = hba->mmio_base;
1081 pci_name(hba->pdev));
1087 msg_h = (struct st_msg_header *)hba->dma_mem;
1088 msg_h->handle = cpu_to_le64(hba->dma_handle);
1092 h->rb_phy = cpu_to_le64(hba->dma_handle);
1093 h->req_sz = cpu_to_le16(hba->rq_size);
1094 h->req_cnt = cpu_to_le16(hba->rq_count+1);
1096 h->status_cnt = cpu_to_le16(hba->sts_count+1);
1100 scratch_size = (hba->sts_count+1)*sizeof(u32);
1106 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1108 writel(hba->dma_handle, base + YH2I_REQ);
1111 scratch = hba->scratch;
1117 pci_name(hba->pdev));
1130 static int stex_handshake(struct st_hba *hba)
1136 err = (hba->cardtype == st_yel) ?
1137 stex_ss_handshake(hba) : stex_common_handshake(hba);
1138 spin_lock_irqsave(hba->host->host_lock, flags);
1139 mu_status = hba->mu_status;
1141 hba->req_head = 0;
1142 hba->req_tail = 0;
1143 hba->status_head = 0;
1144 hba->status_tail = 0;
1145 hba->out_req_cnt = 0;
1146 hba->mu_status = MU_STATE_STARTED;
1148 hba->mu_status = MU_STATE_FAILED;
1150 wake_up_all(&hba->reset_waitq);
1151 spin_unlock_irqrestore(hba->host->host_lock, flags);
1158 struct st_hba *hba = (struct st_hba *)host->hostdata;
1166 "(%s): aborting command\n", pci_name(hba->pdev));
1169 base = hba->mmio_base;
1172 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1173 hba->wait_ccb = &hba->ccb[tag];
1177 if (hba->cardtype == st_yel) {
1183 stex_ss_mu_intr(hba);
1192 stex_mu_intr(hba, data);
1194 if (hba->wait_ccb == NULL) {
1196 "(%s): lost interrupt\n", pci_name(hba->pdev));
1202 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1203 hba->wait_ccb = NULL;
1210 static void stex_hard_reset(struct st_hba *hba)
1218 pci_read_config_dword(hba->pdev, i * 4,
1219 &hba->pdev->saved_config_space[i]);
1223 bus = hba->pdev->bus;
1237 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1245 pci_write_config_dword(hba->pdev, i * 4,
1246 hba->pdev->saved_config_space[i]);
1249 static int stex_yos_reset(struct st_hba *hba)
1255 base = hba->mmio_base;
1259 while (hba->out_req_cnt > 0) {
1262 "(%s): reset timeout\n", pci_name(hba->pdev));
1269 spin_lock_irqsave(hba->host->host_lock, flags);
1271 hba->mu_status = MU_STATE_FAILED;
1273 hba->mu_status = MU_STATE_STARTED;
1274 wake_up_all(&hba->reset_waitq);
1275 spin_unlock_irqrestore(hba->host->host_lock, flags);
1280 static void stex_ss_reset(struct st_hba *hba)
1282 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1283 readl(hba->mmio_base + YH2I_INT);
1287 static int stex_do_reset(struct st_hba *hba)
1294 spin_lock_irqsave(hba->host->host_lock, flags);
1295 if (hba->mu_status == MU_STATE_STARTING) {
1296 spin_unlock_irqrestore(hba->host->host_lock, flags);
1298 pci_name(hba->pdev));
1301 while (hba->mu_status == MU_STATE_RESETTING) {
1302 spin_unlock_irqrestore(hba->host->host_lock, flags);
1303 wait_event_timeout(hba->reset_waitq,
1304 hba->mu_status != MU_STATE_RESETTING,
1306 spin_lock_irqsave(hba->host->host_lock, flags);
1307 mu_status = hba->mu_status;
1311 spin_unlock_irqrestore(hba->host->host_lock, flags);
1315 hba->mu_status = MU_STATE_RESETTING;
1316 spin_unlock_irqrestore(hba->host->host_lock, flags);
1318 if (hba->cardtype == st_yosemite)
1319 return stex_yos_reset(hba);
1321 if (hba->cardtype == st_shasta)
1322 stex_hard_reset(hba);
1323 else if (hba->cardtype == st_yel)
1324 stex_ss_reset(hba);
1326 spin_lock_irqsave(hba->host->host_lock, flags);
1327 for (tag = 0; tag < hba->host->can_queue; tag++) {
1328 ccb = &hba->ccb[tag];
1339 spin_unlock_irqrestore(hba->host->host_lock, flags);
1341 if (stex_handshake(hba) == 0)
1345 pci_name(hba->pdev));
1351 struct st_hba *hba;
1353 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1356 "(%s): resetting host\n", pci_name(hba->pdev));
1359 return stex_do_reset(hba) ? FAILED : SUCCESS;
1364 struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1366 stex_do_reset(hba);
1508 static int stex_request_irq(struct st_hba *hba)
1510 struct pci_dev *pdev = hba->pdev;
1520 hba->msi_enabled = 1;
1522 hba->msi_enabled = 0;
1524 status = request_irq(pdev->irq, hba->cardtype == st_yel ?
1525 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1528 if (hba->msi_enabled)
1534 static void stex_free_irq(struct st_hba *hba)
1536 struct pci_dev *pdev = hba->pdev;
1538 free_irq(pdev->irq, hba);
1539 if (hba->msi_enabled)
1545 struct st_hba *hba;
1566 hba = (struct st_hba *)host->hostdata;
1567 memset(hba, 0, sizeof(struct st_hba));
1576 hba->mmio_base = pci_ioremap_bar(pdev, 0);
1577 if ( !hba->mmio_base) {
1591 hba->cardtype = (unsigned int) id->driver_data;
1592 ci = &stex_card_info[hba->cardtype];
1594 if (hba->cardtype == st_yel)
1597 hba->dma_size = cp_offset + sizeof(struct st_frame);
1598 if (hba->cardtype == st_seq ||
1599 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1600 hba->extra_offset = hba->dma_size;
1601 hba->dma_size += ST_ADDITIONAL_MEM;
1603 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1604 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1605 if (!hba->dma_mem) {
1607 if (hba->cardtype == st_seq ||
1608 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1612 hba->dma_size = hba->extra_offset
1614 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1615 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1618 if (!hba->dma_mem) {
1626 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
1627 if (!hba->ccb) {
1634 if (hba->cardtype == st_yel)
1635 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
1636 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
1637 hba->copy_buffer = hba->dma_mem + cp_offset;
1638 hba->rq_count = ci->rq_count;
1639 hba->rq_size = ci->rq_size;
1640 hba->sts_count = ci->sts_count;
1641 hba->alloc_rq = ci->alloc_rq;
1642 hba->map_sg = ci->map_sg;
1643 hba->send = ci->send;
1644 hba->mu_status = MU_STATE_STARTING;
1646 if (hba->cardtype == st_yel)
1658 hba->host = host;
1659 hba->pdev = pdev;
1660 init_waitqueue_head(&hba->reset_waitq);
1662 snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1664 hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1665 if (!hba->work_q) {
1671 INIT_WORK(&hba->reset_work, stex_reset_work);
1673 err = stex_request_irq(hba);
1680 err = stex_handshake(hba);
1691 pci_set_drvdata(pdev, hba);
1705 stex_free_irq(hba);
1707 destroy_workqueue(hba->work_q);
1709 kfree(hba->ccb);
1711 dma_free_coherent(&pdev->dev, hba->dma_size,
1712 hba->dma_mem, hba->dma_handle);
1714 iounmap(hba->mmio_base);
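
The tail of stex_probe visible above unwinds in reverse order of acquisition: free the IRQ, destroy the workqueue, free the ccb array, release the coherent DMA buffer, and unmap the registers. A generic standalone sketch of the same goto-unwind pattern (the resources here are plain user-space stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* Error handling via goto labels in reverse order of acquisition,
 * mirroring the unwind labels at the end of a probe routine.
 */
static int probe_like(void)
{
	char *ccb;
	char *dma_mem;
	int err = -1;

	ccb = malloc(64);
	if (!ccb)
		goto out;

	dma_mem = malloc(4096);
	if (!dma_mem)
		goto out_free_ccb;

	/* later setup steps would add more labels below, freed in reverse order */

	free(dma_mem);
	free(ccb);
	return 0;

out_free_ccb:
	free(ccb);
out:
	return err;
}

int main(void)
{
	return probe_like() ? 1 : 0;
}
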
1725 static void stex_hba_stop(struct st_hba *hba)
1733 spin_lock_irqsave(hba->host->host_lock, flags);
1734 req = hba->alloc_rq(hba);
1735 if (hba->cardtype == st_yel) {
1737 memset(msg_h, 0, hba->rq_size);
1739 memset(req, 0, hba->rq_size);
1741 if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
1752 hba->ccb[tag].cmd = NULL;
1753 hba->ccb[tag].sg_count = 0;
1754 hba->ccb[tag].sense_bufflen = 0;
1755 hba->ccb[tag].sense_buffer = NULL;
1756 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1758 hba->send(hba, req, tag);
1759 spin_unlock_irqrestore(hba->host->host_lock, flags);
1762 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1764 hba->ccb[tag].req_type = 0;
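
stex_hba_stop above builds one last request, marks its ccb with PASSTHRU_REQ_TYPE, sends it, and then polls until the completion path clears that flag or a timeout expires, at which point it clears the flag itself and gives up. A standalone sketch of that poll-until-cleared-or-timeout loop (the timeout value, the flag value, and simulate_completion are illustrative stand-ins):

#include <stdio.h>
#include <unistd.h>

#define PASSTHRU_REQ_TYPE 0x1          /* illustrative stand-in for the driver's flag */
#define STOP_TIMEOUT_SECS 180          /* illustrative timeout */

static unsigned int req_type = PASSTHRU_REQ_TYPE;

/* Stand-in for the interrupt handler clearing the flag on completion. */
static void simulate_completion(int waited)
{
	if (waited >= 2)
		req_type = 0;
}

int main(void)
{
	int waited = 0;

	while (req_type & PASSTHRU_REQ_TYPE) {
		if (waited >= STOP_TIMEOUT_SECS) {
			req_type = 0;   /* give up and clear the flag ourselves */
			fprintf(stderr, "stop request timed out\n");
			break;
		}
		sleep(1);
		simulate_completion(++waited);
	}
	printf("stopped after %d s\n", waited);
	return 0;
}
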
1771 static void stex_hba_free(struct st_hba *hba)
1773 stex_free_irq(hba);
1775 destroy_workqueue(hba->work_q);
1777 iounmap(hba->mmio_base);
1779 pci_release_regions(hba->pdev);
1781 kfree(hba->ccb);
1783 dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1784 hba->dma_mem, hba->dma_handle);
1789 struct st_hba *hba = pci_get_drvdata(pdev);
1791 scsi_remove_host(hba->host);
1793 stex_hba_stop(hba);
1795 stex_hba_free(hba);
1797 scsi_host_put(hba->host);
1804 struct st_hba *hba = pci_get_drvdata(pdev);
1806 stex_hba_stop(hba);