Lines Matching refs:ha

102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
125 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
160 struct qla_hw_data *ha = vha->hw;
169 BUG_ON(ha->tgt.tgt_vp_map == NULL);
170 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
171 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
172 return ha->tgt.tgt_vp_map[vp_idx].vha;
181 struct qla_hw_data *ha = vha->hw;
186 BUG_ON(ha->tgt.tgt_vp_map == NULL);
187 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
188 return ha->tgt.tgt_vp_map[vp_idx].vha;
383 struct qla_hw_data *ha = vha->hw;
390 ha->tgt.tgt_ops->free_session(sess);
405 /* ha->hardware_lock supposed to be held on entry */
421 /* ha->hardware_lock supposed to be held on entry */
424 struct qla_hw_data *ha = vha->hw;
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt);
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
463 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
485 /* ha->hardware_lock supposed to be held on entry */
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
518 /* ha->hardware_lock supposed to be held on entry */
532 struct qla_hw_data *ha = vha->hw;
539 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
544 vha->vp_idx, qla2x00_gid_list_size(ha));
569 id_iter += ha->gid_list_info_size;
573 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
578 /* ha->hardware_lock supposed to be held on entry */
592 struct qla_hw_data *ha = vha->hw;
596 spin_lock_irqsave(&ha->hardware_lock, flags);
607 ha->tgt.tgt_ops->shutdown_sess(sess);
608 ha->tgt.tgt_ops->put_sess(sess);
615 spin_unlock_irqrestore(&ha->hardware_lock, flags);
627 struct qla_hw_data *ha = vha->hw;
633 spin_lock_irqsave(&ha->hardware_lock, flags);
650 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
655 spin_unlock_irqrestore(&ha->hardware_lock, flags);
660 spin_unlock_irqrestore(&ha->hardware_lock, flags);
690 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
705 spin_lock_irqsave(&ha->hardware_lock, flags);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
725 struct qla_hw_data *ha = vha->hw;
739 spin_lock_irqsave(&ha->hardware_lock, flags);
741 spin_unlock_irqrestore(&ha->hardware_lock, flags);
746 spin_unlock_irqrestore(&ha->hardware_lock, flags);
752 spin_lock_irqsave(&ha->hardware_lock, flags);
768 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
779 ha->tgt.tgt_ops->put_sess(sess);
780 spin_unlock_irqrestore(&ha->hardware_lock, flags);
785 struct qla_hw_data *ha = vha->hw;
796 spin_lock_irqsave(&ha->hardware_lock, flags);
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
803 spin_unlock_irqrestore(&ha->hardware_lock, flags);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
816 struct qla_hw_data *ha = tgt->ha;
823 spin_lock_irqsave(&ha->hardware_lock, flags);
828 spin_unlock_irqrestore(&ha->hardware_lock, flags);
837 struct qla_hw_data *ha = tgt->ha;
869 spin_lock_irqsave(&ha->hardware_lock, flags);
872 spin_unlock_irqrestore(&ha->hardware_lock, flags);
896 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
908 struct qla_hw_data *ha = tgt->ha;
909 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
924 spin_lock_irqsave(&ha->hardware_lock, flags);
926 spin_unlock_irqrestore(&ha->hardware_lock, flags);
928 spin_lock_irqsave(&ha->hardware_lock, flags);
932 spin_unlock_irqrestore(&ha->hardware_lock, flags);
956 /* ha->hardware_lock supposed to be held on entry */
989 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
996 struct qla_hw_data *ha = vha->hw;
1000 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1050 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1056 struct qla_hw_data *ha = vha->hw;
1062 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1063 ha, abts, status);
1129 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1137 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1178 /* ha->hardware_lock supposed to be held on entry */
1182 struct qla_hw_data *ha = vha->hw;
1222 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
1236 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1241 struct qla_hw_data *ha = vha->hw;
1273 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1298 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1300 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1307 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1308 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1309 ha, atio, resp_code);
1312 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1315 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1317 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1319 "request packet\n", ha->vp_idx, __func__);
1328 ctio->vp_index = ha->vp_idx;
1345 qla2x00_start_iocbs(ha, ha->req);
1358 struct qla_hw_data *ha = vha->hw;
1365 spin_lock_irqsave(&ha->hardware_lock, flags);
1367 if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
1375 ha->chip_reset);
1376 ha->tgt.tgt_ops->free_mcmd(mcmd);
1377 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1400 ha->tgt.tgt_ops->free_mcmd(mcmd);
1401 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1413 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1440 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
1468 struct qla_hw_data *ha = vha->hw;
1473 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1477 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
1481 qla2x00_clean_dsd_pool(ha, NULL, cmd);
1484 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
1516 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1531 /* ha->hardware_lock supposed to be held on entry */
1534 struct qla_hw_data *ha = vha->hw;
1537 h = ha->tgt.current_handle;
1543 if (h == ha->tgt.current_handle) {
1546 "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1552 (ha->tgt.cmds[h-1] != NULL));
1555 ha->tgt.current_handle = h;
1560 /* ha->hardware_lock supposed to be held on entry */
1566 struct qla_hw_data *ha = vha->hw;
1587 ha->tgt.cmds[h-1] = prm->cmd;
1605 * ha->hardware_lock supposed to be held on entry. We have already made sure
1664 * ha->hardware_lock supposed to be held on entry. We have already made sure
1718 * Called without ha->hardware_lock held
1726 struct qla_hw_data *ha = vha->hw;
1790 (IS_FWI2_CAPABLE(ha) &&
1801 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1804 if (ha->tgt.enable_class_2)
1810 return ha->tgt.enable_explicit_conf &&
1910 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1920 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1950 "lost", prm->tgt->ha->vp_idx,
2089 struct qla_hw_data *ha;
2100 ha = vha->hw;
2141 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2184 ha->tgt.cmds[h-1] = prm->cmd;
2219 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2266 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2269 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2278 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2299 struct qla_hw_data *ha = vha->hw;
2324 spin_lock_irqsave(&ha->hardware_lock, flags);
2326 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2336 ha->chip_reset);
2337 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2372 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2428 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2444 struct qla_hw_data *ha = vha->hw;
2464 spin_lock_irqsave(&ha->hardware_lock, flags);
2466 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2476 ha->chip_reset);
2477 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2505 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2661 struct qla_hw_data *ha = vha->hw;
2666 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2825 /* ha->hardware_lock supposed to be held on entry */
2910 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2930 /* ha->hardware_lock supposed to be held on entry */
2934 struct qla_hw_data *ha = vha->hw;
2937 if (ha->tgt.cmds[handle] != NULL) {
2938 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2939 ha->tgt.cmds[handle] = NULL;
2945 /* ha->hardware_lock supposed to be held on entry */
2989 struct qla_hw_data *ha = vha->hw;
3008 ha->tgt.tgt_ops->handle_data(cmd);
3021 ha->tgt.tgt_ops->free_cmd(cmd);
3025 qlt_host_reset_handler(struct qla_hw_data *ha)
3029 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3047 spin_lock_irqsave(&ha->hardware_lock, flags);
3052 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3056 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3061 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3066 struct qla_hw_data *ha = vha->hw;
3146 ha->tgt.tgt_ops->handle_dif_err(cmd);
3193 ha->tgt.tgt_ops->handle_data(cmd);
3211 ha->tgt.tgt_ops->free_cmd(cmd);
3254 struct qla_hw_data *ha = vha->hw;
3290 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3297 spin_lock_irqsave(&ha->hardware_lock, flags);
3298 ha->tgt.tgt_ops->put_sess(sess);
3299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3309 spin_lock_irqsave(&ha->hardware_lock, flags);
3314 ha->tgt.tgt_ops->put_sess(sess);
3315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3361 struct qla_hw_data *ha = vha->hw;
3392 spin_lock_irqsave(&ha->hardware_lock, flags);
3394 ha->tgt.tgt_ops->put_sess(sess);
3395 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3400 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3408 spin_lock_irqsave(&ha->hardware_lock, flags);
3410 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3415 /* ha->hardware_lock supposed to be held on entry */
3419 struct qla_hw_data *ha = vha->hw;
3430 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3452 ha->tgt.tgt_ops->put_sess(sess);
3469 /* ha->hardware_lock supposed to be held on entry */
3474 struct qla_hw_data *ha = vha->hw;
3565 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
3577 /* ha->hardware_lock supposed to be held on entry */
3581 struct qla_hw_data *ha = vha->hw;
3592 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3607 /* ha->hardware_lock supposed to be held on entry */
3612 struct qla_hw_data *ha = vha->hw;
3634 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3647 /* ha->hardware_lock supposed to be held on entry */
3651 struct qla_hw_data *ha = vha->hw;
3655 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
3657 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
3670 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3845 struct qla_hw_data *ha = vha->hw;
3861 spin_lock_irqsave(&ha->hardware_lock, flags);
3864 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3887 spin_lock_irqsave(&ha->hardware_lock, flags);
3890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3920 spin_lock_irqsave(&ha->hardware_lock, flags);
3923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3953 spin_lock_irqsave(&ha->hardware_lock, flags);
3965 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3971 struct qla_hw_data *ha = vha->hw;
3975 spin_lock_irqsave(&ha->hardware_lock, flags);
3983 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4068 /* ha->hardware_lock supposed to be held on entry */
4157 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4162 struct qla_hw_data *ha = vha->hw;
4253 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
4295 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4302 struct qla_hw_data *ha = vha->hw;
4306 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4360 struct qla_hw_data *ha = vha->hw;
4388 sess = ha->tgt.tgt_ops->find_sess_by_s_id
4445 struct qla_hw_data *ha = vha->hw;
4451 if (list_empty(&ha->tgt.q_full_list))
4458 if (list_empty(&ha->tgt.q_full_list)) {
4463 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
4521 struct qla_hw_data *ha = vha->hw;
4524 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
4532 /* ha->hardware_lock supposed to be held on entry */
4537 struct qla_hw_data *ha = vha->hw;
4543 "ATIO pkt, but no tgt (ha %p)", ha);
4626 /* ha->hardware_lock supposed to be held on entry */
4630 struct qla_hw_data *ha = vha->hw;
4636 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
4811 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4816 struct qla_hw_data *ha = vha->hw;
4820 if (!ha->tgt.tgt_ops)
4825 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
4830 IS_QLA2100(ha))
4989 struct qla_hw_data *ha = vha->hw;
4996 spin_lock_irqsave(&ha->hardware_lock, flags);
5005 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5008 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5015 spin_lock_irqsave(&ha->hardware_lock, flags);
5029 ha->tgt.tgt_ops->put_sess(sess);
5030 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5036 ha->tgt.tgt_ops->put_sess(sess);
5037 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5045 struct qla_hw_data *ha = vha->hw;
5054 spin_lock_irqsave(&ha->hardware_lock, flags);
5060 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5062 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5069 spin_lock_irqsave(&ha->hardware_lock, flags);
5086 ha->tgt.tgt_ops->put_sess(sess);
5087 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5093 ha->tgt.tgt_ops->put_sess(sess);
5094 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5139 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
5146 if (!IS_TGT_MODE_CAPABLE(ha)) {
5153 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
5167 tgt->ha = ha;
5205 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
5223 vha->host_no, ha);
5264 struct qla_hw_data *ha;
5273 ha = vha->hw;
5282 spin_lock_irqsave(&ha->hardware_lock, flags);
5286 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5292 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5295 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5329 struct qla_hw_data *ha = vha->hw;
5335 ha->tgt.tgt_ops = NULL;
5346 struct qla_hw_data *ha = vha->hw;
5360 if (ha->tgt.ini_mode_force_reverse)
5367 struct qla_hw_data *ha = vha->hw;
5383 if (ha->tgt.ini_mode_force_reverse)
5395 struct qla_hw_data *ha = vha->hw;
5398 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5408 spin_lock_irqsave(&ha->hardware_lock, flags);
5411 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5431 struct qla_hw_data *ha = vha->hw;
5443 spin_lock_irqsave(&ha->hardware_lock, flags);
5445 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5458 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
5476 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
5478 qlt_add_target(ha, vha);
5499 * @ha: HA context
5509 struct qla_hw_data *ha = vha->hw;
5511 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
5516 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
5525 * @ha: SCSI driver HA context
5530 struct qla_hw_data *ha = vha->hw;
5537 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
5538 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5544 ha->tgt.atio_ring_index++;
5545 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
5546 ha->tgt.atio_ring_index = 0;
5547 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
5549 ha->tgt.atio_ring_ptr++;
5552 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
5558 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
5564 struct qla_hw_data *ha = vha->hw;
5572 if (IS_ATIO_MSIX_CAPABLE(ha)) {
5573 struct qla_msix_entry *msix = &ha->msix_entries[2];
5574 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
5586 struct qla_hw_data *ha = vha->hw;
5589 if (!ha->tgt.saved_set) {
5591 ha->tgt.saved_exchange_count = nv->exchange_count;
5592 ha->tgt.saved_firmware_options_1 =
5594 ha->tgt.saved_firmware_options_2 =
5596 ha->tgt.saved_firmware_options_3 =
5598 ha->tgt.saved_set = 1;
5626 if (ha->tgt.saved_set) {
5627 nv->exchange_count = ha->tgt.saved_exchange_count;
5629 ha->tgt.saved_firmware_options_1;
5631 ha->tgt.saved_firmware_options_2;
5633 ha->tgt.saved_firmware_options_3;
5641 if (ha->tgt.enable_class_2) {
5659 struct qla_hw_data *ha = vha->hw;
5661 if (ha->tgt.node_name_set) {
5662 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5670 struct qla_hw_data *ha = vha->hw;
5676 if (!ha->tgt.saved_set) {
5678 ha->tgt.saved_exchange_count = nv->exchange_count;
5679 ha->tgt.saved_firmware_options_1 =
5681 ha->tgt.saved_firmware_options_2 =
5683 ha->tgt.saved_firmware_options_3 =
5685 ha->tgt.saved_set = 1;
5714 if (ha->tgt.saved_set) {
5715 nv->exchange_count = ha->tgt.saved_exchange_count;
5717 ha->tgt.saved_firmware_options_1;
5719 ha->tgt.saved_firmware_options_2;
5721 ha->tgt.saved_firmware_options_3;
5729 if (ha->tgt.enable_class_2) {
5747 struct qla_hw_data *ha = vha->hw;
5752 if (ha->tgt.node_name_set) {
5753 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
5759 qlt_83xx_iospace_config(struct qla_hw_data *ha)
5764 ha->msix_count += 1; /* For ATIO Q */
5795 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5800 if (ha->mqenable || IS_QLA83XX(ha)) {
5801 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5802 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5804 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
5805 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
5818 struct qla_hw_data *ha;
5822 ha = rsp->hw;
5823 vha = pci_get_drvdata(ha->pdev);
5825 spin_lock_irqsave(&ha->hardware_lock, flags);
5830 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5836 qlt_mem_alloc(struct qla_hw_data *ha)
5841 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
5843 if (!ha->tgt.tgt_vp_map)
5846 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
5847 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
5848 &ha->tgt.atio_dma, GFP_KERNEL);
5849 if (!ha->tgt.atio_ring) {
5850 kfree(ha->tgt.tgt_vp_map);
5857 qlt_mem_free(struct qla_hw_data *ha)
5862 if (ha->tgt.atio_ring) {
5863 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
5864 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
5865 ha->tgt.atio_dma);
5867 kfree(ha->tgt.tgt_vp_map);