Lines matching refs: evt (ibmvfc driver, drivers/scsi/ibmvfc/ibmvfc.c)

150  * @evt:		ibmvfc event struct
153 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
155 struct ibmvfc_host *vhost = evt->vhost;
156 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
157 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
161 entry->evt = evt;
163 entry->fmt = evt->crq.format;
184 * @evt: ibmvfc event struct
187 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
189 struct ibmvfc_host *vhost = evt->vhost;
190 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
191 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
194 entry->evt = evt;
196 entry->fmt = evt->crq.format;
222 #define ibmvfc_trc_start(evt) do { } while (0)
223 #define ibmvfc_trc_end(evt) do { } while (0)
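These fragments are the driver's event tracing hooks: ibmvfc_trc_start and ibmvfc_trc_end record the event pointer and CRQ format into a trace ring at submit and completion time, and collapse to do-nothing macros when tracing is compiled out (lines 222-223). A minimal userspace sketch of that pattern, using invented names (trc_entry, trc_buf, trc_index) rather than the driver's trace types:

#include <stdio.h>

struct evt { int id; unsigned char fmt; };

#ifdef ENABLE_TRACE
struct trc_entry { struct evt *evt; unsigned char fmt; char type; };

#define TRACE_SIZE 8
static struct trc_entry trc_buf[TRACE_SIZE];
static unsigned int trc_index;

static void trc_start(struct evt *evt)
{
    struct trc_entry *e = &trc_buf[trc_index++ & (TRACE_SIZE - 1)];
    e->evt = evt;           /* which event was submitted            */
    e->fmt = evt->fmt;      /* and what kind of request it carried  */
    e->type = 'S';
}

static void trc_end(struct evt *evt)
{
    struct trc_entry *e = &trc_buf[trc_index++ & (TRACE_SIZE - 1)];
    e->evt = evt;
    e->fmt = evt->fmt;
    e->type = 'E';
}
#else
/* Tracing compiled out: the call sites cost nothing. */
#define trc_start(evt) do { } while (0)
#define trc_end(evt)   do { } while (0)
#endif

int main(void)
{
    struct evt e = { .id = 1, .fmt = 0x01 };

    trc_start(&e);
    /* ... request runs ... */
    trc_end(&e);
    printf("traced event %d\n", e.id);
    return 0;
}

Build with -DENABLE_TRACE to get the recording versions; without it the calls vanish exactly like the stub macros above.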
743 * @evt: ibmvfc event to be checked for validity
749 struct ibmvfc_event *evt)
751 int index = evt - pool->events;
754 if (evt != pool->events + index) /* unaligned */
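ibmvfc_valid_event (lines 743-754) sanity-checks an event pointer before it is trusted: pointer subtraction recovers the slot index, and re-adding that index must give back the same address, which rejects pointers that land inside a slot rather than at its start. A standalone model of that check, with an explicit bounds test as in the full driver function; the pool layout here is illustrative:

#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 16

struct event { int payload; };

struct pool { struct event events[POOL_SIZE]; };

/* Valid only if evt points exactly at one of pool->events[0..POOL_SIZE-1]. */
static bool valid_event(struct pool *pool, struct event *evt)
{
    long index = evt - pool->events;          /* element distance  */

    if (index < 0 || index >= POOL_SIZE)
        return false;                         /* outside the pool  */
    if (evt != pool->events + index)
        return false;                         /* unaligned pointer */
    return true;
}

int main(void)
{
    struct pool p;
    struct event *inside = &p.events[3];
    struct event *skewed = (struct event *)((char *)&p.events[3] + 1);

    printf("aligned: %d, skewed: %d\n",
           valid_event(&p, inside), valid_event(&p, skewed));
    return 0;
}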
761 * @evt: ibmvfc_event to be freed
764 static void ibmvfc_free_event(struct ibmvfc_event *evt)
766 struct ibmvfc_host *vhost = evt->vhost;
769 BUG_ON(!ibmvfc_valid_event(pool, evt));
770 BUG_ON(atomic_inc_return(&evt->free) != 1);
771 list_add_tail(&evt->queue, &vhost->free);
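ibmvfc_free_event (lines 761-771) puts a finished event back on the host's free list, guarded by BUG_ON checks: the pointer must be a valid pool member, and an atomic free counter must go from 0 to 1 so that a double free trips immediately. A userspace sketch of the double-free guard using C11 atomics, with assert standing in for BUG_ON and a plain singly linked free list:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct event {
    atomic_int free;        /* 1 while the event sits on the free list */
    struct event *next;     /* stand-in for the driver's list linkage  */
};

static struct event *free_list;

static void free_event(struct event *evt)
{
    /* BUG_ON(atomic_inc_return(&evt->free) != 1): trips on double free */
    int was_free = atomic_fetch_add(&evt->free, 1);
    assert(was_free == 0 && "event freed twice");

    evt->next = free_list;
    free_list = evt;
}

static struct event *get_event(void)
{
    struct event *evt = free_list;

    free_list = evt->next;
    atomic_store(&evt->free, 0);    /* now in flight */
    return evt;
}

int main(void)
{
    static struct event e;          /* free flag starts at 0: "in flight" */
    struct event *evt;

    free_event(&e);                 /* first free puts it on the list */
    evt = get_event();              /* take it back: free flag -> 0   */
    free_event(evt);                /* fine                           */
    /* free_event(evt); again here would assert: double free          */
    printf("ok\n");
    return 0;
}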
776 * @evt: ibmvfc event struct
781 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
783 struct scsi_cmnd *cmnd = evt->cmnd;
790 if (evt->eh_comp)
791 complete(evt->eh_comp);
793 ibmvfc_free_event(evt);
798 * @evt: ibmvfc event struct
804 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
806 if (evt->cmnd) {
807 evt->cmnd->result = (error_code << 16);
808 evt->done = ibmvfc_scsi_eh_done;
810 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
812 list_del(&evt->queue);
813 del_timer(&evt->timer);
814 ibmvfc_trc_end(evt);
815 evt->done(evt);
828 struct ibmvfc_event *evt, *pos;
831 list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
832 ibmvfc_fail_request(evt, error_code);
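ibmvfc_fail_request (lines 798-815) completes an in-flight request in error: a SCSI command gets its result set and its done handler swapped to ibmvfc_scsi_eh_done, a MAD gets a driver-failed status, and in both cases the event is unlinked from the sent list, its timer is stopped, and done() is called; ibmvfc_purge_requests (lines 828-832) walks the sent list with a safe iterator and fails every entry. A compact model of that purge, using a plain singly linked list instead of the kernel list API:

#include <stdio.h>

struct event {
    int is_cmd;                     /* 1: SCSI command, 0: MAD request */
    int result;                     /* stand-in for cmnd->result       */
    void (*done)(struct event *);
    struct event *next;
};

static void eh_done(struct event *evt)
{
    printf("command failed, result=0x%x\n", evt->result);
}

static void mad_done(struct event *evt)
{
    (void)evt;
    printf("mad failed\n");
}

static void fail_request(struct event *evt, int error_code)
{
    if (evt->is_cmd) {
        evt->result = error_code << 16;   /* host byte, like DID_ERROR */
        evt->done = eh_done;              /* complete through EH       */
    }
    /* driver: list_del(&evt->queue); del_timer(&evt->timer); trace */
    evt->done(evt);
}

static void purge_requests(struct event *sent, int error_code)
{
    struct event *evt, *next;

    for (evt = sent; evt; evt = next) {   /* "safe": done() may recycle evt */
        next = evt->next;
        fail_request(evt, error_code);
    }
}

int main(void)
{
    struct event mad = { .is_cmd = 0, .done = mad_done };
    struct event cmd = { .is_cmd = 1, .next = &mad };

    purge_requests(&cmd, 0x07);           /* 0x07 is DID_ERROR in SCSI */
    return 0;
}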
1225 struct ibmvfc_event *evt = &pool->events[i];
1226 atomic_set(&evt->free, 1);
1227 evt->crq.valid = 0x80;
1228 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
1229 evt->xfer_iu = pool->iu_storage + i;
1230 evt->vhost = vhost;
1231 evt->ext_list = NULL;
1232 list_add_tail(&evt->queue, &vhost->free);
1274 struct ibmvfc_event *evt;
1277 evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1278 atomic_set(&evt->free, 0);
1279 list_del(&evt->queue);
1280 return evt;
1286 * @evt: The event
1290 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1293 evt->cmnd = NULL;
1294 evt->sync_iu = NULL;
1295 evt->crq.format = format;
1296 evt->done = done;
1297 evt->eh_comp = NULL;
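Lines 1225-1297 show the event pool and event lifecycle: each pre-allocated ibmvfc_event is paired with slot i of a DMA-coherent IU array, its CRQ ioba is iu_token plus i times the IU size, and it starts life on the free list; ibmvfc_get_event pops the head of that list and ibmvfc_init_event resets the per-request fields (cmnd, sync_iu, crq.format, done, eh_comp). A userspace model of the slot and offset bookkeeping, where the DMA token is just a made-up base address:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4

struct xfer_iu { unsigned char raw[128]; };   /* stand-in for the IU union */

struct event {
    struct xfer_iu *xfer_iu;    /* CPU view of this slot's IU           */
    uint64_t ioba;              /* device view: token + slot offset     */
    int free;
    struct event *next;
};

struct pool {
    struct event events[POOL_SIZE];
    struct xfer_iu *iu_storage; /* would come from dma_alloc_coherent() */
    uint64_t iu_token;          /* would be the returned DMA handle     */
    struct event *free_list;
};

static void pool_init(struct pool *p, uint64_t fake_dma_token)
{
    p->iu_storage = calloc(POOL_SIZE, sizeof(*p->iu_storage));
    p->iu_token = fake_dma_token;
    p->free_list = NULL;

    for (int i = 0; i < POOL_SIZE; i++) {
        struct event *evt = &p->events[i];

        evt->free = 1;
        evt->xfer_iu = p->iu_storage + i;
        evt->ioba = p->iu_token + sizeof(struct xfer_iu) * i;
        evt->next = p->free_list;
        p->free_list = evt;
    }
}

static struct event *get_event(struct pool *p)
{
    struct event *evt = p->free_list;   /* pop the free-list head */

    p->free_list = evt->next;
    evt->free = 0;
    return evt;
}

int main(void)
{
    struct pool p;
    struct event *evt;

    pool_init(&p, 0x10000000ULL);
    evt = get_event(&p);
    printf("slot %ld: iu at %p, ioba 0x%llx\n", (long)(evt - p.events),
           (void *)evt->xfer_iu, (unsigned long long)evt->ioba);
    free(p.iu_storage);
    return 0;
}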
1322 * @evt: ibmvfc event struct
1330 struct ibmvfc_event *evt,
1363 if (!evt->ext_list) {
1364 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1365 &evt->ext_list_token);
1367 if (!evt->ext_list) {
1375 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1377 data->va = cpu_to_be64(evt->ext_list_token);
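Lines 1322-1377 show the scatter-gather fallback: when a command maps more segments than fit in the IU's inline descriptors, an external descriptor list is allocated from the host's DMA pool (once per event, cached in evt->ext_list), the mapped segments are copied into it, and the command's single descriptor is pointed at ext_list_token. A sketch of that inline-versus-indirect decision; the descriptor counts and types are illustrative, and calloc stands in for dma_pool_alloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INLINE_DESC 2               /* descriptors that fit in the IU */
#define MAX_DESC    16

struct desc { uint64_t va; uint32_t len; };

struct event {
    struct desc inline_desc[INLINE_DESC];
    struct desc *ext_list;          /* lazily allocated indirect list */
    uint64_t ext_list_token;        /* its (fake) DMA address         */
};

/* Fill *out with the one descriptor the hardware should follow. */
static int map_sg(struct event *evt, const struct desc *sg, int nseg,
                  struct desc *out)
{
    if (nseg <= INLINE_DESC) {
        for (int i = 0; i < nseg; i++)
            evt->inline_desc[i] = sg[i];
        *out = evt->inline_desc[0];             /* direct case */
        return 0;
    }

    if (!evt->ext_list) {                       /* dma_pool_alloc() here */
        evt->ext_list = calloc(MAX_DESC, sizeof(struct desc));
        if (!evt->ext_list)
            return -1;
        evt->ext_list_token = (uint64_t)(uintptr_t)evt->ext_list;
    }

    for (int i = 0; i < nseg; i++)              /* copy the mapped segments */
        evt->ext_list[i] = sg[i];

    out->va = evt->ext_list_token;              /* indirect case: point at  */
    out->len = nseg * sizeof(struct desc);      /* the external list        */
    return 0;
}

int main(void)
{
    struct event evt = { 0 };
    struct desc sg[4] = { { 0x1000, 512 }, { 0x2000, 512 },
                          { 0x3000, 512 }, { 0x4000, 512 } };
    struct desc hw;

    if (!map_sg(&evt, sg, 4, &hw))
        printf("indirect list at 0x%llx, %u bytes\n",
               (unsigned long long)hw.va, (unsigned)hw.len);
    free(evt.ext_list);
    return 0;
}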
1385 * @evt: struct ibmvfc_event that timed out
1389 static void ibmvfc_timeout(struct ibmvfc_event *evt)
1391 struct ibmvfc_host *vhost = evt->vhost;
1392 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1398 * @evt: event to be sent
1404 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1407 __be64 *crq_as_u64 = (__be64 *) &evt->crq;
1411 *evt->xfer_iu = evt->iu;
1412 if (evt->crq.format == IBMVFC_CMD_FORMAT)
1413 evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1414 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1415 evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1419 list_add_tail(&evt->queue, &vhost->sent);
1420 init_timer(&evt->timer);
1423 evt->timer.data = (unsigned long) evt;
1424 evt->timer.expires = jiffies + (timeout * HZ);
1425 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1426 add_timer(&evt->timer);
1433 list_del(&evt->queue);
1434 del_timer(&evt->timer);
1444 if (evt->cmnd)
1445 scsi_dma_unmap(evt->cmnd);
1446 ibmvfc_free_event(evt);
1451 if (evt->cmnd) {
1452 evt->cmnd->result = DID_ERROR << 16;
1453 evt->done = ibmvfc_scsi_eh_done;
1455 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1457 evt->done(evt);
1459 ibmvfc_trc_start(evt);
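ibmvfc_send_event (lines 1398-1459) is the single submission path: it copies the prepared IU from evt->iu into the DMA-visible evt->xfer_iu, stamps the IU tag with the event pointer so the completion can be mapped back, queues the event on the sent list, arms a per-event timer, and hands the two 64-bit halves of evt->crq to firmware; on failure it unwinds the list and timer and completes the request in error. The sketch below isolates the tag round trip that the completion path relies on, with a fake firmware that just echoes the tag back:

#include <stdint.h>
#include <stdio.h>

struct event {
    uint64_t tag;                   /* stamped into the IU on send */
    void (*done)(struct event *);
};

/* Pretend firmware: remembers the last tag and echoes it back. */
static uint64_t fw_pending_tag;

static void fw_send(uint64_t tag)        { fw_pending_tag = tag; }
static uint64_t fw_next_completion(void) { return fw_pending_tag; }

static void my_done(struct event *evt)
{
    printf("completed event %p\n", (void *)evt);
}

static void send_event(struct event *evt)
{
    /* driver: *evt->xfer_iu = evt->iu; tag = cpu_to_be64((u64)evt);
     * list_add_tail() to the sent list; add_timer(); send the CRQ. */
    evt->tag = (uint64_t)(uintptr_t)evt;
    fw_send(evt->tag);
}

static void handle_completion(void)
{
    /* driver: evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba),
     * validated against the pool before the pointer is trusted. */
    struct event *evt = (struct event *)(uintptr_t)fw_next_completion();

    evt->done(evt);
}

int main(void)
{
    struct event e = { .done = my_done };

    send_event(&e);
    handle_completion();
    return 0;
}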
1466 * @evt: ibmvfc event to log
1469 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1471 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1472 struct ibmvfc_host *vhost = evt->vhost;
1474 struct scsi_cmnd *cmnd = evt->cmnd;
1520 * @evt: ibmvfc event to be handled
1524 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1526 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1528 struct scsi_cmnd *cmnd = evt->cmnd;
1556 ibmvfc_log_error(evt);
1567 if (evt->eh_comp)
1568 complete(evt->eh_comp);
1570 ibmvfc_free_event(evt);
1617 struct ibmvfc_event *evt;
1629 evt = ibmvfc_get_event(vhost);
1630 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1631 evt->cmnd = cmnd;
1633 vfc_cmd = &evt->iu.cmd;
1635 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
1661 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1662 return ibmvfc_send_event(evt, vhost, 0);
1664 ibmvfc_free_event(evt);
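The queuecommand fragment (lines 1617-1664) is the fast path in miniature: take an event from the pool, initialize it with ibmvfc_scsi_done and IBMVFC_CMD_FORMAT, attach the scsi_cmnd, point the command's response descriptor into the event's own IU slot, map the data, and submit; only a mapping failure frees the event here, since a send failure is cleaned up inside ibmvfc_send_event itself. A small sketch of that allocate/prepare/submit shape with the failure path kept explicit; every helper below is a toy stand-in, not the driver function of the same idea:

#include <stdio.h>

struct cmnd { int result; };

struct event {
    struct cmnd *cmnd;
    void (*done)(struct event *);
    int in_use;
};

static struct event pool_slot;                  /* a one-slot "pool" */

static struct event *get_event(void)       { pool_slot.in_use = 1; return &pool_slot; }
static void free_event(struct event *evt)  { evt->in_use = 0; }

static void scsi_done(struct event *evt)
{
    printf("command completed, result=%d\n", evt->cmnd->result);
    free_event(evt);                            /* back to the pool */
}

static int map_data(struct cmnd *cmnd)   { (void)cmnd; return 0; /* pretend success */ }
static int send_event(struct event *evt) { (void)evt;  return 0; /* pretend success */ }

static int queuecommand(struct cmnd *cmnd)
{
    struct event *evt = get_event();

    evt->done = scsi_done;
    evt->cmnd = cmnd;
    /* driver: build vfc_cmd in evt->iu; resp.va = ioba + offsetof(..., rsp) */

    if (map_data(cmnd) == 0)
        return send_event(evt);     /* success path: completion frees it   */

    free_event(evt);                /* mapping failed: give the event back */
    return -1;
}

int main(void)
{
    struct cmnd c = { 0 };

    if (queuecommand(&c) == 0)
        pool_slot.done(&pool_slot); /* simulate the completion interrupt */
    return 0;
}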
1681 * @evt: ibmvfc event struct
1684 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1687 if (evt->sync_iu)
1688 *evt->sync_iu = *evt->xfer_iu;
1690 complete(&evt->comp);
1695 * @evt: struct ibmvfc_event
1698 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1700 struct ibmvfc_host *vhost = evt->vhost;
1702 ibmvfc_free_event(evt);
1718 struct ibmvfc_event *evt;
1732 evt = ibmvfc_get_event(vhost);
1733 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
1735 tmf = &evt->iu.tmf;
1743 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1771 struct ibmvfc_event *evt;
1790 evt = ibmvfc_get_event(vhost);
1791 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1792 plogi = &evt->iu.plogi;
1798 evt->sync_iu = &rsp_iu;
1799 init_completion(&evt->comp);
1801 rc = ibmvfc_send_event(evt, vhost, default_timeout);
1807 wait_for_completion(&evt->comp);
1813 ibmvfc_free_event(evt);
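ibmvfc_sync_completion (lines 1681-1690) is the done handler for synchronous requests: if the caller registered a sync_iu buffer the transferred IU is copied into it, then evt->comp is completed; callers such as the BSG port-login path (lines 1771-1813) set sync_iu, init_completion(&evt->comp), send, and block in wait_for_completion. A userspace sketch of the same pattern using pthreads in place of the kernel completion API; the response text and the thread are invented:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct iu { char data[32]; };

struct event {
    struct iu xfer_iu;              /* filled in by the "interrupt" side */
    struct iu *sync_iu;             /* caller's response buffer, if any  */
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
};

/* The done handler: copy the response out, then wake the waiter. */
static void sync_completion(struct event *evt)
{
    if (evt->sync_iu)
        *evt->sync_iu = evt->xfer_iu;

    pthread_mutex_lock(&evt->lock);
    evt->done = 1;
    pthread_cond_signal(&evt->cond);
    pthread_mutex_unlock(&evt->lock);
}

static void *fake_interrupt(void *arg)
{
    struct event *evt = arg;

    strcpy(evt->xfer_iu.data, "login accepted");
    sync_completion(evt);
    return NULL;
}

int main(void)
{
    struct iu rsp;
    struct event evt = { .sync_iu = &rsp,
                         .lock = PTHREAD_MUTEX_INITIALIZER,
                         .cond = PTHREAD_COND_INITIALIZER };
    pthread_t t;

    pthread_create(&t, NULL, fake_interrupt, &evt);   /* "send" the request */

    pthread_mutex_lock(&evt.lock);                    /* wait_for_completion */
    while (!evt.done)
        pthread_cond_wait(&evt.cond, &evt.lock);
    pthread_mutex_unlock(&evt.lock);

    pthread_join(t, NULL);
    printf("response: %s\n", rsp.data);
    return 0;
}

Build with -lpthread. The point of the pattern is that the submit and completion paths share nothing but the event, so the same done-handler plumbing serves both asynchronous and synchronous callers.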
1832 struct ibmvfc_event *evt;
1904 evt = ibmvfc_get_event(vhost);
1905 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1906 mad = &evt->iu.passthru;
1913 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
1927 mad->iu.tag = cpu_to_be64((u64)evt);
1930 evt->sync_iu = &rsp_iu;
1931 init_completion(&evt->comp);
1932 rc = ibmvfc_send_event(evt, vhost, 0);
1940 wait_for_completion(&evt->comp);
1948 ibmvfc_free_event(evt);
1977 struct ibmvfc_event *evt = NULL;
1986 evt = ibmvfc_get_event(vhost);
1987 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1989 tmf = &evt->iu.cmd;
1991 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
2001 evt->sync_iu = &rsp_iu;
2003 init_completion(&evt->comp);
2004 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2015 wait_for_completion(&evt->comp);
2034 ibmvfc_free_event(evt);
2041 * @evt: ibmvfc event struct
2047 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2051 if (evt->cmnd) {
2052 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2061 * @evt: ibmvfc event struct
2067 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2069 if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2076 * @evt: ibmvfc event struct
2082 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2084 if (evt->cmnd && evt->cmnd->device == device)
2101 struct ibmvfc_event *evt;
2111 list_for_each_entry(evt, &vhost->sent, queue) {
2112 if (match(evt, device)) {
2113 evt->eh_comp = &comp;
2125 list_for_each_entry(evt, &vhost->sent, queue) {
2126 if (match(evt, device)) {
2127 evt->eh_comp = NULL;
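The match helpers (ibmvfc_match_rport, _target, _lun, and later _key and _evt) are predicates over an in-flight event, and ibmvfc_wait_for_ops (lines 2101-2127) uses them to drain error-handling targets: it tags every matching event on the sent list with a shared completion through evt->eh_comp, waits for them to finish, and clears eh_comp from anything still outstanding. A condensed, single-threaded model of that tag-and-wait scheme; the wait is simulated by completing the events inline:

#include <stdio.h>

struct event {
    void *dev;                      /* which device this I/O targets   */
    int *eh_comp;                   /* non-NULL: an EH waiter watches  */
    struct event *next;
};

typedef int (*match_fn)(struct event *, void *);

static int match_lun(struct event *evt, void *device)
{
    return evt->dev == device;
}

/* Completion side: if EH tagged this event, count it as drained. */
static void complete_event(struct event *evt)
{
    if (evt->eh_comp)
        (*evt->eh_comp)++;
}

static int wait_for_ops(struct event *sent, void *device, match_fn match)
{
    int drained = 0, wanted = 0;

    for (struct event *evt = sent; evt; evt = evt->next)
        if (match(evt, device)) {
            evt->eh_comp = &drained;    /* tag it for the waiter */
            wanted++;
        }

    /* driver: wait_for_completion_timeout() in a loop; here we just
     * let every outstanding event complete. */
    for (struct event *evt = sent; evt; evt = evt->next)
        complete_event(evt);

    for (struct event *evt = sent; evt; evt = evt->next)
        evt->eh_comp = NULL;            /* detach whatever is left */

    return drained == wanted;
}

int main(void)
{
    int lun_a, lun_b;
    struct event e2 = { .dev = &lun_b };
    struct event e1 = { .dev = &lun_a, .next = &e2 };

    printf("drained: %s\n",
           wait_for_ops(&e1, &lun_a, match_lun) ? "yes" : "no");
    return 0;
}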
2161 struct ibmvfc_event *evt, *found_evt;
2170 list_for_each_entry(evt, &vhost->sent, queue) {
2171 if (evt->cmnd && evt->cmnd->device == sdev) {
2172 found_evt = evt;
2185 evt = ibmvfc_get_event(vhost);
2186 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2188 tmf = &evt->iu.tmf;
2204 evt->sync_iu = &rsp;
2205 init_completion(&evt->comp);
2206 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2221 wait_for_completion(&evt->comp);
2224 ibmvfc_free_event(evt);
2246 * @evt: ibmvfc event struct
2252 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2256 if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2257 be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2264 * @evt: ibmvfc event struct
2270 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2272 if (evt == match)
2292 struct ibmvfc_event *evt, *found_evt;
2301 list_for_each_entry(evt, &vhost->sent, queue) {
2302 if (evt->cmnd && evt->cmnd->device == sdev) {
2303 found_evt = evt;
2316 evt = ibmvfc_get_event(vhost);
2317 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2319 tmf = &evt->iu.cmd;
2321 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
2331 evt->sync_iu = &rsp_iu;
2333 init_completion(&evt->comp);
2334 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2345 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2364 rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2394 ibmvfc_free_event(evt);
2735 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
2789 if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2795 if (unlikely(atomic_read(&evt->free))) {
2801 del_timer(&evt->timer);
2802 list_del(&evt->queue);
2803 ibmvfc_trc_end(evt);
2804 evt->done(evt);
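On the completion side (lines 2735-2804) the CRQ ioba field is not a bus address at all but the event pointer stamped into the tag at send time; ibmvfc_handle_crq casts it back and refuses to trust it unless ibmvfc_valid_event accepts it and the event is not already marked free, and only then stops the timer, unlinks the event, and calls evt->done. A sketch of that defensive decode, reusing the pool and free-flag ideas from the earlier sketches; sizes and addresses are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 4

struct event {
    int free;                        /* 1: back on the free list */
    int fired;                       /* set by done()            */
};

static struct event pool[POOL_SIZE];

static void done(struct event *evt) { evt->fired = 1; }

static void handle_crq(uint64_t ioba)
{
    struct event *evt = (struct event *)(uintptr_t)ioba;
    long index = evt - pool;

    if (index < 0 || index >= POOL_SIZE || evt != pool + index) {
        fprintf(stderr, "invalid event pointer 0x%llx\n",
                (unsigned long long)ioba);
        return;
    }
    if (evt->free) {
        fprintf(stderr, "completion for an already-freed event\n");
        return;
    }
    /* driver: del_timer(&evt->timer); list_del(&evt->queue); trace */
    done(evt);
}

int main(void)
{
    pool[1].free = 0;                             /* "in flight"      */
    pool[2].free = 1;                             /* already recycled */

    handle_crq((uint64_t)(uintptr_t)&pool[1]);    /* completes        */
    handle_crq((uint64_t)(uintptr_t)&pool[2]);    /* rejected: free   */
    handle_crq(0x1ULL);                           /* rejected: bogus  */

    printf("pool[1] fired: %d\n", pool[1].fired);
    return 0;
}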
3332 * @evt: ibmvfc event struct
3335 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3337 struct ibmvfc_target *tgt = evt->tgt;
3338 struct ibmvfc_host *vhost = evt->vhost;
3339 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3395 ibmvfc_free_event(evt);
3408 struct ibmvfc_event *evt;
3414 evt = ibmvfc_get_event(vhost);
3416 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
3417 evt->tgt = tgt;
3418 prli = &evt->iu.prli;
3430 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
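From here down the listing is the discovery state machine, and every fragment follows one asynchronous shape: a per-target MAD is built in evt->iu, evt->tgt remembers the target, the done callback (ibmvfc_tgt_prli_done, _plogi_done, _implicit_logout_done, and so on) inspects the response, advances the target state, and frees the event, while a failed ibmvfc_send_event rolls the optimistic state change back. A compact sketch of that submit/callback pairing; the state names and the absence of retry handling are simplifications:

#include <stdio.h>

enum tgt_state { TGT_NONE, TGT_INIT_WAIT, TGT_LOGGED_IN, TGT_FAILED };

struct target {
    enum tgt_state state;
    const char *name;
};

struct event {
    struct target *tgt;
    void (*done)(struct event *, int status);
};

/* Completion callback: read the MAD status, move the target along. */
static void tgt_plogi_done(struct event *evt, int status)
{
    struct target *tgt = evt->tgt;

    tgt->state = status ? TGT_FAILED : TGT_LOGGED_IN;
    printf("%s: login %s\n", tgt->name, status ? "failed" : "complete");
    /* driver: ibmvfc_free_event(evt); drop the target reference */
}

/* Submit side: returns 0 if the MAD went out, nonzero if the send failed. */
static int tgt_send_plogi(struct event *evt, struct target *tgt,
                          int simulate_send_failure)
{
    evt->tgt = tgt;
    evt->done = tgt_plogi_done;
    tgt->state = TGT_INIT_WAIT;          /* optimistic state change  */

    if (simulate_send_failure) {
        tgt->state = TGT_NONE;           /* roll back, retry later   */
        return 1;
    }
    return 0;                            /* "firmware" now owns it   */
}

int main(void)
{
    struct target tgt = { TGT_NONE, "tgt0" };
    struct event evt;

    if (!tgt_send_plogi(&evt, &tgt, 0))
        evt.done(&evt, 0);               /* completion arrives later */
    return 0;
}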
3440 * @evt: ibmvfc event struct
3443 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3445 struct ibmvfc_target *tgt = evt->tgt;
3446 struct ibmvfc_host *vhost = evt->vhost;
3447 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
3491 ibmvfc_free_event(evt);
3504 struct ibmvfc_event *evt;
3511 evt = ibmvfc_get_event(vhost);
3514 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
3515 evt->tgt = tgt;
3516 plogi = &evt->iu.plogi;
3523 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3533 * @evt: ibmvfc event struct
3536 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
3538 struct ibmvfc_target *tgt = evt->tgt;
3539 struct ibmvfc_host *vhost = evt->vhost;
3540 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
3544 ibmvfc_free_event(evt);
3579 struct ibmvfc_event *evt;
3585 evt = ibmvfc_get_event(vhost);
3587 ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
3588 evt->tgt = tgt;
3589 mad = &evt->iu.implicit_logout;
3597 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3629 * @evt: ibmvfc event struct
3632 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3634 struct ibmvfc_target *tgt = evt->tgt;
3635 struct ibmvfc_host *vhost = evt->vhost;
3636 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3666 ibmvfc_free_event(evt);
3672 * @evt: ibmvfc event struct
3675 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3677 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3683 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3688 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3692 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
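ibmvfc_init_passthru (lines 3675-3692) builds a passthru MAD whose command and response buffers live inside the same per-event IU slot, so every device-visible address is evt->crq.ioba plus an offsetof into the passthru structure: cmd_ioba points at the embedded iu, and iu.cmd / iu.rsp point one level deeper at the payload areas. A standalone illustration of that nested-offset arithmetic; the structure below is a stand-in, not the real ibmvfc_passthru_mad layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in layout: a MAD carrying an embedded IU that itself embeds
 * the command payload and the response buffer. */
struct passthru_iu {
    uint8_t cmd[64];
    uint8_t rsp[64];
};

struct passthru_mad {
    uint32_t opcode;
    uint64_t cmd_ioba;               /* device address of .iu below */
    struct passthru_iu iu;
};

int main(void)
{
    uint64_t ioba = 0x20000000ULL;   /* evt->crq.ioba for this slot */

    uint64_t iu_va  = ioba + offsetof(struct passthru_mad, iu);
    uint64_t cmd_va = iu_va + offsetof(struct passthru_iu, cmd);
    uint64_t rsp_va = iu_va + offsetof(struct passthru_iu, rsp);

    printf("iu  at 0x%llx\n", (unsigned long long)iu_va);
    printf("cmd at 0x%llx\n", (unsigned long long)cmd_va);
    printf("rsp at 0x%llx\n", (unsigned long long)rsp_va);
    return 0;
}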
3700 * @evt: ibmvfc event struct
3708 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
3710 struct ibmvfc_host *vhost = evt->vhost;
3711 struct ibmvfc_target *tgt = evt->tgt;
3715 ibmvfc_free_event(evt);
3731 struct ibmvfc_event *evt;
3748 evt = ibmvfc_get_event(vhost);
3749 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
3751 evt->tgt = tgt;
3752 tmf = &evt->iu.tmf;
3760 rc = ibmvfc_send_event(evt, vhost, default_timeout);
3787 struct ibmvfc_event *evt;
3793 evt = ibmvfc_get_event(vhost);
3795 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
3796 evt->tgt = tgt;
3798 ibmvfc_init_passthru(evt);
3799 mad = &evt->iu.passthru;
3821 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
3832 * @evt: ibmvfc event struct
3835 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3837 struct ibmvfc_target *tgt = evt->tgt;
3838 struct ibmvfc_host *vhost = evt->vhost;
3839 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3879 ibmvfc_free_event(evt);
3892 struct ibmvfc_event *evt;
3898 evt = ibmvfc_get_event(vhost);
3900 evt->tgt = tgt;
3901 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
3902 query_tgt = &evt->iu.query_tgt;
3910 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3986 * @evt: ibmvfc event struct
3989 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3991 struct ibmvfc_host *vhost = evt->vhost;
3992 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4016 ibmvfc_free_event(evt);
4028 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4030 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4031 mad = &evt->iu.discover_targets;
4041 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4049 * @evt: ibmvfc event struct
4052 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4054 struct ibmvfc_host *vhost = evt->vhost;
4055 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4062 ibmvfc_free_event(evt);
4072 ibmvfc_free_event(evt);
4077 ibmvfc_free_event(evt);
4082 ibmvfc_free_event(evt);
4139 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4143 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4146 mad = &evt->iu.npiv_login;
4156 if (!ibmvfc_send_event(evt, vhost, default_timeout))
4167 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
4169 struct ibmvfc_host *vhost = evt->vhost;
4170 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
4172 ibmvfc_free_event(evt);
4202 struct ibmvfc_event *evt;
4204 evt = ibmvfc_get_event(vhost);
4205 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4207 mad = &evt->iu.npiv_logout;
4215 if (!ibmvfc_send_event(evt, vhost, default_timeout))