Lines matching defs:ch — lines where the RDMA channel pointer `ch` (struct srpt_rdma_ch *) is defined or used in the ib_srpt SRP target driver. Each entry is the source line number followed by the matching line.

97 static void srpt_release_channel(struct srpt_rdma_ch *ch);
123 static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
128 spin_lock_irqsave(&ch->spinlock, flags);
129 state = ch->state;
130 spin_unlock_irqrestore(&ch->spinlock, flags);
135 srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
140 spin_lock_irqsave(&ch->spinlock, flags);
141 prev = ch->state;
142 ch->state = new_state;
143 spin_unlock_irqrestore(&ch->spinlock, flags);
153 srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
159 spin_lock_irqsave(&ch->spinlock, flags);
160 prev = ch->state;
162 ch->state = new;
163 spin_unlock_irqrestore(&ch->spinlock, flags);
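
The three helpers above (lines 123–163) only read or update ch->state while holding ch->spinlock with interrupts disabled. A minimal standalone sketch of that pattern, with a hypothetical sketch_ch type and a reduced copy of the state enum; the `if (prev == old)` test in the compare-and-set helper falls outside the matched lines and is inferred from its name and callers:

#include <linux/spinlock.h>
#include <linux/types.h>

enum rdma_ch_state {
	CH_CONNECTING,
	CH_LIVE,
	CH_DISCONNECTING,
	CH_DRAINING,
	CH_RELEASING,
};

struct sketch_ch {
	spinlock_t spinlock;		/* protects state */
	enum rdma_ch_state state;
};

/* Read the channel state under the lock. */
static enum rdma_ch_state sketch_get_ch_state(struct sketch_ch *ch)
{
	unsigned long flags;
	enum rdma_ch_state state;

	spin_lock_irqsave(&ch->spinlock, flags);
	state = ch->state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return state;
}

/* Unconditionally set the state and return the previous one. */
static enum rdma_ch_state sketch_set_ch_state(struct sketch_ch *ch,
					      enum rdma_ch_state new_state)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	ch->state = new_state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev;
}

/* Switch from @old to @new atomically; return true if the switch happened. */
static bool sketch_test_and_set_ch_state(struct sketch_ch *ch,
					 enum rdma_ch_state old,
					 enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == old)
		ch->state = new;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev == old;
}
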
226 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
229 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
233 ib_cm_notify(ch->cm_id, event->event);
236 if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
238 srpt_release_channel(ch);
241 ch->sess_name, srpt_get_ch_state(ch));
800 static int srpt_post_send(struct srpt_rdma_ch *ch,
805 struct srpt_device *sdev = ch->sport->sdev;
808 atomic_inc(&ch->req_lim);
811 if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
830 ret = ib_post_send(ch->qp, &wr, &bad_wr);
834 atomic_inc(&ch->sq_wr_avail);
835 atomic_dec(&ch->req_lim);
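
srpt_post_send() (lines 800–835) charges one send-queue slot (ch->sq_wr_avail) and one initiator credit (ch->req_lim) up front and rolls both back if the queue is full or ib_post_send() fails. A hedged sketch of that bookkeeping, assuming a minimal channel struct with only the fields used here; the error label and message are illustrative, not the driver's exact flow:

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

/* Minimal channel fields needed for the credit bookkeeping. */
struct sketch_ch {
	atomic_t req_lim;	/* responses the initiator has credit for */
	atomic_t sq_wr_avail;	/* free send-queue slots */
	struct ib_qp *qp;
};

/* Sketch: account for one send WR and one credit, undo both on failure. */
static int sketch_post_send(struct sketch_ch *ch, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret = -ENOMEM;

	atomic_inc(&ch->req_lim);

	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		pr_warn("send queue full\n");
		goto out;
	}

	ret = ib_post_send(ch->qp, wr, &bad_wr);
out:
	if (ret) {
		/* Roll both counters back so the accounting stays consistent. */
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}
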
951 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
963 attr->port_num = ch->sport->port;
976 * @ch: channel of the queue pair.
985 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
992 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1006 * @ch: channel of the queue pair.
1015 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1022 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1037 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1042 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
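
Lines 951–1042 step the queue pair through the usual IB state machine: INIT after creation, RTR when the connection request arrives, RTS once the connection is established, and ERROR to drain it on close. srpt_ch_qp_rtr()/srpt_ch_qp_rts() let the IB CM fill in the transition attributes via ib_cm_init_qp_attr(). A condensed sketch of one such CM-assisted transition (names illustrative):

#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>

/* Sketch: ask the IB CM which attributes are needed to reach @state,
 * then apply them to the channel's QP with ib_modify_qp(). */
static int sketch_ch_qp_transition(struct ib_cm_id *cm_id, struct ib_qp *qp,
				   enum ib_qp_state state)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = state;	/* e.g. IB_QPS_RTR or IB_QPS_RTS */
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, attr_mask);
}
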
1048 static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1054 BUG_ON(!ch);
1069 ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1078 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1096 BUG_ON(!ch);
1106 count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1208 sge->lkey = ch->sport->sdev->mr->lkey;
1245 srpt_unmap_sg_to_ib_sge(ch, ioctx);
1253 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1258 BUG_ON(!ch);
1261 spin_lock_irqsave(&ch->spinlock, flags);
1262 if (!list_empty(&ch->free_list)) {
1263 ioctx = list_first_entry(&ch->free_list,
1267 spin_unlock_irqrestore(&ch->spinlock, flags);
1272 BUG_ON(ioctx->ch != ch);
1299 struct srpt_rdma_ch *ch;
1303 ch = ioctx->ch;
1304 BUG_ON(!ch);
1308 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1317 spin_lock_irqsave(&ch->spinlock, flags);
1318 list_add(&ioctx->free_list, &ch->free_list);
1319 spin_unlock_irqrestore(&ch->spinlock, flags);
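
Lines 1253–1319 show the per-channel pool of send I/O contexts: srpt_get_send_ioctx() pops an entry off ch->free_list under ch->spinlock, and the release path at lines 1317–1319 pushes it back. A minimal sketch of that pool with hypothetical sketch_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_ioctx {
	struct list_head free_list;
};

struct sketch_ch {
	spinlock_t spinlock;
	struct list_head free_list;	/* unused send contexts */
};

/* Take a free send context, or return NULL if the pool is empty. */
static struct sketch_ioctx *sketch_get_send_ioctx(struct sketch_ch *ch)
{
	struct sketch_ioctx *ioctx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct sketch_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return ioctx;
}

/* Return a finished send context to the pool. */
static void sketch_put_send_ioctx(struct sketch_ch *ch,
				  struct sketch_ioctx *ioctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}
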
1391 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1413 static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
1420 atomic_inc(&ch->sq_wr_avail);
1423 ioctx = ch->ioctx_ring[index];
1432 /* If SRP_RSP sending failed, undo the ch->req_lim change. */
1435 atomic_dec(&ch->req_lim);
1443 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
1448 atomic_inc(&ch->sq_wr_avail);
1471 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
1476 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1495 static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1513 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1534 * @ch: RDMA channel through which the request has been received.
1547 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1571 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1577 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1594 * @ch: RDMA channel through which the request has been received.
1605 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1622 + atomic_xchg(&ch->req_lim_delta, 0));
1720 static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1793 * @ch: RDMA channel of the task management request.
1807 struct srpt_rdma_ch *ch;
1812 ch = ioctx->ch;
1813 BUG_ON(!ch);
1814 BUG_ON(!ch->sport);
1815 sdev = ch->sport->sdev;
1818 for (i = 0; i < ch->rq_size; ++i) {
1819 target = ch->ioctx_ring[i];
1858 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1875 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1918 * @ch: RDMA channel through which the information unit has been received.
1921 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1928 BUG_ON(!ch);
1931 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1935 ch_state = srpt_get_ch_state(ch);
1937 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1947 send_ioctx = srpt_get_send_ioctx(ch);
1950 &ch->cmd_wait_list);
1955 transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
1961 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1964 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1984 srpt_post_recv(ch->sport->sdev, recv_ioctx);
1990 struct srpt_rdma_ch *ch,
1993 struct srpt_device *sdev = ch->sport->sdev;
2001 req_lim = atomic_dec_return(&ch->req_lim);
2005 srpt_handle_new_iu(ch, ioctx, NULL);
2028 struct srpt_rdma_ch *ch,
2037 send_ioctx = ch->ioctx_ring[index];
2040 srpt_handle_send_comp(ch, send_ioctx);
2044 srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2050 srpt_handle_send_err_comp(ch, wc->wr_id);
2054 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2059 && !list_empty(&ch->cmd_wait_list)
2060 && srpt_get_ch_state(ch) == CH_LIVE
2061 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2064 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2068 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
2072 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2074 struct ib_wc *const wc = ch->wc;
2077 WARN_ON(cq != ch->cq);
2080 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2083 srpt_process_rcv_completion(cq, ch, &wc[i]);
2085 srpt_process_send_completion(cq, ch, &wc[i]);
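
srpt_process_completion() (lines 2072–2085) drains the completion queue: it polls up to ARRAY_SIZE(ch->wc) work completions per call to ib_poll_cq() and dispatches each one to the receive or send handler. A sketch of that drain loop; the dispatch test on wc.opcode and the stub handlers are simplifications of how the driver distinguishes the two directions:

#include <rdma/ib_verbs.h>

#define SKETCH_WC_BATCH 16

/* Hypothetical per-direction handlers (stubs). */
static void sketch_handle_recv(struct ib_wc *wc) { }
static void sketch_handle_send(struct ib_wc *wc) { }

/* Sketch: re-arm the CQ, then drain it in batches and dispatch each entry.
 * Arming before polling means a completion that arrives while draining
 * still raises a new event. */
static void sketch_process_completion(struct ib_cq *cq)
{
	struct ib_wc wc[SKETCH_WC_BATCH];
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while ((n = ib_poll_cq(cq, SKETCH_WC_BATCH, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].opcode == IB_WC_RECV)
				sketch_handle_recv(&wc[i]);
			else
				sketch_handle_send(&wc[i]);
		}
	}
}
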
2103 struct srpt_rdma_ch *ch = ctx;
2105 wake_up_interruptible(&ch->wait_queue);
2110 struct srpt_rdma_ch *ch;
2115 ch = arg;
2116 BUG_ON(!ch);
2118 ch->sess_name, ch->thread->comm, current->pid);
2120 wait_event_interruptible(ch->wait_queue,
2121 (srpt_process_completion(ch->cq, ch),
2125 ch->sess_name, ch->thread->comm, current->pid);
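
The CQ callback at lines 2103–2105 only wakes ch->wait_queue; the real work happens in the per-channel kernel thread at lines 2110–2125, which sleeps in wait_event_interruptible() and re-drains the CQ on every wakeup until kthread_stop() is called. A sketch of that handoff, reusing sketch_process_completion() from the drain sketch above:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>

static void sketch_process_completion(struct ib_cq *cq);	/* see drain sketch */

struct sketch_ch {
	wait_queue_head_t wait_queue;
	struct ib_cq *cq;
};

/* CQ event handler: runs in interrupt context, so just wake the thread. */
static void sketch_completion(struct ib_cq *cq, void *ctx)
{
	struct sketch_ch *ch = ctx;

	wake_up_interruptible(&ch->wait_queue);
}

/* Per-channel thread: drain the CQ whenever woken, exit on kthread_stop().
 * Started with e.g. kthread_run(sketch_compl_thread, ch, "sketch_compl"). */
static int sketch_compl_thread(void *arg)
{
	struct sketch_ch *ch = arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(ch->wait_queue,
			(sketch_process_completion(ch->cq),
			 kthread_should_stop()));
	}
	return 0;
}
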
2132 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2135 struct srpt_port *sport = ch->sport;
2140 WARN_ON(ch->rq_size < 1);
2147 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2148 ch->rq_size + srp_sq_size, 0);
2149 if (IS_ERR(ch->cq)) {
2150 ret = PTR_ERR(ch->cq);
2152 ch->rq_size + srp_sq_size, ret);
2156 qp_init->qp_context = (void *)ch;
2159 qp_init->send_cq = ch->cq;
2160 qp_init->recv_cq = ch->cq;
2167 ch->qp = ib_create_qp(sdev->pd, qp_init);
2168 if (IS_ERR(ch->qp)) {
2169 ret = PTR_ERR(ch->qp);
2174 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2177 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2178 qp_init->cap.max_send_wr, ch->cm_id);
2180 ret = srpt_init_ch_qp(ch, ch->qp);
2184 init_waitqueue_head(&ch->wait_queue);
2186 pr_debug("creating thread for session %s\n", ch->sess_name);
2188 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2189 if (IS_ERR(ch->thread)) {
2191 PTR_ERR(ch->thread));
2192 ch->thread = NULL;
2201 ib_destroy_qp(ch->qp);
2203 ib_destroy_cq(ch->cq);
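
srpt_create_ch_ib() (lines 2132–2203) sets up the per-channel IB resources in order: a CQ sized for rq_size + srp_sq_size entries, an RC QP bound to that CQ for both directions, the initial QP state, the wait queue, and finally the completion thread, unwinding in reverse order on failure (lines 2201–2203). A condensed sketch of the CQ/QP step and its unwinding, written against the older six-argument ib_create_cq() used at line 2147; capacities and names are illustrative, and the recv capacity is set here only because the listed driver posts its receives at the device level instead (line 1984):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical completion handler; in the driver this wakes the channel thread. */
static void sketch_cq_handler(struct ib_cq *cq, void *ctx)
{
}

/* Sketch: create a CQ plus an RC QP that uses it for both directions,
 * destroying the CQ again if QP creation fails. */
static int sketch_create_ch_ib(struct ib_device *device, struct ib_pd *pd,
			       void *ch, int rq_size, int sq_size,
			       struct ib_cq **cq_out, struct ib_qp **qp_out)
{
	struct ib_qp_init_attr qp_init = { };
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	cq = ib_create_cq(device, sketch_cq_handler, NULL, ch,
			  rq_size + sq_size, 0);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	qp_init.qp_context = ch;
	qp_init.send_cq = cq;
	qp_init.recv_cq = cq;
	qp_init.qp_type = IB_QPT_RC;
	qp_init.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init.cap.max_send_wr = sq_size;
	qp_init.cap.max_send_sge = 1;
	qp_init.cap.max_recv_wr = rq_size;
	qp_init.cap.max_recv_sge = 1;

	qp = ib_create_qp(pd, &qp_init);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		ib_destroy_cq(cq);	/* unwind on failure */
		return ret;
	}

	*cq_out = cq;
	*qp_out = qp;
	return 0;
}
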
2207 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2209 if (ch->thread)
2210 kthread_stop(ch->thread);
2212 ib_destroy_qp(ch->qp);
2213 ib_destroy_cq(ch->cq);
2222 * Note: The caller must hold ch->sport->sdev->spinlock.
2224 static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2230 sdev = ch->sport->sdev;
2232 spin_lock_irqsave(&ch->spinlock, flags);
2233 prev_state = ch->state;
2237 ch->state = CH_DISCONNECTING;
2242 spin_unlock_irqrestore(&ch->spinlock, flags);
2246 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2250 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2264 static void srpt_close_ch(struct srpt_rdma_ch *ch)
2268 sdev = ch->sport->sdev;
2270 __srpt_close_ch(ch);
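
__srpt_close_ch() (lines 2224–2250) moves the channel to CH_DISCONNECTING under ch->spinlock and then notifies the initiator: a CM REJ if the channel never left CH_CONNECTING, or a CM DREQ for a live channel. A sketch of that decision; the set of states handled is simplified, and sketch_ch here is assumed to carry the spinlock/state fields from the state-accessor sketch plus the CM id:

#include <linux/spinlock.h>
#include <rdma/ib_cm.h>

/* enum rdma_ch_state: see the state-accessor sketch above. */
struct sketch_ch {
	spinlock_t spinlock;
	enum rdma_ch_state state;
	struct ib_cm_id *cm_id;
};

/* Sketch: mark the channel disconnecting and tell the initiator to go away. */
static void sketch_close_ch(struct sketch_ch *ch)
{
	enum rdma_ch_state prev;
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == CH_CONNECTING || prev == CH_LIVE)
		ch->state = CH_DISCONNECTING;
	spin_unlock_irqrestore(&ch->spinlock, flags);

	switch (prev) {
	case CH_CONNECTING:
		/* Never fully connected: reject the connection request. */
		ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
			       NULL, 0);
		break;
	case CH_LIVE:
		/* Connected: ask the initiator to disconnect. */
		if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
			pr_err("sending CM DREQ failed\n");
		break;
	default:
		break;
	}
}
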
2289 struct srpt_rdma_ch *ch;
2298 list_for_each_entry(ch, &sdev->rch_list, list) {
2299 if (ch->cm_id == cm_id) {
2300 do_reset = srpt_test_and_set_ch_state(ch,
2302 srpt_test_and_set_ch_state(ch,
2304 srpt_test_and_set_ch_state(ch,
2312 ret = srpt_ch_qp_err(ch);
2328 struct srpt_rdma_ch *ch;
2336 list_for_each_entry(ch, &sdev->rch_list, list) {
2337 if (ch->cm_id == cm_id) {
2344 return found ? ch : NULL;
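
srpt_find_channel() (lines 2328–2344) resolves an ib_cm_id back to its channel by walking sdev->rch_list under the device lock; the same walk is used at lines 2289–2312 to push a failing channel's QP into the error state. A sketch of the lookup, with the device fields assumed from the listing:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_cm.h>

struct sketch_ch {
	struct list_head list;		/* entry in sdev->rch_list */
	struct ib_cm_id *cm_id;
};

struct sketch_dev {
	spinlock_t spinlock;		/* protects rch_list */
	struct list_head rch_list;	/* all channels of this device */
};

/* Map a CM id back to its channel, or return NULL if it is unknown. */
static struct sketch_ch *sketch_find_channel(struct sketch_dev *sdev,
					     struct ib_cm_id *cm_id)
{
	struct sketch_ch *ch, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sdev->spinlock, flags);
	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (ch->cm_id == cm_id) {
			found = ch;
			break;
		}
	}
	spin_unlock_irqrestore(&sdev->spinlock, flags);
	return found;
}
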
2355 static void srpt_release_channel(struct srpt_rdma_ch *ch)
2357 schedule_work(&ch->release_work);
2362 struct srpt_rdma_ch *ch;
2365 ch = container_of(w, struct srpt_rdma_ch, release_work);
2366 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2367 ch->release_done);
2369 sdev = ch->sport->sdev;
2372 transport_deregister_session_configfs(ch->sess);
2373 transport_deregister_session(ch->sess);
2374 ch->sess = NULL;
2376 srpt_destroy_ch_ib(ch);
2378 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2379 ch->sport->sdev, ch->rq_size,
2380 ch->rsp_size, DMA_TO_DEVICE);
2383 list_del(&ch->list);
2386 ib_destroy_cm_id(ch->cm_id);
2388 if (ch->release_done)
2389 complete(ch->release_done);
2393 kfree(ch);
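
srpt_release_channel() (line 2357) merely queues ch->release_work; the work function at lines 2362–2393 then tears the channel down in process context: it deregisters the TCM session, destroys the QP/CQ, frees the ioctx ring, unlinks the channel from sdev->rch_list, destroys the CM id, signals any waiter through ch->release_done, and kfree()s the channel. A sketch of the deferred-release pattern itself:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct sketch_ch {
	struct work_struct release_work;	/* INIT_WORK(..., sketch_release_work) at creation */
	struct completion *release_done;	/* set by a waiter, may be NULL */
};

static void sketch_release_work(struct work_struct *w)
{
	struct sketch_ch *ch = container_of(w, struct sketch_ch, release_work);

	/* ... tear down session, QP/CQ, ioctx ring and CM id here ... */

	if (ch->release_done)
		complete(ch->release_done);	/* unblock a waiting close_session() */

	kfree(ch);
}

/* May be called from atomic context: defer the heavy teardown to a work item. */
static void sketch_release_channel(struct sketch_ch *ch)
{
	schedule_work(&ch->release_work);
}
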
2437 struct srpt_rdma_ch *ch, *tmp_ch;
2497 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2498 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2499 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2500 && param->port == ch->sport->port
2501 && param->listen_id == ch->sport->sdev->cm_id
2502 && ch->cm_id) {
2505 ch_state = srpt_get_ch_state(ch);
2513 ch->sess_name, ch->cm_id, ch_state);
2515 __srpt_close_ch(ch);
2538 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2539 if (!ch) {
2547 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2548 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2549 memcpy(ch->t_port_id, req->target_port_id, 16);
2550 ch->sport = &sdev->port[param->port - 1];
2551 ch->cm_id = cm_id;
2556 ch->rq_size = SRPT_RQ_SIZE;
2557 spin_lock_init(&ch->spinlock);
2558 ch->state = CH_CONNECTING;
2559 INIT_LIST_HEAD(&ch->cmd_wait_list);
2560 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2562 ch->ioctx_ring = (struct srpt_send_ioctx **)
2563 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2564 sizeof(*ch->ioctx_ring[0]),
2565 ch->rsp_size, DMA_TO_DEVICE);
2566 if (!ch->ioctx_ring)
2569 INIT_LIST_HEAD(&ch->free_list);
2570 for (i = 0; i < ch->rq_size; i++) {
2571 ch->ioctx_ring[i]->ch = ch;
2572 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2575 ret = srpt_create_ch_ib(ch);
2584 ret = srpt_ch_qp_rtr(ch, ch->qp);
2595 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2596 be64_to_cpu(*(__be64 *)ch->i_port_id),
2597 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2599 pr_debug("registering session %s\n", ch->sess_name);
2601 nacl = srpt_lookup_acl(sport, ch->i_port_id);
2604 " configured yet for initiator %s.\n", ch->sess_name);
2610 ch->sess = transport_init_session();
2611 if (IS_ERR(ch->sess)) {
2617 ch->sess->se_node_acl = &nacl->nacl;
2618 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2620 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2621 ch->sess_name, ch->cm_id);
2628 ch->max_ti_iu_len = it_iu_len;
2631 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2632 atomic_set(&ch->req_lim, ch->rq_size);
2633 atomic_set(&ch->req_lim_delta, 0);
2636 rep_param->qp_num = ch->qp->qp_num;
2654 list_add_tail(&ch->list, &sdev->rch_list);
2660 srpt_set_ch_state(ch, CH_RELEASING);
2661 transport_deregister_session_configfs(ch->sess);
2664 transport_deregister_session(ch->sess);
2665 ch->sess = NULL;
2668 srpt_destroy_ch_ib(ch);
2671 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2672 ch->sport->sdev, ch->rq_size,
2673 ch->rsp_size, DMA_TO_DEVICE);
2675 kfree(ch);
2708 struct srpt_rdma_ch *ch;
2711 ch = srpt_find_channel(cm_id->context, cm_id);
2712 BUG_ON(!ch);
2714 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2717 ret = srpt_ch_qp_rts(ch, ch->qp);
2719 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2722 srpt_handle_new_iu(ch, ioctx, NULL);
2725 srpt_close_ch(ch);
2746 struct srpt_rdma_ch *ch;
2750 ch = srpt_find_channel(cm_id->context, cm_id);
2751 BUG_ON(!ch);
2753 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2755 spin_lock_irqsave(&ch->spinlock, flags);
2756 switch (ch->state) {
2760 ch->state = CH_DISCONNECTING;
2765 WARN(true, "unexpected channel state %d\n", ch->state);
2768 spin_unlock_irqrestore(&ch->spinlock, flags);
2771 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2774 ch->sess_name);
2847 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2863 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2900 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2912 while (ch->state == CH_LIVE &&
2913 ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
2918 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
2926 atomic_add(n_rdma, &ch->sq_wr_avail);
2933 static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2938 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2944 ret = srpt_perform_rdmas(ch, ioctx);
2958 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2975 struct srpt_rdma_ch *ch;
2986 ch = ioctx->ch;
2987 BUG_ON(!ch);
2989 ch_state = srpt_get_ch_state(ch);
3006 ret = srpt_xfer_data(ch, ioctx);
3031 struct srpt_rdma_ch *ch;
3043 ch = ioctx->ch;
3044 BUG_ON(!ch);
3057 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3058 ch, ioctx->ioctx.index, ioctx->state);
3065 atomic_inc(&ch->req_lim_delta);
3075 ret = srpt_xfer_data(ch, ioctx);
3084 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3089 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3092 ret = srpt_post_send(ch, ioctx, resp_len);
3096 srpt_unmap_sg_to_ib_sge(ch, ioctx);
3141 struct srpt_rdma_ch *ch, *tmp_ch;
3149 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3150 __srpt_close_ch(ch);
3509 struct srpt_rdma_ch *ch;
3513 ch = se_sess->fabric_sess_ptr;
3514 WARN_ON(ch->sess != se_sess);
3516 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3518 sdev = ch->sport->sdev;
3520 BUG_ON(ch->release_done);
3521 ch->release_done = &release_done;
3522 __srpt_close_ch(ch);
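
srpt_close_session() (lines 3509–3522) is the other side of that handshake: it stores a stack completion in ch->release_done and closes the channel; the wait for the release work to signal it lies past the matched lines, so the plain wait_for_completion() below is an assumption (the driver may use a timed wait). The sketch reuses sketch_close_ch() and the release-work sketch above, and omits the sdev->spinlock protection the comment at line 2222 requires around __srpt_close_ch():

#include <linux/completion.h>

/* Sketch: the waiting side of the deferred-release handshake. */
static void sketch_close_session(struct sketch_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(release_done);

	ch->release_done = &release_done;	/* picked up by sketch_release_work() */
	sketch_close_ch(ch);			/* kicks off disconnect and deferred release */

	wait_for_completion(&release_done);	/* returns once the release work has run */
}
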