Lines Matching defs:ib_conn
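The matches below are all from the iSER initiator verbs code (iser_verbs.c in the Linux kernel's iSCSI-over-RDMA initiator); each entry is prefixed with its line number in that file. For orientation, here is a minimal sketch of struct ib_conn as implied by the fields referenced below. The field names come from the matches themselves, but the ordering, the fmr/fastreg union, and the member types are assumptions; the driver's real definition lives in its iscsi_iser.h header.

/*
 * Sketch only: per-connection RDMA state, reconstructed from the
 * references listed below. Layout and member types are assumptions,
 * not the driver's actual definition.
 */
struct ib_conn {
	struct rdma_cm_id	*cma_id;	/* RDMA-CM id (lines 623-625, 912-943) */
	struct ib_qp		*qp;		/* queue pair owned by cma_id (467-471, 589-592) */
	struct iser_device	*device;	/* device, PD, MR, completion contexts (201, 435, 708) */
	struct iser_comp	*comp;		/* completion context holding the shared CQ (446-454) */
	bool			 pi_support;	/* T10 protection information offload (460, 717-719) */
	int			 post_recv_buf_count;	/* outstanding receive buffers (876, 1057-1090) */
	struct ib_send_wr	 beacon;	/* zero-length send used to flush the QP (909-910, 664) */
	struct completion	 flush_comp;	/* signalled when the beacon drains (877, 668, 1221) */
	spinlock_t		 lock;		/* protects the fastreg descriptor pool (882, 1036-1038) */
	struct ib_recv_wr	 rx_wr[16];	/* batched recv WRs; real size is a driver constant (1074-1087) */
	union {					/* one registration scheme active per connection */
		struct {
			struct ib_fmr_pool	*pool;		/* (227-236, 969) */
			struct iser_page_vec	*page_vec;	/* (205-211, 259-260) */
		} fmr;
		struct {
			struct list_head	 pool;		/* fast-reg descriptor list (359, 378, 1037) */
			int			 pool_size;	/* (360, 379, 416-418) */
		} fastreg;
	};
};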

199 int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
201 struct iser_device *device = ib_conn->device;
205 ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
208 if (!ib_conn->fmr.page_vec)
211 ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
227 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
228 if (!IS_ERR(ib_conn->fmr.pool))
232 kfree(ib_conn->fmr.page_vec);
233 ib_conn->fmr.page_vec = NULL;
235 ret = PTR_ERR(ib_conn->fmr.pool);
236 ib_conn->fmr.pool = NULL;
249 void iser_free_fmr_pool(struct ib_conn *ib_conn)
252 ib_conn, ib_conn->fmr.pool);
254 if (ib_conn->fmr.pool != NULL)
255 ib_destroy_fmr_pool(ib_conn->fmr.pool);
257 ib_conn->fmr.pool = NULL;
259 kfree(ib_conn->fmr.page_vec);
260 ib_conn->fmr.page_vec = NULL;
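Lines 199-236 and 249-260 implement the FMR (fast memory registration) pool lifecycle. A hedged sketch of that pattern follows, assuming the driver's iscsi_iser.h context; the ib_fmr_pool_param values (SHIFT_4K, ISCSI_ISER_SG_TABLESIZE, the pool sizing) are driver constants and illustrative choices, not taken from the listing.

#include <linux/slab.h>
#include <linux/err.h>
#include <rdma/ib_fmr_pool.h>

/* Create the per-connection FMR pool (cf. lines 199-236). */
static int example_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	/* page_vec and its u64 page array are a single allocation;
	 * ->pages points just past the struct (line 211) */
	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	memset(&params, 0, sizeof(params));
	params.page_shift        = SHIFT_4K;			/* driver constant, assumed */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	params.pool_size         = cmds_max * 2;		/* illustrative sizing */
	params.dirty_watermark   = cmds_max;
	params.cache             = 0;
	params.flush_function    = NULL;
	params.access            = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* creation failed: undo the page_vec allocation (lines 232-236) */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	return ret;
}

/* Tear down in reverse order (cf. lines 249-260); both branches are
 * NULL-safe so this can run after a partially failed setup. */
static void example_free_fmr_pool(struct ib_conn *ib_conn)
{
	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}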
353 int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
355 struct iser_device *device = ib_conn->device;
359 INIT_LIST_HEAD(&ib_conn->fastreg.pool);
360 ib_conn->fastreg.pool_size = 0;
370 ib_conn->pi_support, desc);
378 list_add_tail(&desc->list, &ib_conn->fastreg.pool);
379 ib_conn->fastreg.pool_size++;
385 iser_free_fastreg_pool(ib_conn);
392 void iser_free_fastreg_pool(struct ib_conn *ib_conn)
397 if (list_empty(&ib_conn->fastreg.pool))
400 iser_info("freeing conn %p fr pool\n", ib_conn);
402 list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
416 if (i < ib_conn->fastreg.pool_size)
418 ib_conn->fastreg.pool_size - i);
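Lines 353-385 build the alternative fast-registration descriptor pool as a plain linked list, and lines 392-418 drain it. The sketch below keeps only the list management; struct example_fr_desc and the MR allocation it elides are hypothetical stand-ins for the driver's own descriptor type.

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical descriptor; the driver also hangs fast-reg MRs, page
 * lists and (when pi_support is set, line 370) a PI context off it. */
struct example_fr_desc {
	struct list_head list;
};

/* cf. lines 392-418 */
static void example_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct example_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		kfree(desc);
		++i;
	}

	/* fewer frees than allocations means descriptors are still in flight */
	if (i < ib_conn->fastreg.pool_size)
		pr_warn("pool still has %d deferred descriptors\n",
			ib_conn->fastreg.pool_size - i);
}

/* cf. lines 353-385: one descriptor per outstanding command */
static int example_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct example_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;

	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			ret = -ENOMEM;
			goto err;
		}
		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}
	return 0;

err:
	/* partial failure: give back whatever was already linked in (line 385) */
	example_free_fastreg_pool(ib_conn);
	return ret;
}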
426 static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
433 BUG_ON(ib_conn->device == NULL);
435 device = ib_conn->device;
446 ib_conn->comp = &device->comps[min_index];
447 ib_conn->comp->active_qps++;
449 iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
452 init_attr.qp_context = (void *)ib_conn;
453 init_attr.send_cq = ib_conn->comp->cq;
454 init_attr.recv_cq = ib_conn->comp->cq;
460 if (ib_conn->pi_support) {
467 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
471 ib_conn->qp = ib_conn->cma_id->qp;
473 ib_conn, ib_conn->cma_id,
474 ib_conn->cma_id->qp);
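Lines 426-474 create the connection's QP: the least loaded completion context is chosen, the QP is attached to its CQ for both send and receive, and the ib_conn pointer is stashed in qp_context so completions can be routed back (see line 1195). A sketch follows, assuming the ISER_QP_MAX_* capacities and the signature-enable flag used for pi_support; the CQ selection loop is elided.

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

static int example_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_device *device = ib_conn->device;
	struct ib_qp_init_attr init_attr;
	int min_index = 0;	/* index of the comps[] entry with the fewest active_qps */
	int ret;

	/* bind this connection to the least used completion context (446-449) */
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.qp_context       = (void *)ib_conn;	/* recovered via wc->qp->qp_context (1195) */
	init_attr.send_cq          = ib_conn->comp->cq;	/* send and recv share one CQ (453-454) */
	init_attr.recv_cq          = ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;	/* driver constants, assumed */
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type      = IB_SIGNAL_REQ_WR;
	init_attr.qp_type          = IB_QPT_RC;
	if (ib_conn->pi_support)				/* line 460 */
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		return ret;

	/* rdma_create_qp() hangs the QP off the cm id; keep a shortcut (471) */
	ib_conn->qp = ib_conn->cma_id->qp;
	return 0;
}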
581 struct ib_conn *ib_conn = &iser_conn->ib_conn;
582 struct iser_device *device = ib_conn->device;
585 iser_conn, ib_conn->cma_id, ib_conn->qp);
589 if (ib_conn->qp != NULL) {
590 ib_conn->comp->active_qps--;
591 rdma_destroy_qp(ib_conn->cma_id);
592 ib_conn->qp = NULL;
597 ib_conn->device = NULL;
606 struct ib_conn *ib_conn = &iser_conn->ib_conn;
623 if (ib_conn->cma_id != NULL) {
624 rdma_destroy_id(ib_conn->cma_id);
625 ib_conn->cma_id = NULL;
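Lines 581-597 and 606-625 tear the RDMA resources down: the QP is destroyed (and the completion context's active_qps count given back) before the cm id that owns it. Collapsed into one function for illustration; the driver splits this across two release paths and also drops its iser_device reference, which is elided here.

static void example_release_ib_conn_res(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	/* the QP must go before the rdma_cm_id that owns it (589-592, 623-625) */
	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;	/* return the CQ slot (590) */
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}
	ib_conn->device = NULL;			/* device reference dropped separately (597) */

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}
}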
637 struct ib_conn *ib_conn = &iser_conn->ib_conn;
657 if (ib_conn->cma_id) {
658 err = rdma_disconnect(ib_conn->cma_id);
664 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
666 iser_err("conn %p failed to post beacon", ib_conn);
668 wait_for_completion(&ib_conn->flush_comp);
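Lines 637-668 show the shutdown handshake: after rdma_disconnect(), the pre-built beacon send WR is posted so that its flushed completion marks the point where every earlier work request has drained, and the caller sleeps on flush_comp until the completion handler signals it (line 1221). A sketch assuming the same semantics:

static int example_terminate(struct ib_conn *ib_conn)
{
	struct ib_send_wr *bad_wr;
	int err;

	if (!ib_conn->cma_id)
		return 0;

	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		pr_err("failed to disconnect, conn: 0x%p err %d\n", ib_conn, err);

	/* the beacon was initialised at connect time (909-910); once its
	 * flush completion arrives, all earlier WRs have been reaped */
	err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
	if (err) {
		pr_err("conn %p failed to post beacon\n", ib_conn);
		return err;
	}

	wait_for_completion(&ib_conn->flush_comp);	/* completed at line 1221 */
	return 0;
}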
692 struct ib_conn *ib_conn;
700 ib_conn = &iser_conn->ib_conn;
708 ib_conn->device = device;
716 ib_conn->device->ib_device->name);
717 ib_conn->pi_support = false;
719 ib_conn->pi_support = true;
740 struct ib_conn *ib_conn = &iser_conn->ib_conn;
741 struct iser_device *device = ib_conn->device;
747 ret = iser_create_ib_conn_res(ib_conn);
858 iser_conn->ib_conn.cma_id = NULL;
876 iser_conn->ib_conn.post_recv_buf_count = 0;
877 init_completion(&iser_conn->ib_conn.flush_comp);
882 spin_lock_init(&iser_conn->ib_conn.lock);
895 struct ib_conn *ib_conn = &iser_conn->ib_conn;
905 ib_conn->device = NULL;
909 ib_conn->beacon.wr_id = ISER_BEACON_WRID;
910 ib_conn->beacon.opcode = IB_WR_SEND;
912 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
915 if (IS_ERR(ib_conn->cma_id)) {
916 err = PTR_ERR(ib_conn->cma_id);
921 err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
943 ib_conn->cma_id = NULL;
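Lines 895-943 set the connection up: the beacon WR is pre-built, an RDMA-CM id is created with iser_cma_handler as its event handler (note the pre-4.4 rdma_create_id() signature without a struct net argument), and address resolution is started with a 1000 ms timeout. The context argument and the error unwinding below are simplified assumptions.

static int example_connect(struct iser_conn *iser_conn,
			   struct sockaddr *src_addr, struct sockaddr *dst_addr)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err;

	ib_conn->device = NULL;			/* resolved later, in the address handler (708) */

	/* pre-build the zero-length beacon used at termination (909-910) */
	ib_conn->beacon.wr_id  = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler, (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
		return err;
	}

	/* asynchronous: progress and errors arrive via iser_cma_handler() */
	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;		/* cf. line 943 */
	}
	return err;
}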
957 int iser_reg_page_vec(struct ib_conn *ib_conn,
969 mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
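Lines 957-969 map a prepared page vector through the FMR pool. Sketch below; the iser_page_vec fields other than ->pages (line 211) are assumptions, and error reporting is trimmed.

static struct ib_pool_fmr *example_reg_page_vec(struct ib_conn *ib_conn,
						struct iser_page_vec *page_vec)
{
	struct ib_pool_fmr *mem;

	/* borrow a cached FMR from the pool and map the physical page list;
	 * the I/O virtual address is taken from the first page */
	mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_vec->pages,
				   page_vec->length,	/* assumed field name */
				   page_vec->pages[0]);
	if (IS_ERR(mem))
		pr_err("ib_fmr_pool_map_phys failed: %ld\n", PTR_ERR(mem));

	/* on success, mem->fmr->lkey / ->rkey describe the registered region */
	return mem;
}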
1028 struct ib_conn *ib_conn = &iser_conn->ib_conn;
1036 spin_lock_bh(&ib_conn->lock);
1037 list_add_tail(&desc->list, &ib_conn->fastreg.pool);
1038 spin_unlock_bh(&ib_conn->lock);
1044 struct ib_conn *ib_conn = &iser_conn->ib_conn;
1050 sge.lkey = ib_conn->device->mr->lkey;
1057 ib_conn->post_recv_buf_count++;
1058 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
1061 ib_conn->post_recv_buf_count--;
1070 struct ib_conn *ib_conn = &iser_conn->ib_conn;
1074 for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1086 ib_conn->post_recv_buf_count += count;
1087 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
1090 ib_conn->post_recv_buf_count -= count;
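Lines 1044-1061 post the single login receive buffer (using the device MR's lkey), and lines 1070-1090 post count buffers in one chained call. The batched variant is sketched below; setting up each WR's sg list is elided.

static int example_post_recvm(struct ib_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;

	/* chain 'count' WRs from the preallocated rx_wr[] array (1074) */
	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++)
		rx_wr->next = (i == count - 1) ? NULL : rx_wr + 1;

	/* account before posting so a racing completion never underflows;
	 * roll back if the post is rejected (1086-1090) */
	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret)
		ib_conn->post_recv_buf_count -= count;

	return ib_ret;
}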
1102 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
1108 ib_dma_sync_single_for_device(ib_conn->device->ib_device,
1119 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
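Lines 1102-1119 post a send: the descriptor's headers are synced back to the device and a single ib_send_wr is built around the descriptor's sg list, with the descriptor pointer reused as wr_id. The iser_tx_desc field names and ISER_HEADERS_LEN below are assumptions about the driver's descriptor layout.

static int example_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
			     bool signal)
{
	struct ib_send_wr send_wr, *send_wr_failed;
	int ib_ret;

	/* make the CPU-written iSER/iSCSI headers visible to the HCA (1108) */
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next       = NULL;
	send_wr.wr_id      = (uintptr_t)tx_desc;	/* recovered in the send completion (1204) */
	send_wr.sg_list    = tx_desc->tx_sg;
	send_wr.num_sge    = tx_desc->num_sge;
	send_wr.opcode     = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		pr_err("ib_post_send failed, ret: %d\n", ib_ret);

	return ib_ret;
}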
1151 * @ib_conn: connection RDMA resources
1160 iser_handle_comp_error(struct ib_conn *ib_conn,
1163 struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
1164 ib_conn);
1177 ib_conn->post_recv_buf_count--;
1191 struct ib_conn *ib_conn;
1195 ib_conn = wc->qp->qp_context;
1200 ib_conn);
1204 iser_snd_completion(tx_desc, ib_conn);
1217 iser_handle_comp_error(ib_conn, wc);
1221 complete(&ib_conn->flush_comp);
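Lines 1151-1221 route completions back to their connection: the ib_conn is recovered from wc->qp->qp_context (set at line 452), successful send/recv completions are dispatched to the descriptor handlers, and a flushed beacon completes flush_comp so the terminate path can return. A mostly-commentary sketch of that dispatch, assuming the ISER_BEACON_WRID check sits on the error path as the listing suggests:

static void example_handle_wc(struct ib_wc *wc)
{
	/* one CQ serves many connections; qp_context identifies ours (452, 1195) */
	struct ib_conn *ib_conn = wc->qp->qp_context;

	if (wc->status == IB_WC_SUCCESS) {
		/* IB_WC_RECV completions feed received PDUs up the stack,
		 * IB_WC_SEND completions release tx descriptors (1200-1204) */
		return;
	}

	if (wc->wr_id == ISER_BEACON_WRID) {
		/* the termination beacon has been flushed: every earlier WR on
		 * this QP has completed, so wake the waiter (668, 1221) */
		complete(&ib_conn->flush_comp);
		return;
	}

	/* other failed completions go to the error handler, which also drops
	 * post_recv_buf_count for flushed receives (1160-1177) */
}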