Lines matching refs: rdma (9P RDMA transport, net/9p/trans_rdma.c)

49 #include <rdma/ib_verbs.h>
50 #include <rdma/rdma_cm.h>
161 * parse_opts - parse mount options into rdma options structure
163 * @opts: rdma transport-specific structure to parse options into
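
For context, a minimal sketch of the option table parse_opts presumably walks; the token names and formats below are an assumption reconstructed from the p9_rdma_opts fields consumed later (sq_depth, rq_depth, timeout at lines 580-582, port at line 648), not verbatim matches:

    /* Sketch, assuming the usual <linux/parser.h> match_table_t idiom.
     * The option spellings (port=, rq=, sq=, timeout=) are assumed from
     * the fields the rest of the file reads out of the opts structure. */
    enum {
            Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
    };

    static match_table_t tokens = {
            {Opt_port, "port=%u"},
            {Opt_rq_depth, "rq=%u"},
            {Opt_sq_depth, "sq=%u"},
            {Opt_timeout, "timeout=%u"},
            {Opt_err, NULL},
    };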
231 struct p9_trans_rdma *rdma = c->trans;
234 BUG_ON(rdma->state != P9_RDMA_INIT);
235 rdma->state = P9_RDMA_ADDR_RESOLVED;
239 BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
240 rdma->state = P9_RDMA_ROUTE_RESOLVED;
244 BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
245 rdma->state = P9_RDMA_CONNECTED;
249 if (rdma)
250 rdma->state = P9_RDMA_CLOSED;
270 rdma_disconnect(rdma->cm_id);
275 complete(&rdma->cm_done);
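
The matches at 231-275 all fall inside the connection-manager callback, which drives a strict state machine. A sketch of the surrounding switch, assuming the standard RDMA_CM_EVENT_* cases; only the BUG_ON/state lines quoted above are verbatim:

    static int
    p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
    {
            struct p9_client *c = id->context;
            struct p9_trans_rdma *rdma = c->trans;

            switch (event->event) {
            case RDMA_CM_EVENT_ADDR_RESOLVED:       /* 234-235 */
                    BUG_ON(rdma->state != P9_RDMA_INIT);
                    rdma->state = P9_RDMA_ADDR_RESOLVED;
                    break;
            case RDMA_CM_EVENT_ROUTE_RESOLVED:      /* 239-240 */
                    BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
                    rdma->state = P9_RDMA_ROUTE_RESOLVED;
                    break;
            case RDMA_CM_EVENT_ESTABLISHED:         /* 244-245 */
                    BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
                    rdma->state = P9_RDMA_CONNECTED;
                    break;
            case RDMA_CM_EVENT_DISCONNECTED:        /* 249-250 */
                    if (rdma)
                            rdma->state = P9_RDMA_CLOSED;
                    break;
            default:
                    /* Error events (rejected, unreachable, ...) tear the
                     * connection down via rdma_disconnect(), line 270. */
                    rdma_disconnect(rdma->cm_id);
                    break;
            }
            complete(&rdma->cm_done);   /* 275: wake rdma_create_trans() */
            return 0;
    }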
280 handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
288 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
316 rdma->state = P9_RDMA_FLUSHING;
321 handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
324 ib_dma_unmap_single(rdma->cm_id->device,
338 struct p9_trans_rdma *rdma = client->trans;
342 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
348 handle_recv(client, rdma, c, wc.status, wc.byte_len);
349 up(&rdma->rq_sem);
353 handle_send(client, rdma, c, wc.status, wc.byte_len);
354 up(&rdma->sq_sem);
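
Lines 338-354 sit in the completion handler. A sketch of the polling loop, assuming the classic ib_poll_cq style these calls imply (the wc_op field on the per-request context is also an assumption); the CQ is re-armed with IB_CQ_NEXT_COMP before draining so a completion arriving mid-loop still raises a fresh event:

    static void cq_event_handler(struct ib_cq *cq, void *v)
    {
            struct p9_client *client = v;
            struct p9_trans_rdma *rdma = client->trans;
            struct ib_wc wc;

            ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);    /* 342 */
            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    struct p9_rdma_context *c =
                            (void *)(unsigned long)wc.wr_id;

                    switch (c->wc_op) {
                    case IB_WC_RECV:
                            handle_recv(client, rdma, c, wc.status,
                                        wc.byte_len);
                            up(&rdma->rq_sem);  /* return a recv credit */
                            break;
                    case IB_WC_SEND:
                            handle_send(client, rdma, c, wc.status,
                                        wc.byte_len);
                            up(&rdma->sq_sem);  /* return a send slot */
                            break;
                    default:
                            break;
                    }
                    kfree(c);
            }
    }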
371 static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
373 if (!rdma)
376 if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
377 ib_dereg_mr(rdma->dma_mr);
379 if (rdma->qp && !IS_ERR(rdma->qp))
380 ib_destroy_qp(rdma->qp);
382 if (rdma->pd && !IS_ERR(rdma->pd))
383 ib_dealloc_pd(rdma->pd);
385 if (rdma->cq && !IS_ERR(rdma->cq))
386 ib_destroy_cq(rdma->cq);
388 if (rdma->cm_id && !IS_ERR(rdma->cm_id))
389 rdma_destroy_id(rdma->cm_id);
391 kfree(rdma);
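
Note the teardown order in rdma_destroy_trans (371-391): the DMA MR is deregistered before the QP is destroyed, the QP before its PD and CQ are released, and the cm_id last, since every other object was created through it. Each step tolerates both NULL and ERR_PTR values, which lets the same function unwind a partially failed rdma_create_trans.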
397 struct p9_trans_rdma *rdma = client->trans;
401 c->busa = ib_dma_map_single(rdma->cm_id->device,
404 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
409 sge.lkey = rdma->lkey;
416 return ib_post_recv(rdma->qp, &wr, &bad_wr);
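
A sketch of post_recv around the matches at 397-416; the sge/wr assignments not visible in the matches, and the receive-buffer field name, are assumptions from standard ib_post_recv usage:

    static int post_recv(struct p9_client *client, struct p9_rdma_context *c)
    {
            struct p9_trans_rdma *rdma = client->trans;
            struct ib_recv_wr wr, *bad_wr;
            struct ib_sge sge;

            /* Map one full-msize receive buffer for the HCA (401-404). */
            c->busa = ib_dma_map_single(rdma->cm_id->device,
                                        c->rc->sdata, client->msize,
                                        DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
                    return -EIO;

            sge.addr   = c->busa;
            sge.length = client->msize;
            sge.lkey   = rdma->lkey;                /* 409 */

            wr.next    = NULL;
            wr.wr_id   = (unsigned long)c;  /* recovered in cq_event_handler */
            wr.sg_list = &sge;
            wr.num_sge = 1;
            return ib_post_recv(rdma->qp, &wr, &bad_wr);    /* 416 */
    }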
425 struct p9_trans_rdma *rdma = client->trans;
441 if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
442 if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
449 atomic_inc(&rdma->excess_rc);
468 if (down_interruptible(&rdma->rq_sem)) {
490 c->busa = ib_dma_map_single(rdma->cm_id->device,
493 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
500 sge.lkey = rdma->lkey;
510 if (down_interruptible(&rdma->sq_sem)) {
520 err = ib_post_send(rdma->qp, &wr, &bad_wr);
536 atomic_inc(&rdma->excess_rc);
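
Lines 425-536 belong to rdma_request. The excess_rc counter is a small credit scheme: when a request is flushed after its receive buffer was already posted, the buffer stays posted and excess_rc records it, so a later request can consume the credit instead of posting a fresh buffer. The sub-then-test at 441-449 makes that consumption safe against concurrent senders; the label name in this sketch is assumed:

    /* Consume an orphaned recv buffer left behind by a flushed request,
     * if one exists, rather than posting a new one. */
    if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
            if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
                    goto dont_need_post_recv;       /* got the credit */
            } else {
                    /* Raced with another sender and lost: undo. */
                    atomic_inc(&rdma->excess_rc);
            }
    }

The rest of the function then takes one rq_sem credit and posts a recv buffer (468-493), takes one sq_sem slot (510), and posts the send (520); the atomic_inc at 536 is the send-error path recording that a recv buffer was posted without its matching send, i.e. one recv post now in excess.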
542 spin_lock_irqsave(&rdma->req_lock, flags);
543 if (rdma->state < P9_RDMA_CLOSING) {
544 rdma->state = P9_RDMA_CLOSING;
545 spin_unlock_irqrestore(&rdma->req_lock, flags);
546 rdma_disconnect(rdma->cm_id);
548 spin_unlock_irqrestore(&rdma->req_lock, flags);
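
The req_lock dance at 542-548 makes the error-path shutdown idempotent: the first context to observe a fatal error advances the state to P9_RDMA_CLOSING and issues the single rdma_disconnect; anyone arriving later sees the state already at or past CLOSING and simply drops the lock.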
554 struct p9_trans_rdma *rdma;
559 rdma = client->trans;
560 if (!rdma)
564 rdma_disconnect(rdma->cm_id);
565 rdma_destroy_trans(rdma);
569 * alloc_rdma - Allocate and initialize the rdma transport structure
574 struct p9_trans_rdma *rdma;
576 rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
577 if (!rdma)
580 rdma->sq_depth = opts->sq_depth;
581 rdma->rq_depth = opts->rq_depth;
582 rdma->timeout = opts->timeout;
583 spin_lock_init(&rdma->req_lock);
584 init_completion(&rdma->cm_done);
585 sema_init(&rdma->sq_sem, rdma->sq_depth);
586 sema_init(&rdma->rq_sem, rdma->rq_depth);
587 atomic_set(&rdma->excess_rc, 0);
589 return rdma;
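
Note how alloc_rdma (574-589) seeds the two semaphores with the negotiated queue depths, turning sq_sem and rq_sem into counting credits: rdma_request does down_interruptible() before each post (468 and 510) and the completion handler does up() as work completes (349 and 354), keeping outstanding work requests within what the QP will be sized for.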
605 struct p9_trans_rdma *rdma = client->trans;
606 atomic_inc(&rdma->excess_rc);
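
This atomic_inc at 606 is the producer side of excess_rc, presumably in the client's cancellation callback: a request aborted after its receive buffer was posted leaves that buffer with the HCA, and the counter hands it to a future rdma_request as described above.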
621 struct p9_trans_rdma *rdma;
632 rdma = alloc_rdma(&opts);
633 if (!rdma)
637 rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
639 if (IS_ERR(rdma->cm_id))
643 client->trans = rdma;
646 rdma->addr.sin_family = AF_INET;
647 rdma->addr.sin_addr.s_addr = in_aton(addr);
648 rdma->addr.sin_port = htons(opts.port);
649 err = rdma_resolve_addr(rdma->cm_id, NULL,
650 (struct sockaddr *)&rdma->addr,
651 rdma->timeout);
654 err = wait_for_completion_interruptible(&rdma->cm_done);
655 if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
659 err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
662 err = wait_for_completion_interruptible(&rdma->cm_done);
663 if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
667 err = ib_query_device(rdma->cm_id->device, &devattr);
672 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
675 if (IS_ERR(rdma->cq))
677 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
680 rdma->pd = ib_alloc_pd(rdma->cm_id->device);
681 if (IS_ERR(rdma->pd))
685 rdma->dma_mr = NULL;
687 rdma->lkey = rdma->cm_id->device->local_dma_lkey;
689 rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
690 if (IS_ERR(rdma->dma_mr))
692 rdma->lkey = rdma->dma_mr->lkey;
705 qp_attr.send_cq = rdma->cq;
706 qp_attr.recv_cq = rdma->cq;
707 err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
710 rdma->qp = rdma->cm_id->qp;
718 err = rdma_connect(rdma->cm_id, &conn_param);
721 err = wait_for_completion_interruptible(&rdma->cm_done);
722 if (err || (rdma->state != P9_RDMA_CONNECTED))
730 rdma_destroy_trans(rdma);
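
Taken together, rdma_create_trans (621-730) performs the canonical active-side CM sequence, parking on cm_done after each asynchronous step until p9_cm_event_handler advances the state: resolve the address, resolve the route, query the device, create the CQ, allocate the PD, obtain an lkey (preferring the device's local_dma_lkey when available over registering a DMA MR, 685-692), create the QP on that PD and CQ, and finally rdma_connect. Every failure funnels to rdma_destroy_trans at 730, which is exactly why that function tolerates partially constructed state.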
735 .name = "rdma",