Lines Matching defs:tx

44 kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
53 LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
54 LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
55 LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
56 LASSERT (tx->tx_pool != NULL);
58 kiblnd_unmap_tx(ni, tx);
60 /* tx may have up to 2 lnet msgs to finalise */
61 lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
62 lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
63 rc = tx->tx_status;
65 if (tx->tx_conn != NULL) {
66 LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
68 kiblnd_conn_decref(tx->tx_conn);
69 tx->tx_conn = NULL;
72 tx->tx_nwrq = 0;
73 tx->tx_status = 0;
75 kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
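
The kiblnd_tx_done() lines above (44-75) show the retirement path for a tx descriptor: it must no longer be queued, sending or waiting (lines 53-55), its up-to-two LNet messages are detached (lines 61-62), the connection reference is dropped, and the descriptor is reset and handed back to its pool at line 75. The detached messages are finalised afterwards; those calls do not mention tx, so they do not appear in this listing, and the finalise-after-free ordering should be read as an assumption. Below is a minimal userspace sketch of that ordering, using made-up demo_* names rather than the driver's own types:

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative stand-in for the tx state tracked by kib_tx_t. */
    struct demo_tx {
            int   queued;           /* on a send queue */
            int   sending;          /* posted sends not yet completed */
            int   waiting;          /* awaiting a peer response */
            int   status;           /* completion status */
            void *lntmsg[2];        /* up to 2 LNet messages to finalise */
            void *conn;             /* owning connection, if any */
    };

    static void demo_finalize(void *msg, int status) { (void)msg; (void)status; }
    static void demo_conn_decref(void *conn)         { (void)conn; }
    static void demo_pool_free(struct demo_tx *tx)   { (void)tx; }

    static void demo_tx_done(struct demo_tx *tx)
    {
            void *m0, *m1;
            int   rc;

            /* Mirrors the LASSERTs at lines 53-55. */
            assert(!tx->queued && tx->sending == 0 && !tx->waiting);

            m0 = tx->lntmsg[0]; tx->lntmsg[0] = NULL;
            m1 = tx->lntmsg[1]; tx->lntmsg[1] = NULL;
            rc = tx->status;

            if (tx->conn != NULL) {
                    demo_conn_decref(tx->conn);
                    tx->conn = NULL;
            }

            tx->status = 0;
            demo_pool_free(tx);     /* descriptor back in its pool (line 75) */

            /* Finalise the detached messages last; tx no longer refers to them. */
            if (m0 != NULL) demo_finalize(m0, rc);
            if (m1 != NULL) demo_finalize(m1, rc);
    }

    int main(void)
    {
            struct demo_tx tx = { .sending = 0, .status = -5 };
            demo_tx_done(&tx);
            return 0;
    }
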
89 kib_tx_t *tx;
92 tx = list_entry (txlist->next, kib_tx_t, tx_list);
94 list_del(&tx->tx_list);
96 tx->tx_waiting = 0;
97 tx->tx_status = status;
98 kiblnd_tx_done(ni, tx);
107 kib_tx_t *tx;
114 tx = container_of(node, kib_tx_t, tx_list);
116 LASSERT (tx->tx_nwrq == 0);
117 LASSERT (!tx->tx_queued);
118 LASSERT (tx->tx_sending == 0);
119 LASSERT (!tx->tx_waiting);
120 LASSERT (tx->tx_status == 0);
121 LASSERT (tx->tx_conn == NULL);
122 LASSERT (tx->tx_lntmsg[0] == NULL);
123 LASSERT (tx->tx_lntmsg[1] == NULL);
124 LASSERT (tx->tx_u.pmr == NULL);
125 LASSERT (tx->tx_nfrags == 0);
127 return tx;
218 kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
220 LASSERT (!tx->tx_queued);
221 LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
223 if (tx->tx_cookie != cookie)
226 if (tx->tx_waiting &&
227 tx->tx_msg->ibm_type == txtype)
228 return tx;
231 tx->tx_waiting ? "" : "NOT ",
232 tx->tx_msg->ibm_type, txtype);
240 kib_tx_t *tx;
246 tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
247 if (tx == NULL) {
256 if (tx->tx_status == 0) { /* success so far */
258 tx->tx_status = status;
260 lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
264 tx->tx_waiting = 0;
266 idle = !tx->tx_queued && (tx->tx_sending == 0);
268 list_del(&tx->tx_list);
273 kiblnd_tx_done(ni, tx);
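
kiblnd_find_waiting_tx_locked() and kiblnd_handle_completion() (lines 218-273) show how an incoming completion is matched back to the tx that is waiting for it: scan the active txs for a matching cookie, require that the tx is still waiting and carries the expected message type, record the status, clear tx_waiting, and retire the tx only if it is neither queued nor still sending (line 266). A standalone model of that match-and-maybe-retire step, with a plain array in place of the kernel list and demo_* names throughout:

    #include <stdio.h>
    #include <stdint.h>

    struct demo_tx {
            uint64_t cookie;
            int      msg_type;
            int      waiting;
            int      queued;
            int      sending;
            int      status;
    };

    /* Scan "active" txs for one waiting for (type, cookie), in the
     * spirit of kiblnd_find_waiting_tx_locked() over ibc_active_txs. */
    static struct demo_tx *
    demo_find_waiting(struct demo_tx *txs, int n, int type, uint64_t cookie)
    {
            for (int i = 0; i < n; i++) {
                    if (txs[i].cookie != cookie)
                            continue;
                    if (txs[i].waiting && txs[i].msg_type == type)
                            return &txs[i];
                    return NULL;    /* right cookie, wrong state/type */
            }
            return NULL;
    }

    int main(void)
    {
            struct demo_tx txs[2] = {
                    { .cookie = 1, .msg_type = 1, .waiting = 1, .sending = 1 },
                    { .cookie = 2, .msg_type = 2, .waiting = 1, .sending = 0 },
            };
            struct demo_tx *tx = demo_find_waiting(txs, 2, 2, 2);

            if (tx != NULL) {
                    tx->status  = 0;    /* completion status from the peer */
                    tx->waiting = 0;    /* got the response we were waiting for */
                    /* Retire only if no send is still in flight (line 266). */
                    int idle = !tx->queued && tx->sending == 0;
                    printf("idle=%d\n", idle);
            }
            return 0;
    }
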
280 kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
282 if (tx == NULL) {
283 CERROR("Can't get tx for completion %x for %s\n",
288 tx->tx_msg->ibm_u.completion.ibcm_status = status;
289 tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
290 kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
292 kiblnd_queue_tx(tx, conn);
302 kib_tx_t *tx;
392 tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
394 if (tx != NULL)
395 list_del(&tx->tx_list);
398 if (tx == NULL) {
405 LASSERT (tx->tx_waiting);
410 tx->tx_nwrq = 0; /* overwrite PUT_REQ */
412 rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
421 tx->tx_waiting = 0; /* clear waiting and queue atomically */
422 kiblnd_queue_tx_locked(tx, conn);
551 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
554 __u64 *pages = tx->tx_pages;
562 LASSERT(tx->tx_pool != NULL);
563 LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
565 hdev = tx->tx_pool->tpo_hdev;
575 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
578 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
586 rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
587 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
596 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
604 LASSERT(tx->tx_pool != NULL);
605 LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
607 hdev = tx->tx_pool->tpo_hdev;
611 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
614 rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr);
622 rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
623 tx->tx_u.pmr->pmr_mr->lkey;
632 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
638 if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
639 kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
640 tx->tx_u.fmr.fmr_pfmr = NULL;
642 } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) {
643 kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
644 tx->tx_u.pmr = NULL;
647 if (tx->tx_nfrags != 0) {
648 kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
649 tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
650 tx->tx_nfrags = 0;
655 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
658 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
666 tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
667 tx->tx_nfrags = nfrags;
671 tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
675 hdev->ibh_ibdev, &tx->tx_frags[i]);
677 hdev->ibh_ibdev, &tx->tx_frags[i]);
685 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
690 return kiblnd_fmr_map_tx(net, tx, rd, nob);
692 return kiblnd_pmr_map_tx(net, tx, rd, nob);
699 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
720 sg = tx->tx_frags;
748 return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
752 kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
772 sg = tx->tx_frags;
788 return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
792 kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
796 kib_msg_t *msg = tx->tx_msg;
803 LASSERT (tx->tx_queued);
805 LASSERT (tx->tx_nwrq > 0);
806 LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
815 /* tx completions outstanding... */
836 list_del(&tx->tx_list);
837 tx->tx_queued = 0;
847 kiblnd_tx_done(peer->ibp_ni, tx);
864 /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
870 tx->tx_sending++;
871 list_add(&tx->tx_list, &conn->ibc_active_txs);
876 } else if (tx->tx_pool->tpo_pool.po_failed ||
877 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
882 tx->tx_wrq, &bad_wrq);
898 tx->tx_status = rc;
899 tx->tx_waiting = 0;
900 tx->tx_sending--;
902 done = (tx->tx_sending == 0);
904 list_del(&tx->tx_list);
918 kiblnd_tx_done(peer->ibp_ni, tx);
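
kiblnd_post_tx_locked() (lines 792-918) wraps the actual ib_post_send() in careful accounting: the tx is dequeued (lines 836-837), tx_sending is bumped and the tx parked on the active list before posting (lines 870-871), and if the post fails the state is unwound and the tx retired only when no earlier post is still in flight (lines 898-918). A simplified model of that optimistic-account-then-roll-back pattern, with a fake post function in place of ib_post_send():

    #include <assert.h>
    #include <stdio.h>

    struct demo_tx {
            int queued;
            int sending;
            int waiting;
            int status;
    };

    /* Stand-in for ib_post_send(): 0 on success, negative errno-style
     * value on failure. */
    static int demo_post_send(int fail) { return fail ? -22 : 0; }

    /* Account the send before posting, undo on failure, echoing
     * lines 836-837, 870-871 and 898-904. */
    static int demo_post_tx(struct demo_tx *tx, int fail)
    {
            assert(tx->queued);

            tx->queued = 0;         /* dequeued (lines 836-837) */
            tx->sending++;          /* counted before posting (line 870) */

            int rc = demo_post_send(fail);
            if (rc == 0)
                    return 0;

            /* Post failed: undo, and retire only if this was the sole
             * outstanding send (line 902). */
            tx->status  = rc;
            tx->waiting = 0;
            tx->sending--;
            if (tx->sending == 0)
                    printf("tx is done with rc=%d\n", rc);
            return rc;
    }

    int main(void)
    {
            struct demo_tx tx = { .queued = 1 };
            demo_post_tx(&tx, 1);
            return 0;
    }
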
930 kib_tx_t *tx;
948 tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
950 list_del(&tx->tx_list);
951 list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
958 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
959 if (tx != NULL)
960 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
963 if (tx != NULL)
964 kiblnd_queue_tx_locked(tx, conn);
974 tx = list_entry(conn->ibc_tx_queue_nocred.next,
979 tx = list_entry(conn->ibc_tx_noops.next,
983 tx = list_entry(conn->ibc_tx_queue.next,
988 if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
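
kiblnd_check_sends() (lines 930-988) first moves anything on the reserved queue back onto the normal queue (lines 948-951), queues a NOOP when it only needs to return credits (lines 958-964), and then picks the next tx to post in a fixed priority order: the no-credit queue, then queued NOOPs, then the regular queue (lines 974-988). The credit values in the sketch below (0 for the no-credit queue, 1 otherwise) are inferred from the queue names rather than shown in the listing:

    #include <stdio.h>

    enum { Q_NOCRED, Q_NOOPS, Q_QUEUE, Q_COUNT };

    /* Pick the next queue to drain, in the order the listing shows:
     * no-credit messages, then NOOPs, then the regular queue. Returns
     * the queue index, or -1 if everything is empty, and reports
     * whether posting from it consumes a send credit (assumed). */
    static int demo_pick_queue(const int pending[Q_COUNT], int *credit)
    {
            if (pending[Q_NOCRED] > 0) { *credit = 0; return Q_NOCRED; }
            if (pending[Q_NOOPS]  > 0) { *credit = 1; return Q_NOOPS;  }
            if (pending[Q_QUEUE]  > 0) { *credit = 1; return Q_QUEUE;  }
            return -1;
    }

    int main(void)
    {
            int pending[Q_COUNT] = { 0, 1, 3 };
            int credit, q;

            while ((q = demo_pick_queue(pending, &credit)) >= 0) {
                    printf("post from queue %d (credit=%d)\n", q, credit);
                    pending[q]--;
            }
            return 0;
    }
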
998 kiblnd_tx_complete (kib_tx_t *tx, int status)
1001 kib_conn_t *conn = tx->tx_conn;
1004 LASSERT (tx->tx_sending > 0);
1010 tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
1020 /* I could be racing with rdma completion. Whoever makes 'tx' idle
1023 tx->tx_sending--;
1025 if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
1029 tx->tx_waiting = 0; /* don't wait for peer */
1030 tx->tx_status = -EIO;
1033 idle = (tx->tx_sending == 0) && /* This is the final callback */
1034 !tx->tx_waiting && /* Not waiting for peer */
1035 !tx->tx_queued; /* Not re-queued (PUT_DONE) */
1037 list_del(&tx->tx_list);
1044 kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
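
kiblnd_tx_complete() (lines 998-1044) is one side of the race the comment at line 1020 warns about: the send-completion path decrements tx_sending while the peer-response path clears tx_waiting, and whichever of them last observes tx_sending == 0, !tx_waiting and !tx_queued (lines 1033-1035) is the one that retires the tx. That hand-off can be modelled directly (demo_* names only, locking omitted):

    #include <assert.h>
    #include <stdio.h>

    struct demo_tx {
            int sending;    /* posted sends not yet completed */
            int waiting;    /* still expecting a peer response */
            int queued;     /* re-queued (e.g. PUT_DONE) */
    };

    /* The test at lines 1033-1035: idle only when nothing else can
     * still touch the tx. */
    static int demo_idle(const struct demo_tx *tx)
    {
            return tx->sending == 0 && !tx->waiting && !tx->queued;
    }

    /* Send-completion path (as in kiblnd_tx_complete). */
    static int demo_send_done(struct demo_tx *tx)
    {
            tx->sending--;
            return demo_idle(tx);
    }

    /* Peer-response path (as in kiblnd_handle_completion). */
    static int demo_reply_done(struct demo_tx *tx)
    {
            tx->waiting = 0;
            return demo_idle(tx);
    }

    int main(void)
    {
            struct demo_tx tx = { .sending = 1, .waiting = 1, .queued = 0 };

            /* Whichever event fires second is the one that retires tx. */
            int idle1 = demo_send_done(&tx);    /* 0: still waiting */
            int idle2 = demo_reply_done(&tx);   /* 1: this path retires it */
            assert(!idle1 && idle2);
            printf("retired by the second event\n");
            return 0;
    }
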
1052 kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
1054 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
1055 struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
1056 struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
1060 LASSERT (tx->tx_nwrq >= 0);
1061 LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
1064 kiblnd_init_msg(tx->tx_msg, type, body_nob);
1066 mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
1070 sge->addr = tx->tx_msgaddr;
1076 wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
1082 tx->tx_nwrq++;
1086 kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
1089 kib_msg_t *ibmsg = tx->tx_msg;
1090 kib_rdma_desc_t *srcrd = tx->tx_rd;
1091 struct ib_sge *sge = &tx->tx_sge[0];
1092 struct ib_send_wr *wrq = &tx->tx_wrq[0];
1099 LASSERT (tx->tx_nwrq == 0);
1118 if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
1132 sge = &tx->tx_sge[tx->tx_nwrq];
1137 wrq = &tx->tx_wrq[tx->tx_nwrq];
1140 wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
1154 tx->tx_nwrq++;
1160 tx->tx_nwrq = 0;
1164 kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
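
kiblnd_init_rdma() (lines 1086-1164) turns one logical RDMA into a chain of work requests, one per overlapping source/destination fragment, resetting tx_nwrq and failing if the transfer would need more than IBLND_RDMA_FRAGS() entries (lines 1118, 1160), and finally appending the PUT_DONE/GET_DONE message as its own work request (line 1164). A userspace sketch of just the chunking loop, with flat fragment arrays standing in for kib_rdma_desc_t:

    #include <stdio.h>

    struct demo_frag { int nob; };      /* fragment length in bytes */

    #define DEMO_MAX_WRQ 4              /* stands in for IBLND_RDMA_FRAGS */

    /* Walk the source and destination fragment lists in step, emitting
     * one "work request" per contiguous overlap, in the spirit of the
     * loop at lines 1099-1154. */
    static int demo_init_rdma(const struct demo_frag *src, int nsrc,
                              const struct demo_frag *dst, int ndst,
                              int resid)
    {
            int si = 0, di = 0;
            int soff = 0, doff = 0;
            int nwrq = 0;

            while (resid > 0) {
                    if (si == nsrc || di == ndst)
                            return -1;      /* descriptors too short */
                    if (nwrq == DEMO_MAX_WRQ)
                            return -1;      /* "too fragmented" (line 1118) */

                    int savail = src[si].nob - soff;
                    int davail = dst[di].nob - doff;
                    int wrknob = savail < davail ? savail : davail;
                    if (wrknob > resid)
                            wrknob = resid;

                    printf("wrq %d: %d bytes\n", nwrq, wrknob);
                    nwrq++;

                    resid -= wrknob;
                    soff  += wrknob;
                    doff  += wrknob;
                    if (soff == src[si].nob) { si++; soff = 0; }
                    if (doff == dst[di].nob) { di++; doff = 0; }
            }
            return nwrq;
    }

    int main(void)
    {
            struct demo_frag src[] = { {4096}, {4096} };
            struct demo_frag dst[] = { {1024}, {8192} };
            printf("total wrqs: %d\n", demo_init_rdma(src, 2, dst, 2, 6000));
            return 0;
    }
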
1171 kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
1175 LASSERT (tx->tx_nwrq > 0); /* work items set up */
1176 LASSERT (!tx->tx_queued); /* not queued for sending already */
1179 tx->tx_queued = 1;
1180 tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
1182 if (tx->tx_conn == NULL) {
1184 tx->tx_conn = conn;
1185 LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
1188 LASSERT (tx->tx_conn == conn);
1189 LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
1192 switch (tx->tx_msg->ibm_type) {
1220 list_add_tail(&tx->tx_list, q);
1224 kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
1227 kiblnd_queue_tx_locked(tx, conn);
1336 kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
1345 /* If I get here, I've committed to send, so I complete the tx with
1348 LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
1349 LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
1363 if (tx != NULL)
1364 kiblnd_queue_tx(tx, conn);
1379 if (tx != NULL)
1380 list_add_tail(&tx->tx_list,
1389 if (tx != NULL)
1390 kiblnd_queue_tx(tx, conn);
1402 if (tx != NULL) {
1403 tx->tx_status = -EHOSTUNREACH;
1404 tx->tx_waiting = 0;
1405 kiblnd_tx_done(ni, tx);
1418 if (tx != NULL)
1419 list_add_tail(&tx->tx_list,
1428 if (tx != NULL)
1429 kiblnd_queue_tx(tx, conn);
1444 if (tx != NULL)
1445 list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
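
kiblnd_launch_tx() (lines 1336-1445) decides what to do with a tx that has no connection assigned yet: queue it immediately on an established connection, park it on the peer's ibp_tx_queue while a connection attempt is in progress, or fail it with -EHOSTUNREACH when no peer can be set up (lines 1402-1405). The branches not visible in the listing (peer creation, starting the connect) are summarised from context, so treat the sketch below as a rough decision tree rather than the function's exact control flow:

    #include <stdio.h>

    enum demo_outcome {
            DEMO_QUEUE_ON_CONN,     /* established connection: queue now   */
            DEMO_STASH_ON_PEER,     /* connection in progress: queue later */
            DEMO_FAIL_UNREACHABLE,  /* no peer could be set up             */
    };

    /* Rough decision tree of kiblnd_launch_tx(); the real function also
     * creates the peer and initiates the connect in the last case. */
    static enum demo_outcome
    demo_launch_tx(int have_conn, int peer_connecting, int can_create_peer)
    {
            if (have_conn)
                    return DEMO_QUEUE_ON_CONN;
            if (peer_connecting)
                    return DEMO_STASH_ON_PEER;
            if (!can_create_peer)
                    return DEMO_FAIL_UNREACHABLE;   /* -EHOSTUNREACH */
            /* New peer: stash the tx and start connecting. */
            return DEMO_STASH_ON_PEER;
    }

    int main(void)
    {
            printf("%d\n", demo_launch_tx(0, 0, 1));    /* stash + connect */
            return 0;
    }
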
1470 kib_tx_t *tx;
1505 tx = kiblnd_get_idle_tx(ni, target.nid);
1506 if (tx == NULL) {
1512 ibmsg = tx->tx_msg;
1515 rc = kiblnd_setup_rd_iov(ni, tx,
1521 rc = kiblnd_setup_rd_kiov(ni, tx,
1529 kiblnd_tx_done(ni, tx);
1533 nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
1534 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1537 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1539 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1540 if (tx->tx_lntmsg[1] == NULL) {
1543 kiblnd_tx_done(ni, tx);
1547 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
1548 tx->tx_waiting = 1; /* waiting for GET_DONE */
1549 kiblnd_launch_tx(ni, tx, target.nid);
1559 tx = kiblnd_get_idle_tx(ni, target.nid);
1560 if (tx == NULL) {
1568 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1572 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1578 kiblnd_tx_done(ni, tx);
1582 ibmsg = tx->tx_msg;
1584 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1585 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
1587 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
1588 tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
1589 kiblnd_launch_tx(ni, tx, target.nid);
1598 tx = kiblnd_get_idle_tx(ni, target.nid);
1599 if (tx == NULL) {
1600 CERROR ("Can't send %d to %s: tx descs exhausted\n",
1605 ibmsg = tx->tx_msg;
1620 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1622 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
1623 kiblnd_launch_tx(ni, tx, target.nid);
1636 kib_tx_t *tx;
1639 tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1640 if (tx == NULL) {
1641 CERROR("Can't get tx for REPLY to %s\n",
1649 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1652 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1661 rc = kiblnd_init_rdma(rx->rx_conn, tx,
1677 tx->tx_lntmsg[0] = lntmsg;
1680 kiblnd_queue_tx(tx, rx->rx_conn);
1684 kiblnd_tx_done(ni, tx);
1697 kib_tx_t *tx;
1743 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1744 if (tx == NULL) {
1745 CERROR("Can't allocate tx for %s\n",
1752 txmsg = tx->tx_msg;
1754 rc = kiblnd_setup_rd_iov(ni, tx,
1758 rc = kiblnd_setup_rd_kiov(ni, tx,
1764 kiblnd_tx_done(ni, tx);
1771 nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
1773 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1775 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1777 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
1778 tx->tx_waiting = 1; /* waiting for PUT_DONE */
1779 kiblnd_queue_tx(tx, conn);
1963 kib_tx_t *tx;
1968 tx = list_entry (tmp, kib_tx_t, tx_list);
1971 LASSERT (!tx->tx_queued);
1972 LASSERT (tx->tx_waiting ||
1973 tx->tx_sending != 0);
1975 LASSERT (tx->tx_queued);
1978 tx->tx_status = -ECONNABORTED;
1979 tx->tx_waiting = 0;
1981 if (tx->tx_sending == 0) {
1982 tx->tx_queued = 0;
1983 list_del (&tx->tx_list);
1984 list_add (&tx->tx_list, &zombies);
2006 /* Complete all tx descs not waiting for sends to complete.
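
The abort path at lines 1963-2006 fails every tx on a dying connection with -ECONNABORTED, but only the ones with no send still posted can be moved off to a zombie list and completed immediately; the comment at line 2006 spells this out. A compact model of that two-way split (demo_* types, kernel lists replaced by a flag):

    #include <errno.h>
    #include <stdio.h>

    struct demo_tx {
            int sending;
            int waiting;
            int queued;
            int status;
            int zombie;     /* moved to the local "complete now" list */
    };

    /* Abort pass over a dying connection's txs: every tx is failed, but
     * only those with no send still posted are completed immediately;
     * the rest wait for their send completions to arrive. */
    static int demo_abort_txs(struct demo_tx *txs, int n)
    {
            int zombies = 0;

            for (int i = 0; i < n; i++) {
                    txs[i].status  = -ECONNABORTED;
                    txs[i].waiting = 0;

                    if (txs[i].sending == 0) {
                            txs[i].queued = 0;
                            txs[i].zombie = 1;
                            zombies++;
                    }
            }
            return zombies;
    }

    int main(void)
    {
            struct demo_tx txs[3] = {
                    { .sending = 0, .queued  = 1 },
                    { .sending = 2, .waiting = 1 },
                    { .sending = 0, .waiting = 1 },
            };
            printf("%d txs completed immediately\n", demo_abort_txs(txs, 3));
            return 0;
    }
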
2076 kib_tx_t *tx;
2154 tx = list_entry(txs.next, kib_tx_t, tx_list);
2155 list_del(&tx->tx_list);
2157 kiblnd_queue_tx_locked(tx, conn);
2991 kib_tx_t *tx;
2995 tx = list_entry (ttmp, kib_tx_t, tx_list);
2998 LASSERT (tx->tx_queued);
3000 LASSERT (!tx->tx_queued);
3001 LASSERT (tx->tx_waiting || tx->tx_sending != 0);
3004 if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
3005 CERROR("Timed out tx: %s, %lu seconds\n",
3007 cfs_duration_sec(jiffies - tx->tx_deadline));
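
Queued txs get a deadline of jiffies plus the configured timeout (line 1180), and the timeout scan at lines 3004-3007 expires them with cfs_time_aftereq(). Assuming cfs_time_aftereq() has the usual wrap-safe time_after_eq() semantics (an assumption, not shown in the listing), the comparison looks like:

    #include <stdio.h>

    /* Wrap-safe "now is at or past deadline" check in the style of the
     * kernel's time_after_eq(): jiffies-like counters are unsigned and
     * may wrap, so compare the signed difference instead. */
    static int demo_time_after_eq(unsigned long now, unsigned long deadline)
    {
            return (long)(now - deadline) >= 0;
    }

    int main(void)
    {
            unsigned long hz = 100;                 /* pretend HZ */
            unsigned long timeout = 50;             /* pretend kib_timeout */
            unsigned long now = (unsigned long)-10; /* close to wrapping */
            unsigned long deadline = now + timeout * hz;    /* wraps */

            printf("expired now?   %d\n", demo_time_after_eq(now, deadline));
            printf("expired later? %d\n",
                   demo_time_after_eq(now + timeout * hz + 1, deadline));
            return 0;
    }
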
3097 * NOOP, but there were no non-blocking tx descs
3257 * failing RDMA because 'tx' might be back on the idle list or
3260 CNETERR("RDMA (tx: %p) failed: %d\n",