Lines Matching refs:qp

84 	struct ntb_transport_qp *qp;
109 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
119 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
203 #define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev))
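
The QP_TO_MW() macro above stripes queue pairs round-robin across the device's memory windows. An illustration, assuming ntb_max_mw(ndev) returns 2:

    /* Illustration only, assuming ntb_max_mw(ndev) == 2:
     *
     *   QP_TO_MW(ndev, 0) == 0   QP_TO_MW(ndev, 1) == 1
     *   QP_TO_MW(ndev, 2) == 0   QP_TO_MW(ndev, 3) == 1
     *
     * Even-numbered qps share memory window 0, odd-numbered qps share
     * memory window 1; each MW is then subdivided per qp (see lines
     * 503-540 and 877-892 below).
     */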
398 struct ntb_transport_qp *qp;
408 qp = filp->private_data;
413 "rx_bytes - \t%llu\n", qp->rx_bytes);
415 "rx_pkts - \t%llu\n", qp->rx_pkts);
417 "rx_memcpy - \t%llu\n", qp->rx_memcpy);
419 "rx_async - \t%llu\n", qp->rx_async);
421 "rx_ring_empty - %llu\n", qp->rx_ring_empty);
423 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
425 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
427 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
429 "rx_buff - \t%p\n", qp->rx_buff);
431 "rx_index - \t%u\n", qp->rx_index);
433 "rx_max_entry - \t%u\n", qp->rx_max_entry);
436 "tx_bytes - \t%llu\n", qp->tx_bytes);
438 "tx_pkts - \t%llu\n", qp->tx_pkts);
440 "tx_memcpy - \t%llu\n", qp->tx_memcpy);
442 "tx_async - \t%llu\n", qp->tx_async);
444 "tx_ring_full - \t%llu\n", qp->tx_ring_full);
446 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
448 "tx_mw - \t%p\n", qp->tx_mw);
450 "tx_index - \t%u\n", qp->tx_index);
452 "tx_max_entry - \t%u\n", qp->tx_max_entry);
455 "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
503 struct ntb_transport_qp *qp = &nt->qps[qp_num];
519 qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
522 qp->remote_rx_info = qp->rx_buff + rx_size;
525 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
526 qp->rx_max_entry = rx_size / qp->rx_max_frame;
527 qp->rx_index = 0;
529 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
532 for (i = 0; i < qp->rx_max_entry; i++) {
533 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
538 qp->rx_pkts = 0;
539 qp->tx_pkts = 0;
540 qp->tx_index = 0;
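
Lines 519-540 size each qp's receive ring: the frame size is capped at half the qp's memory-window slice, so there are always at least two frames per ring. A worked example with illustrative numbers (transport_mtu and rx_size are assumptions, not values read from hardware):

    #include <stdio.h>

    int main(void)
    {
        unsigned int transport_mtu = 0x10000; /* assumed module parameter */
        unsigned int rx_size = 0x8000;        /* assumed per-qp MW slice */
        unsigned int rx_max_frame = transport_mtu < rx_size / 2 ?
                                    transport_mtu : rx_size / 2;
        unsigned int rx_max_entry = rx_size / rx_max_frame;

        /* rx_max_frame = 0x4000, rx_max_entry = 2; each frame carries a
         * struct ntb_payload_header at its tail (see line 533). */
        printf("frame %#x, entries %u\n", rx_max_frame, rx_max_entry);
        return 0;
    }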
598 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
600 struct ntb_transport *nt = qp->transport;
603 if (qp->qp_link == NTB_LINK_DOWN) {
604 cancel_delayed_work_sync(&qp->link_work);
608 if (qp->event_handler)
609 qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
611 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
612 qp->qp_link = NTB_LINK_DOWN;
617 struct ntb_transport_qp *qp = container_of(work,
620 struct ntb_transport *nt = qp->transport;
622 ntb_qp_link_cleanup(qp);
625 schedule_delayed_work(&qp->link_work,
629 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
631 schedule_work(&qp->link_cleanup);
793 struct ntb_transport_qp *qp = &nt->qps[i];
797 if (qp->client_ready == NTB_LINK_UP)
798 schedule_delayed_work(&qp->link_work, 0);
814 struct ntb_transport_qp *qp = container_of(work,
817 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
818 struct ntb_transport *nt = qp->transport;
829 rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
832 val | 1 << qp->qp_num, QP_LINKS);
834 /* query remote spad for qp ready bits */
842 if (1 << qp->qp_num & val) {
843 qp->qp_link = NTB_LINK_UP;
845 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
846 if (qp->event_handler)
847 qp->event_handler(qp->cb_data, NTB_LINK_UP);
849 schedule_delayed_work(&qp->link_work,
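
Lines 829-849 implement the per-qp link handshake over a single scratchpad register used as a bitmask: each side ORs its own qp bit into the peer's QP_LINKS scratchpad, then checks whether the peer has advertised the matching bit. A condensed sketch (error handling dropped; NTB_LINK_DOWN_TIMEOUT is the driver's retry interval):

    rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);

    /* query remote spad for qp ready bits */
    rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);

    if (1 << qp->qp_num & val) {
        /* peer advertised this qp too: bring the link up */
        qp->qp_link = NTB_LINK_UP;
        if (qp->event_handler)
            qp->event_handler(qp->cb_data, NTB_LINK_UP);
    } else if (nt->transport_link == NTB_LINK_UP)
        /* peer not ready yet: poll again later */
        schedule_delayed_work(&qp->link_work,
                              msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));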
856 struct ntb_transport_qp *qp;
864 qp = &nt->qps[qp_num];
865 qp->qp_num = qp_num;
866 qp->transport = nt;
867 qp->ndev = nt->ndev;
868 qp->qp_link = NTB_LINK_DOWN;
869 qp->client_ready = NTB_LINK_DOWN;
870 qp->event_handler = NULL;
877 tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
879 qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
880 if (!qp->tx_mw)
883 qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
884 if (!qp->tx_mw_phys)
888 qp->rx_info = qp->tx_mw + tx_size;
891 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
892 qp->tx_max_entry = tx_size / qp->tx_max_frame;
897 snprintf(debugfs_name, 4, "qp%d", qp_num);
898 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
901 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
902 qp->debugfs_dir, qp,
906 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
907 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
909 spin_lock_init(&qp->ntb_rx_pend_q_lock);
910 spin_lock_init(&qp->ntb_rx_free_q_lock);
911 spin_lock_init(&qp->ntb_tx_free_q_lock);
913 INIT_LIST_HEAD(&qp->rx_pend_q);
914 INIT_LIST_HEAD(&qp->rx_free_q);
915 INIT_LIST_HEAD(&qp->tx_free_q);
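
Lines 877-892 carve each qp's transmit slice out of the shared memory window. A sketch of the resulting layout (frame count and sizes vary with transport_mtu and the MW size):

    /*  tx_mw = MW vbase + qp_offset            tx_mw + tx_size
     *  |<------------------- tx_size ------------------->|
     *  | frame 0 | frame 1 |   ...   | frame N-1 | rx_info |
     *
     *  Each frame is tx_max_frame bytes and ends with a struct
     *  ntb_payload_header (ver, len, flags); rx_info holds the peer's
     *  published consumer index, used for flow control (line 1353).
     *  The receive side (lines 519-529) mirrors this layout from the
     *  other end of the link.
     */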
1001 /* verify that all the qp's are freed */
1026 struct ntb_transport_qp *qp = entry->qp;
1035 iowrite32(entry->index, &qp->rx_info->entry);
1037 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1039 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
1040 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1057 struct ntb_transport_qp *qp = entry->qp;
1058 struct dma_chan *chan = qp->dma_chan;
1115 qp->last_cookie = cookie;
1117 qp->rx_async++;
1130 dma_sync_wait(chan, qp->last_cookie);
1133 qp->rx_memcpy++;
1136 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1142 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1143 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1145 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1147 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1150 qp->rx_err_no_buf++;
1155 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1156 &qp->rx_pend_q);
1157 qp->rx_ring_empty++;
1161 if (hdr->ver != (u32) qp->rx_pkts) {
1162 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1163 "qp %d: version mismatch, expected %llu - got %u\n",
1164 qp->qp_num, qp->rx_pkts, hdr->ver);
1165 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
1166 &qp->rx_pend_q);
1167 qp->rx_err_ver++;
1172 ntb_qp_link_down(qp);
1177 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1179 qp->rx_index, hdr->ver, hdr->len, entry->len);
1181 qp->rx_bytes += hdr->len;
1182 qp->rx_pkts++;
1185 qp->rx_err_oflow++;
1186 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
1193 entry->index = qp->rx_index;
1199 qp->rx_index++;
1200 qp->rx_index %= qp->rx_max_entry;
1205 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1209 iowrite32(qp->rx_index, &qp->rx_info->entry);
1216 struct ntb_transport_qp *qp = data;
1219 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1225 for (i = 0; i < qp->rx_max_entry; i++) {
1226 rc = ntb_process_rxc(qp);
1231 if (qp->dma_chan)
1232 dma_async_issue_pending(qp->dma_chan);
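
Lines 1216-1232 are the receive doorbell callback, reassembled below as a sketch from the fragments above. The function name and exact signature are inferred from the db registration at line 1480, so treat them as assumptions:

    static void ntb_transport_rxc_db(void *data, int db_num)
    {
        struct ntb_transport_qp *qp = data;
        int rc, i;

        /* Drain at most one ring's worth of completed frames, then
         * flush any DMA copies queued by the rx path. */
        for (i = 0; i < qp->rx_max_entry; i++) {
            rc = ntb_process_rxc(qp);
            if (rc)
                break;
        }

        if (qp->dma_chan)
            dma_async_issue_pending(qp->dma_chan);
    }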
1240 struct ntb_transport_qp *qp = entry->qp;
1247 ntb_ring_doorbell(qp->ndev, qp->qp_num);
1254 qp->tx_bytes += entry->len;
1256 if (qp->tx_handler)
1257 qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1261 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1271 static void ntb_async_tx(struct ntb_transport_qp *qp,
1276 struct dma_chan *chan = qp->dma_chan;
1286 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1287 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1291 iowrite32((u32) qp->tx_pkts, &hdr->ver);
1300 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
1335 qp->tx_async++;
1344 qp->tx_memcpy++;
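
ntb_async_tx (lines 1271-1344) prefers a dmaengine copy into the peer's memory window and falls back to a CPU memcpy_toio when no channel or descriptor is available; the tx_async/tx_memcpy counters in the stats above record which path ran (the rx side keeps the same pair). A heavily condensed sketch of the fallback shape, with DMA mapping and error cleanup omitted:

    if (!chan)
        goto err;       /* no DMA channel: CPU copy */

    txd = chan->device->device_prep_dma_memcpy(chan, dest, src,
                                               entry->len, flags);
    if (!txd)
        goto err;

    txd->callback = ntb_tx_copy_callback;
    txd->callback_param = entry;
    cookie = dmaengine_submit(txd);
    if (dma_submit_error(cookie))
        goto err;

    dma_async_issue_pending(chan);
    qp->tx_async++;
    return;

    err:
        memcpy_toio(offset, entry->buf, entry->len);
        ntb_tx_copy_callback(entry); /* marks the frame done and rings
                                      * the peer doorbell (line 1247) */
        qp->tx_memcpy++;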
1347 static int ntb_process_tx(struct ntb_transport_qp *qp,
1350 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
1351 qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
1353 if (qp->tx_index == qp->remote_rx_info->entry) {
1354 qp->tx_ring_full++;
1358 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1359 if (qp->tx_handler)
1360 qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1362 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1363 &qp->tx_free_q);
1367 ntb_async_tx(qp, entry);
1369 qp->tx_index++;
1370 qp->tx_index %= qp->tx_max_entry;
1372 qp->tx_pkts++;
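
The ring-full test at line 1353, together with the receiver initially publishing rx_max_entry - 1 (line 529), deliberately leaves one slot unused, so producer index == published consumer index always means full rather than empty. A tiny userspace model of the convention:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_max_entry = 4;                  /* ring slots */
        unsigned int remote_entry = tx_max_entry - 1;   /* peer's initial
                                                         * remote_rx_info */
        unsigned int tx_index = 0, sent = 0;

        while (tx_index != remote_entry) {      /* the line-1353 check */
            sent++;
            tx_index = (tx_index + 1) % tx_max_entry;
        }

        /* prints "queued 3 of 4": one slot always stays empty */
        printf("queued %u of %u\n", sent, tx_max_entry);
        return 0;
    }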
1377 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1379 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1383 if (qp->qp_link == NTB_LINK_DOWN)
1386 qp->qp_link = NTB_LINK_DOWN;
1387 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1390 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1404 rc = ntb_process_tx(qp, entry);
1407 qp->qp_num);
1429 struct ntb_transport_qp *qp;
1447 qp = &nt->qps[free_queue];
1448 qp->cb_data = data;
1449 qp->rx_handler = handlers->rx_handler;
1450 qp->tx_handler = handlers->tx_handler;
1451 qp->event_handler = handlers->event_handler;
1454 qp->dma_chan = dma_find_channel(DMA_MEMCPY);
1455 if (!qp->dma_chan) {
1465 entry->qp = qp;
1466 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
1467 &qp->rx_free_q);
1475 entry->qp = qp;
1476 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1477 &qp->tx_free_q);
1480 rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1485 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1487 return qp;
1490 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1493 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1495 if (qp->dma_chan)
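
Putting the creation path together: a hypothetical client, modeled loosely on ntb_netdev. The my_* names are invented, and the exact ntb_transport_create_queue() signature should be checked against ntb_transport.h; the handler prototypes follow lines 109-119 above:

    static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                              void *data, int len)
    {
        /* consume the filled buffer, then repost one with
         * ntb_transport_rx_enqueue() */
    }

    static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
                              void *data, int len)
    {
        /* transmit completed: reclaim the buffer given to
         * ntb_transport_tx_enqueue() */
    }

    static void my_event_handler(void *data, int status)
    {
        /* NTB_LINK_UP / NTB_LINK_DOWN transitions */
    }

    static const struct ntb_queue_handlers my_handlers = {
        .rx_handler    = my_rx_handler,
        .tx_handler    = my_tx_handler,
        .event_handler = my_event_handler,
    };

    ...
    qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
    if (!qp)
        goto err;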
1505 * @qp: NTB queue to be freed
1509 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1514 if (!qp)
1517 pdev = ntb_query_pdev(qp->ndev);
1519 if (qp->dma_chan) {
1520 struct dma_chan *chan = qp->dma_chan;
1524 qp->dma_chan = NULL;
1529 dma_sync_wait(chan, qp->last_cookie);
1534 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1536 cancel_delayed_work_sync(&qp->link_work);
1538 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1541 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
1546 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1549 set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1551 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1557 * @qp: NTB queue to be freed
1561 * shutdown of qp.
1565 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1570 if (!qp || qp->client_ready == NTB_LINK_UP)
1573 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1580 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1588 * @qp: NTB transport layer queue the entry is to be enqueued on
1598 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1603 if (!qp)
1606 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1614 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
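
Clients typically pre-post a pool of receive buffers right after creating the queue. A sketch; the ring depth and allocation strategy are the client's choice, and MY_RX_RING_DEPTH is an invented constant:

    unsigned int len = ntb_transport_max_size(qp);

    for (i = 0; i < MY_RX_RING_DEPTH; i++) {
        void *buf = kmalloc(len, GFP_KERNEL);

        /* cb and data may both point at the raw buffer; a netdev
         * client would pass the skb as cb and skb->data as data */
        if (!buf || ntb_transport_rx_enqueue(qp, buf, buf, len))
            goto err;
    }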
1622 * @qp: NTB transport layer queue the entry is to be enqueued on
1629 * serialize access to the qp.
1633 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1639 if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1642 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1644 qp->tx_err_no_buf++;
1653 rc = ntb_process_tx(qp, entry);
1655 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1656 &qp->tx_free_q);
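
The matching transmit call. A nonzero return means nothing was queued and the entry was returned to the free list, so the caller can simply retry later; successful completion is reported asynchronously through tx_handler:

    rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
    if (rc) {
        /* link down or bad length, no free descriptor (tx_err_no_buf,
         * line 1644), or ring full (tx_ring_full, line 1354): back off
         * and retry once tx_handler reclaims an entry */
    }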
1664 * @qp: NTB transport layer queue to be enabled
1668 void ntb_transport_link_up(struct ntb_transport_qp *qp)
1670 if (!qp)
1673 qp->client_ready = NTB_LINK_UP;
1675 if (qp->transport->transport_link == NTB_LINK_UP)
1676 schedule_delayed_work(&qp->link_work, 0);
1682 * @qp: NTB transport layer queue to be disabled
1688 void ntb_transport_link_down(struct ntb_transport_qp *qp)
1693 if (!qp)
1696 pdev = ntb_query_pdev(qp->ndev);
1697 qp->client_ready = NTB_LINK_DOWN;
1699 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1705 rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1706 val & ~(1 << qp->qp_num));
1709 val & ~(1 << qp->qp_num), QP_LINKS);
1711 if (qp->qp_link == NTB_LINK_UP)
1712 ntb_send_link_down(qp);
1714 cancel_delayed_work_sync(&qp->link_work);
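
Teardown mirrors ntb_netdev's close path: take the client link down first (which also lets ntb_transport_rx_remove succeed, per the check at line 1570), drain the posted-but-unfilled receive buffers, then free the queue:

    ntb_transport_link_down(qp);

    while ((buf = ntb_transport_rx_remove(qp, &len)))
        kfree(buf);     /* reclaim buffers posted earlier */

    ntb_transport_free_queue(qp);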
1720 * @qp: NTB transport layer queue to be queried
1726 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1728 if (!qp)
1731 return qp->qp_link == NTB_LINK_UP;
1736 * ntb_transport_qp_num - Query the qp number
1737 * @qp: NTB transport layer queue to be queried
1739 * Query qp number of the NTB transport queue
1741 * RETURNS: a zero based number specifying the qp number
1743 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1745 if (!qp)
1748 return qp->qp_num;
1753 * ntb_transport_max_size - Query the max payload size of a qp
1754 * @qp: NTB transport layer queue to be queried
1756 * Query the maximum payload size permissible on the given qp
1758 * RETURNS: the max payload size of a qp
1760 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1764 if (!qp)
1767 if (!qp->dma_chan)
1768 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1771 max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
1772 max -= max % (1 << qp->dma_chan->device->copy_align);
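
The final trim at line 1772 rounds the payload limit down to the DMA engine's copy alignment. A worked example with illustrative values (the header size and copy_align below are assumptions):

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_max_frame = 0x4000;
        unsigned int hdr_len = 12;      /* assumed payload header size */
        unsigned int copy_align = 3;    /* assumed: 8-byte aligned copies */
        unsigned int max = tx_max_frame - hdr_len;      /* 0x3ff4 */

        max -= max % (1u << copy_align);        /* round down to 8 bytes */
        printf("max payload %#x\n", max);       /* 0x3ff0 */
        return 0;
    }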