Lines Matching refs:rq

202 "%s: rq[%d] error 0x%x\n",
561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
582 rq->stats.rx_buf_alloc_failure++;
601 rq->stats.rx_buf_alloc_failure++;
1126 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1129 rq->stats.drop_err++;
1131 rq->stats.drop_fcs++;
1133 rq->stats.drop_total++;
1153 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1162 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1167 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1169 while (rcd->gen == rq->comp_ring.gen) {
1184 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1187 ring = rq->rx_ring + ring_idx;
1188 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1190 rbi = rq->buf_info[ring_idx] + idx;
1196 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1202 rcd->rqID != rq->qid);
1224 rq->stats.rx_buf_alloc_failure++;
1226 rq->stats.drop_total++;
1272 rq->stats.rx_buf_alloc_failure++;
1312 napi_gro_receive(&rq->napi, skb);
1321 ring = rq->rx_ring + ring_idx;
1334 if (unlikely(rq->shared->updateRxProd)) {
1336 rxprod_reg[ring_idx] + rq->qid * 8,
1340 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1342 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1350 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1357 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1362 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1365 rq->buf_info[ring_idx][i].skb) {
1368 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1369 rq->buf_info[ring_idx][i].skb = NULL;
1371 rq->buf_info[ring_idx][i].page) {
1374 put_page(rq->buf_info[ring_idx][i].page);
1375 rq->buf_info[ring_idx][i].page = NULL;
1379 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1380 rq->rx_ring[ring_idx].next2fill =
1381 rq->rx_ring[ring_idx].next2comp = 0;
1384 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1385 rq->comp_ring.next2proc = 0;
1399 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1407 if (rq->buf_info[i]) {
1408 for (j = 0; j < rq->rx_ring[i].size; j++)
1409 BUG_ON(rq->buf_info[i][j].page != NULL);
1415 if (rq->rx_ring[i].base) {
1417 rq->rx_ring[i].size
1419 rq->rx_ring[i].base,
1420 rq->rx_ring[i].basePA);
1421 rq->rx_ring[i].base = NULL;
1423 rq->buf_info[i] = NULL;
1426 if (rq->comp_ring.base) {
1427 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1429 rq->comp_ring.base, rq->comp_ring.basePA);
1430 rq->comp_ring.base = NULL;
1433 if (rq->buf_info[0]) {
1435 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1436 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1437 rq->buf_info_pa);
1443 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1449 for (i = 0; i < rq->rx_ring[0].size; i++) {
1453 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1454 rq->buf_info[0][i].len = adapter->skb_buf_size;
1456 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1457 rq->buf_info[0][i].len = PAGE_SIZE;
1460 for (i = 0; i < rq->rx_ring[1].size; i++) {
1461 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1462 rq->buf_info[1][i].len = PAGE_SIZE;
1467 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1469 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1471 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1473 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1478 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1481 rq->comp_ring.next2proc = 0;
1482 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1484 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1487 rq->rx_ctx.skb = NULL;
1514 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1522 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1523 rq->rx_ring[i].base = dma_alloc_coherent(
1525 &rq->rx_ring[i].basePA,
1527 if (!rq->rx_ring[i].base) {
1534 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1535 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1536 &rq->comp_ring.basePA,
1538 if (!rq->comp_ring.base) {
1543 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1544 rq->rx_ring[1].size);
1545 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1550 rq->buf_info[0] = bi;
1551 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1556 vmxnet3_rq_destroy(rq, adapter);
1624 struct vmxnet3_rx_queue *rq = container_of(napi,
1626 struct vmxnet3_adapter *adapter = rq->adapter;
1634 &adapter->tx_queue[rq - adapter->rx_queue];
1638 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1642 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1688 struct vmxnet3_rx_queue *rq = data;
1689 struct vmxnet3_adapter *adapter = rq->adapter;
1693 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1694 napi_schedule(&rq->napi);
1879 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1880 rq->qid = i;
1881 rq->qid2 = i + adapter->num_rx_queues;
2181 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2183 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2184 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2185 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2186 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2187 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2188 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2189 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2194 rqc->intrIdx = rq->comp_ring.intr_idx;
2487 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2518 rq = &adapter->rx_queue[i];
2519 rq->rx_ring[0].size = ring0_size;
2520 rq->rx_ring[1].size = ring1_size;
2521 rq->comp_ring.size = comp_size;
2554 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2557 rq->shared = &adapter->rqd_start[i].ctrl;
2558 rq->adapter = adapter;
2559 err = vmxnet3_rq_create(rq, adapter);
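
The matches from vmxnet3_rq_rx_complete() (source lines 1167-1169 and 1340-1342 above) together with the gen initialization at lines 1384 and 1484 show the driver's generation-bit convention: a completion descriptor is consumed only while its gen field equals rq->comp_ring.gen, and the ring's gen value flips each time next2proc wraps. The stand-alone sketch below illustrates that pattern only; the structure names, RING_SIZE, and INIT_GEN are illustrative stand-ins, not taken from the driver headers.

/*
 * Minimal user-space sketch (not the driver's code) of the
 * generation-bit completion-ring pattern used by
 * vmxnet3_rq_rx_complete(): consume descriptors while their gen
 * matches the ring's gen, and flip gen when next2proc wraps.
 */
#include <stdio.h>

#define RING_SIZE 8
#define INIT_GEN  1              /* stands in for VMXNET3_INIT_GEN */

struct comp_desc {
	unsigned int data;       /* payload written by the "device" */
	unsigned int gen;        /* generation bit, written last */
};

struct comp_ring {
	struct comp_desc base[RING_SIZE];
	unsigned int next2proc;  /* next descriptor to consume */
	unsigned int gen;        /* generation the consumer expects */
};

/* Advance the consumer index, flipping gen on wrap; this plays the
 * role of vmxnet3_comp_ring_adv_next2proc() in the listing above. */
static void comp_ring_adv_next2proc(struct comp_ring *ring)
{
	if (++ring->next2proc == RING_SIZE) {
		ring->next2proc = 0;
		ring->gen ^= 1;
	}
}

/* Consume every descriptor whose gen matches, mirroring the
 * while (rcd->gen == rq->comp_ring.gen) loop at source line 1169. */
static int comp_ring_poll(struct comp_ring *ring)
{
	int done = 0;

	while (ring->base[ring->next2proc].gen == ring->gen) {
		printf("completed: %u\n", ring->base[ring->next2proc].data);
		comp_ring_adv_next2proc(ring);
		done++;
	}
	return done;
}

int main(void)
{
	struct comp_ring ring = { .next2proc = 0, .gen = INIT_GEN };

	/* Pretend the device completed three descriptors. */
	for (unsigned int i = 0; i < 3; i++)
		ring.base[i] = (struct comp_desc){ .data = i, .gen = INIT_GEN };

	printf("processed %d descriptors\n", comp_ring_poll(&ring));
	return 0;
}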
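
Similarly, the vmxnet3_rq_create() matches at source lines 1543-1551 show that the buf_info arrays for both rx rings come from a single coherent allocation, with buf_info[1] starting immediately after ring 0's entries. The sketch below shows only that layout, using calloc in place of dma_zalloc_coherent; the structure names and ring sizes are simplified stand-ins, not the driver's types.

/*
 * Illustrative sketch, not the driver's code: one allocation sized
 * for both rings' buffer-info entries, split into two per-ring views.
 */
#include <stdio.h>
#include <stdlib.h>

struct rx_buf_info {             /* stand-in for struct vmxnet3_rx_buf_info */
	int buf_type;
	unsigned int len;
};

struct rx_queue {                /* only the fields this sketch needs */
	size_t ring_size[2];
	struct rx_buf_info *buf_info[2];
};

static int rq_create_bufinfo(struct rx_queue *rq)
{
	size_t sz = sizeof(struct rx_buf_info) *
		    (rq->ring_size[0] + rq->ring_size[1]);
	struct rx_buf_info *bi = calloc(1, sz);

	if (!bi)
		return -1;

	/* One allocation, two views: ring 1's array begins right
	 * after ring 0's entries, as at source lines 1550-1551. */
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->ring_size[0];
	return 0;
}

int main(void)
{
	struct rx_queue rq = { .ring_size = { 256, 128 } };

	if (rq_create_bufinfo(&rq))
		return 1;
	printf("buf_info[1] - buf_info[0] = %zu entries\n",
	       (size_t)(rq.buf_info[1] - rq.buf_info[0]));
	free(rq.buf_info[0]);
	return 0;
}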