Lines matching references to rq (the per-queue struct receive_queue) in the virtio_net driver, drivers/net/virtio_net.c:

105 	struct receive_queue *rq;
191 static void give_pages(struct receive_queue *rq, struct page *page)
195 /* Find end of list, sew whole thing into vi->rq.pages. */
197 end->private = (unsigned long)rq->pages;
198 rq->pages = page;
201 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
203 struct page *p = rq->pages;
206 rq->pages = (struct page *)p->private;
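
The fragments at lines 191-206 show how the driver caches spare pages per receive queue: pages are chained through their private field, give_pages() walks to the end of the chain being returned and splices the whole thing onto rq->pages, and get_a_page() pops the head or falls back to a fresh allocation. Below is a minimal user-space analogue of that intrusive free list; struct fake_page, struct fake_rq and the calloc() fallback are illustrative stand-ins, not kernel API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct page: the driver chains spare pages
     * through page->private; "private" plays the same role here. */
    struct fake_page {
        unsigned long private;          /* next page in the chain */
        char data[4096];
    };

    struct fake_rq {
        struct fake_page *pages;        /* head of the per-queue free-page chain */
    };

    /* Mirror of give_pages(): find the end of the chain being returned and
     * splice it onto rq->pages, as at lines 195-198. */
    static void give_pages(struct fake_rq *rq, struct fake_page *page)
    {
        struct fake_page *end;

        for (end = page; end->private; end = (struct fake_page *)end->private)
            ;
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
    }

    /* Mirror of get_a_page(): pop the head of the chain, or allocate fresh. */
    static struct fake_page *get_a_page(struct fake_rq *rq)
    {
        struct fake_page *p = rq->pages;

        if (p) {
            rq->pages = (struct fake_page *)p->private;
            p->private = 0;             /* private is only used for chaining */
            return p;
        }
        return calloc(1, sizeof(struct fake_page));
    }

    int main(void)
    {
        struct fake_rq rq = { .pages = NULL };
        struct fake_page *a = get_a_page(&rq);   /* chain empty: allocated fresh */

        give_pages(&rq, a);                      /* returned to the chain */
        printf("reused: %s\n", get_a_page(&rq) == a ? "yes" : "no");
        return 0;
    }

Because the chain lives inside the pages themselves, teardown needs no extra bookkeeping: the free path at lines 1467-1468 simply pops with get_a_page() until rq->pages is empty.
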
244 static struct sk_buff *page_to_skb(struct receive_queue *rq,
248 struct virtnet_info *vi = rq->vq->vdev->priv;
315 give_pages(rq, page);
331 struct receive_queue *rq,
336 struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
345 give_pages(rq, page);
350 struct receive_queue *rq,
361 struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
369 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
411 ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
417 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
433 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
435 struct virtnet_info *vi = rq->vq->vdev->priv;
449 give_pages(rq, buf);
457 skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
459 skb = receive_big(dev, rq, buf, len);
528 skb_mark_napi_id(skb, &rq->napi);
538 static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
540 struct virtnet_info *vi = rq->vq->vdev->priv;
552 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
553 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
554 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
556 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
563 static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
569 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
571 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
573 first = get_a_page(rq, gfp);
576 give_pages(rq, list);
579 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
586 first = get_a_page(rq, gfp);
588 give_pages(rq, list);
593 /* rq->sg[0], rq->sg[1] share the same page */
594 /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
595 sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
597 /* rq->sg[1] for data packet, from offset */
599 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
603 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
606 give_pages(rq, first);
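
For the big-packet case, add_recvbuf_big() (lines 563-606) builds one descriptor chain per packet: sg[0] exposes only the virtio_net_hdr, sg[1] the rest of that same first page starting at a padded offset, and the remaining slots each point at a whole page taken from the rq->pages chain; on failure the entire chain is handed back via give_pages() at line 606. The user-space sketch below lays out the same shape with struct iovec in place of a scatterlist; PAGE_SIZE, MAX_SKB_FRAGS, VNET_HDR_LEN and PADDED_HDR_LEN are assumed illustrative values, not taken from this listing.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    #define PAGE_SIZE       4096
    #define MAX_SKB_FRAGS   17      /* assumed value for the demo */
    #define VNET_HDR_LEN    10      /* illustrative header size */
    #define PADDED_HDR_LEN  16      /* illustrative padded offset for the data */

    int main(void)
    {
        /* MAX_SKB_FRAGS + 2 entries, as in sg_init_table() at line 569. */
        struct iovec sg[MAX_SKB_FRAGS + 2];
        char *first = calloc(1, PAGE_SIZE);
        size_t total = 0;
        int i;

        /* sg[0] and sg[1] share the first page (lines 593-599): sg[0] carries
         * only the header, sg[1] the payload area from the padded offset. */
        sg[0].iov_base = first;
        sg[0].iov_len  = VNET_HDR_LEN;
        sg[1].iov_base = first + PADDED_HDR_LEN;
        sg[1].iov_len  = PAGE_SIZE - PADDED_HDR_LEN;

        /* Every remaining slot gets a whole page of its own (line 579); in the
         * driver these come from get_a_page() and are chained for reuse. */
        for (i = 2; i < MAX_SKB_FRAGS + 2; i++) {
            sg[i].iov_base = calloc(1, PAGE_SIZE);
            sg[i].iov_len  = PAGE_SIZE;
        }

        for (i = 0; i < MAX_SKB_FRAGS + 2; i++)
            total += sg[i].iov_len;
        printf("%d descriptors, %zu bytes available per big packet\n",
               MAX_SKB_FRAGS + 2, total);

        for (i = 2; i < MAX_SKB_FRAGS + 2; i++)
            free(sg[i].iov_base);
        free(first);
        return 0;
    }

The split between sg[0] and sg[1] exists only because of the QEMU quirk noted in the source comment at line 594; the payload layout itself would work with a single descriptor covering the whole first page.
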
621 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
623 struct page_frag *alloc_frag = &rq->alloc_frag;
629 len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
648 sg_init_one(rq->sg, buf, len);
649 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
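
For mergeable buffers, each posted buffer is sized from a running average of recent packet lengths: ewma_add() feeds the average at line 411, ewma_init() seeds it at line 1612, and get_mergeable_buf_len() consumes it at line 629. The sketch below is a runnable user-space model of the fixed-point EWMA scheme used by the kernel's lib/average.c of that vintage; the weight of 64 and the sample lengths are assumptions for the demo, not values visible in this listing.

    #include <stdio.h>

    /* Fixed-point EWMA modelled on lib/average.c: values are scaled by a
     * power-of-two factor and averaged with a power-of-two weight. */
    struct ewma {
        unsigned long internal;
        unsigned long factor;   /* log2(scaling factor) */
        unsigned long weight;   /* log2(averaging weight) */
    };

    static unsigned long ilog2_ul(unsigned long x)
    {
        unsigned long l = 0;

        while (x >>= 1)
            l++;
        return l;
    }

    static void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
    {
        avg->internal = 0;
        avg->factor = ilog2_ul(factor);
        avg->weight = ilog2_ul(weight);
    }

    static void ewma_add(struct ewma *avg, unsigned long val)
    {
        avg->internal = avg->internal ?
            (((avg->internal << avg->weight) - avg->internal) +
             (val << avg->factor)) >> avg->weight :
            (val << avg->factor);
    }

    static unsigned long ewma_read(const struct ewma *avg)
    {
        return avg->internal >> avg->factor;
    }

    int main(void)
    {
        /* Mirrors ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT)
         * at line 1612; 64 is an assumed weight. */
        struct ewma mrg_avg_pkt_len;
        unsigned long lens[] = { 1514, 1514, 66, 9000, 1514 };
        unsigned long i;

        ewma_init(&mrg_avg_pkt_len, 1, 64);
        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
            ewma_add(&mrg_avg_pkt_len, lens[i]);   /* as at line 411 */
            printf("after %lu-byte packet: avg %lu\n",
                   lens[i], ewma_read(&mrg_avg_pkt_len));
        }
        return 0;
    }

In the driver, get_mergeable_buf_len() clamps this average before sizing the fragment carved from rq->alloc_frag (lines 623-629); the exact clamp bounds are not visible in this listing.
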
663 static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
665 struct virtnet_info *vi = rq->vq->vdev->priv;
672 err = add_recvbuf_mergeable(rq, gfp);
674 err = add_recvbuf_big(rq, gfp);
676 err = add_recvbuf_small(rq, gfp);
681 } while (rq->vq->num_free);
682 virtqueue_kick(rq->vq);
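
try_fill_recv() (lines 663-682) posts buffers of whichever flavor the device negotiated until the virtqueue runs out of free descriptors or an allocation fails, and only then kicks the device once. A user-space analogue of that "fill until the ring is full, notify once" pattern follows; struct toy_vq and its helpers are illustrative stand-ins, not the virtqueue API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for the virtqueue: only the free-descriptor count and a
     * notification counter matter for this sketch. */
    struct toy_vq {
        unsigned int num_free;
        unsigned int kicks;
    };

    /* Post one receive buffer; negative return mimics the add_recvbuf_*()
     * convention on allocation failure. */
    static int add_recvbuf(struct toy_vq *vq)
    {
        void *buf = malloc(2048);

        if (!buf)
            return -1;
        vq->num_free--;        /* one descriptor consumed */
        free(buf);             /* a real driver would hand buf to the device */
        return 0;
    }

    /* Mirrors the shape of try_fill_recv(): loop until the ring is full or
     * an allocation fails, then notify the device exactly once. */
    static bool try_fill_recv(struct toy_vq *vq)
    {
        bool oom = false;

        do {
            if (add_recvbuf(vq) < 0) {
                oom = true;    /* caller schedules a deferred refill */
                break;
            }
        } while (vq->num_free);

        vq->kicks++;           /* virtqueue_kick(rq->vq), line 682 */
        return !oom;
    }

    int main(void)
    {
        struct toy_vq vq = { .num_free = 256, .kicks = 0 };

        printf("filled: %s, kicks: %u\n",
               try_fill_recv(&vq) ? "yes" : "no", vq.kicks);
        return 0;
    }

When the real function reports failure, the refill worker at lines 722-726 takes over: it disables NAPI on the queue, retries with GFP_KERNEL, and re-enables NAPI.
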
689 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
692 if (napi_schedule_prep(&rq->napi)) {
694 __napi_schedule(&rq->napi);
698 static void virtnet_napi_enable(struct receive_queue *rq)
700 napi_enable(&rq->napi);
706 if (napi_schedule_prep(&rq->napi)) {
707 virtqueue_disable_cb(rq->vq);
709 __napi_schedule(&rq->napi);
722 struct receive_queue *rq = &vi->rq[i];
724 napi_disable(&rq->napi);
725 still_empty = !try_fill_recv(rq, GFP_KERNEL);
726 virtnet_napi_enable(rq);
736 static int virtnet_receive(struct receive_queue *rq, int budget)
738 struct virtnet_info *vi = rq->vq->vdev->priv;
743 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
744 receive_buf(rq, buf, len);
748 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
749 if (!try_fill_recv(rq, GFP_ATOMIC))
758 struct receive_queue *rq =
763 received += virtnet_receive(rq, budget - received);
767 r = virtqueue_enable_cb_prepare(rq->vq);
769 if (unlikely(virtqueue_poll(rq->vq, r)) &&
771 virtqueue_disable_cb(rq->vq);
784 struct receive_queue *rq =
786 struct virtnet_info *vi = rq->vq->vdev->priv;
795 virtqueue_disable_cb(rq->vq);
798 received += virtnet_receive(rq, budget);
800 r = virtqueue_enable_cb_prepare(rq->vq);
802 if (unlikely(virtqueue_poll(rq->vq, r)) &&
804 virtqueue_disable_cb(rq->vq);
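
The poll paths at lines 756-804 use the standard "re-arm, then re-check" dance to avoid losing a wakeup: virtqueue_enable_cb_prepare() re-arms callbacks and returns a snapshot, virtqueue_poll() checks whether buffers arrived in the window, and if so the driver disables callbacks again and keeps polling instead of sleeping. The sketch below models that race-avoidance pattern in user space with an atomic counter standing in for the vring index; all names here are toy stand-ins, not the virtio API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vq {
        atomic_uint pending;     /* buffers the device has made available */
        unsigned int last_seen;  /* buffers the driver has consumed */
        bool cb_enabled;         /* callbacks ("interrupts") armed? */
    };

    static void disable_cb(struct toy_vq *vq) { vq->cb_enabled = false; }

    /* Mirrors virtqueue_enable_cb_prepare(): re-arm and return a snapshot. */
    static unsigned int enable_cb_prepare(struct toy_vq *vq)
    {
        vq->cb_enabled = true;
        return vq->last_seen;
    }

    /* Mirrors virtqueue_poll(): true if buffers arrived after the snapshot. */
    static bool vq_poll(struct toy_vq *vq, unsigned int snap)
    {
        return atomic_load(&vq->pending) != snap;
    }

    /* One NAPI pass. "device" simulates the host racing with us between the
     * moment we stop consuming and the moment callbacks are re-armed. */
    static void napi_poll(struct toy_vq *vq, void (*device)(struct toy_vq *))
    {
        unsigned int r;

        vq->last_seen = atomic_load(&vq->pending);   /* drain what's there */

        if (device)
            device(vq);                              /* the race window */

        r = enable_cb_prepare(vq);
        if (vq_poll(vq, r)) {
            /* Something slipped in: disable callbacks and keep polling,
             * as at lines 767-771 and 800-804. */
            disable_cb(vq);
            printf("late buffer detected: reschedule polling\n");
        } else {
            printf("queue idle: wait for the next callback\n");
        }
    }

    static void racing_device(struct toy_vq *vq)
    {
        atomic_fetch_add(&vq->pending, 1);
    }

    int main(void)
    {
        struct toy_vq vq = { .pending = 3, .last_seen = 0, .cb_enabled = false };

        napi_poll(&vq, NULL);            /* clean pass: queue goes idle   */
        napi_poll(&vq, racing_device);   /* racy pass: polling continues  */
        return 0;
    }

The same prepare/poll pairing guards both the ordinary NAPI poll (lines 767-771) and the busy-poll path (lines 800-804), so a buffer that lands just after the queue is drained is never left waiting for an interrupt that was already consumed.
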
825 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
827 virtnet_napi_enable(&vi->rq[i]);
1093 napi_schedule(&vi->rq[i].napi);
1142 napi_disable(&vi->rq[i].napi);
1252 virtqueue_set_affinity(vi->rq[i].vq, -1);
1277 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1312 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1456 netif_napi_del(&vi->rq[i].napi);
1458 kfree(vi->rq);
1467 while (vi->rq[i].pages)
1468 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1476 if (vi->rq[i].alloc_frag.page)
1477 put_page(vi->rq[i].alloc_frag.page);
1492 struct virtqueue *vq = vi->rq[i].vq;
1500 give_pages(&vi->rq[i], buf);
1555 sprintf(vi->rq[i].name, "input.%d", i);
1557 names[rxq2vq(i)] = vi->rq[i].name;
1573 vi->rq[i].vq = vqs[rxq2vq(i)];
1600 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1601 if (!vi->rq)
1606 vi->rq[i].pages = NULL;
1607 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1609 napi_hash_add(&vi->rq[i].napi);
1611 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1612 ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
1658 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
1847 try_fill_recv(&vi->rq[i], GFP_KERNEL);
1850 if (vi->rq[i].vq->num_free ==
1851 virtqueue_get_vring_size(vi->rq[i].vq)) {
1943 napi_disable(&vi->rq[i].napi);
1944 napi_hash_del(&vi->rq[i].napi);
1945 netif_napi_del(&vi->rq[i].napi);
1967 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1971 virtnet_napi_enable(&vi->rq[i]);