Lines matching refs: vi
110 static void give_pages(struct virtnet_info *vi, struct page *page)
114 /* Find end of list, sew whole thing into vi->pages. */
116 end->private = (unsigned long)vi->pages;
117 vi->pages = page;
120 static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
122 struct page *p = vi->pages;
125 vi->pages = (struct page *)p->private;
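The give_pages()/get_a_page() fragments above implement a small free-page pool: pages are chained into a singly linked list threaded through each page's private field, so a whole chain can be handed back in one call and reused later without hitting the page allocator. A minimal sketch consistent with those fragments (not necessarily the file's exact text):

static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private)
		;
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* Clear private: it is only used to chain pooled pages. */
		p->private = 0;
	} else {
		p = alloc_page(gfp_mask);
	}
	return p;
}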
135 struct virtnet_info *vi = svq->vdev->priv;
141 netif_wake_queue(vi->dev);
160 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
171 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
177 if (vi->mergeable_rx_bufs) {
218 give_pages(vi, page);
223 static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
237 page = virtqueue_get_buf(vi->rvq, &len);
250 --vi->num;
257 struct virtnet_info *vi = netdev_priv(dev);
258 struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
266 if (vi->mergeable_rx_bufs || vi->big_packets)
267 give_pages(vi, buf);
273 if (!vi->mergeable_rx_bufs && !vi->big_packets) {
279 skb = page_to_skb(vi, page, len);
282 give_pages(vi, page);
285 if (vi->mergeable_rx_bufs)
286 if (receive_mergeable(vi, skb)) {
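The receive_buf() fragments above show how a completed buffer is dispatched by type: with neither big_packets nor mergeable_rx_bufs the buffer is already an sk_buff and only needs the header trimmed off; otherwise it is a page (chain) that page_to_skb() turns into an skb, and a mergeable packet may still need receive_mergeable() to pull its remaining pages off the ring. A reduced sketch of that dispatch, assuming the local skb/page/buf/len variables of receive_buf() and eliding the short-packet check and statistics:

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		/* Small: buf is the skb we posted; drop the leading header. */
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		/* Big or mergeable: buf is a page chain; wrap it in an skb. */
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			/* Collect the rest of this packet's pages. */
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}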
356 static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
362 skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
369 sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
371 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
373 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
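add_recvbuf_small() posts one skb-sized receive buffer as exactly two device-writable scatterlist entries: the virtio_net_hdr first, then the packet data. A sketch of the whole function, assuming struct skb_vnet_hdr and the skb_vnet_hdr() accessor are the driver's internal way of keeping the header next to the skb (names assumed here):

static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;	/* driver-internal wrapper, name assumed */
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	/* rx_sg[0]: the virtio_net_hdr; rx_sg[1]: the packet data.
	 * Both are "in" entries, i.e. written by the device. */
	hdr = skb_vnet_hdr(skb);	/* assumed helper */
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}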
380 static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
386 /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
388 first = get_a_page(vi, gfp);
391 give_pages(vi, list);
394 sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
401 first = get_a_page(vi, gfp);
403 give_pages(vi, list);
408 /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
409 /* a separate vi->rx_sg[0] for virtio_net_hdr only, due to a QEMU bug */
410 sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
412 /* vi->rx_sg[1] for data packet, from offset */
414 sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
418 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
421 give_pages(vi, first);
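The add_recvbuf_big() fragments describe a more involved layout: a chain of up to MAX_SKB_FRAGS + 1 pages is linked through page->private (so one give_pages() call can undo everything on failure), and the scatterlist deliberately keeps the virtio_net_hdr in its own entry. A sketch of the layout step inside add_recvbuf_big(), assuming its locals (first, err, gfp) and that struct padded_vnet_hdr is the driver's padded header type (name assumed):

	/* rx_sg[0]                  virtio_net_hdr only, at the start of the
	 *                           first page (kept apart from the data to
	 *                           work around the QEMU bug noted above)
	 * rx_sg[1]                  rest of that first page, after the padding
	 * rx_sg[2..MAX_SKB_FRAGS+1] one whole page each, chained via
	 *                           page->private */
	char *p = page_address(first);
	int offset = sizeof(struct padded_vnet_hdr);	/* name assumed */

	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* All MAX_SKB_FRAGS + 2 entries are "in" (device-writable). */
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);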
426 static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
431 page = get_a_page(vi, gfp);
435 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
437 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
439 give_pages(vi, page);
451 static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
457 if (vi->mergeable_rx_bufs)
458 err = add_recvbuf_mergeable(vi, gfp);
459 else if (vi->big_packets)
460 err = add_recvbuf_big(vi, gfp);
462 err = add_recvbuf_small(vi, gfp);
467 ++vi->num;
469 if (unlikely(vi->num > vi->max))
470 vi->max = vi->num;
471 virtqueue_kick(vi->rvq);
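try_fill_recv() keeps posting buffers of whichever type was negotiated until the ring fills up or an allocation fails, maintains vi->num and the vi->max high-water mark, and kicks the device once at the end. The boolean result tells the caller whether it ran out of memory and should schedule the refill worker. A sketch consistent with the fragments above, relying on the old virtqueue_add_buf() convention of returning the remaining capacity on success:

static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);	/* stop once the ring reports no space left */

	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}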
477 struct virtnet_info *vi = rvq->vdev->priv;
479 if (napi_schedule_prep(&vi->napi)) {
481 __napi_schedule(&vi->napi);
485 static void virtnet_napi_enable(struct virtnet_info *vi)
487 napi_enable(&vi->napi);
493 if (napi_schedule_prep(&vi->napi)) {
494 virtqueue_disable_cb(vi->rvq);
495 __napi_schedule(&vi->napi);
501 struct virtnet_info *vi;
504 vi = container_of(work, struct virtnet_info, refill.work);
505 napi_disable(&vi->napi);
506 still_empty = !try_fill_recv(vi, GFP_KERNEL);
507 virtnet_napi_enable(vi);
512 queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
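refill_work() is the process-context fallback for when an atomic refill could not allocate memory: it disables NAPI so it cannot race with the receive path, retries with GFP_KERNEL, re-enables NAPI, and reschedules itself in half a second if the ring is still empty. A sketch consistent with the fragments above:

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* If no buffers came back we would otherwise never try again,
	 * so keep retrying on a timer. */
	if (still_empty)
		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}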
517 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
523 (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
524 receive_buf(vi->dev, buf, len);
525 --vi->num;
529 if (vi->num < vi->max / 2) {
530 if (!try_fill_recv(vi, GFP_ATOMIC))
531 queue_delayed_work(system_nrt_wq, &vi->refill, 0);
537 if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
539 virtqueue_disable_cb(vi->rvq);
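virtnet_poll() follows the usual NAPI shape: drain up to budget buffers from the receive queue, top the ring back up (deferring to refill_work if GFP_ATOMIC fails), then complete NAPI and re-arm the virtqueue callback, re-polling if buffers slipped in before the callback was armed. A sketch consistent with the fragments above:

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len;
	int received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	/* Top the ring back up; if GFP_ATOMIC fails, let the worker retry. */
	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
	}

	/* Out of packets? Re-enable the callback, but re-poll if buffers
	 * raced in before it was armed. */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}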
548 static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
552 struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
554 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
568 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
573 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
605 if (vi->mergeable_rx_bufs)
606 sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
608 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
610 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
611 return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
617 struct virtnet_info *vi = netdev_priv(dev);
621 free_old_xmit_skbs(vi);
624 capacity = xmit_skb(vi, skb);
643 virtqueue_kick(vi->svq);
653 if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
655 capacity += free_old_xmit_skbs(vi);
658 virtqueue_disable_cb(vi->svq);
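start_xmit() avoids returning NETDEV_TX_BUSY by stopping the queue itself whenever fewer than a worst-case packet's worth of descriptors (2 + MAX_SKB_FRAGS: header, linear data, and every fragment) remain, then rechecking once after reclaiming completed skbs in case the device has already caught up. A simplified sketch of that flow, with the driver's warning on the OOM path reduced to a plain drop:

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Reclaim descriptors for skbs the host has already consumed. */
	free_old_xmit_skbs(vi);

	/* Queue the packet; a non-negative return is the remaining capacity. */
	capacity = xmit_skb(vi, skb);
	if (unlikely(capacity < 0)) {
		/* Ring full or OOM: drop rather than return TX_BUSY. */
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Stop the queue if another worst-case packet might not fit, but
	 * recheck once in case completions raced in meanwhile. */
	if (capacity < 2 + MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2 + MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}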
668 struct virtnet_info *vi = netdev_priv(dev);
669 struct virtio_device *vdev = vi->vdev;
686 struct virtnet_info *vi = netdev_priv(dev);
692 = per_cpu_ptr(vi->stats, cpu);
721 struct virtnet_info *vi = netdev_priv(dev);
723 napi_schedule(&vi->napi);
729 struct virtnet_info *vi = netdev_priv(dev);
732 if (!try_fill_recv(vi, GFP_KERNEL))
733 queue_delayed_work(system_nrt_wq, &vi->refill, 0);
735 virtnet_napi_enable(vi);
744 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
754 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
770 BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
772 virtqueue_kick(vi->cvq);
778 while (!virtqueue_get_buf(vi->cvq, &tmp))
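virtnet_send_command() issues synchronous commands on the control virtqueue: an out entry carrying the class/cmd header, the caller's payload entries, and an in entry for the one-byte ack; the driver then kicks the queue and busy-waits for the device to return the buffer (the kick traps into the hypervisor, so the reply is expected almost immediately). A reduced sketch with no payload entries, using a hypothetical send_ctrl_cmd() name so as not to imply this is the driver's exact body:

static bool send_ctrl_cmd(struct virtnet_info *vi, u8 class, u8 cmd)
{
	struct scatterlist sg[2];
	struct virtio_net_ctrl_hdr ctrl = { .class = class, .cmd = cmd };
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));	/* out: the command */
	sg_set_buf(&sg[1], &status, sizeof(status));	/* in: device's ack */

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, 1, 1, vi, GFP_ATOMIC) < 0);
	virtqueue_kick(vi->cvq);

	/* Spin for the response rather than sleeping. */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_CTRL_OK;
}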
786 struct virtnet_info *vi = netdev_priv(dev);
789 cancel_delayed_work_sync(&vi->refill);
790 napi_disable(&vi->napi);
797 struct virtnet_info *vi = netdev_priv(dev);
808 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
816 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
824 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
863 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
873 struct virtnet_info *vi = netdev_priv(dev);
878 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
886 struct virtnet_info *vi = netdev_priv(dev);
891 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
900 struct virtnet_info *vi = netdev_priv(dev);
902 ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
903 ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
913 struct virtnet_info *vi = netdev_priv(dev);
914 struct virtio_device *vdev = vi->vdev;
955 static void virtnet_update_status(struct virtnet_info *vi)
959 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
967 if (vi->status == v)
970 vi->status = v;
972 if (vi->status & VIRTIO_NET_S_LINK_UP) {
973 netif_carrier_on(vi->dev);
974 netif_wake_queue(vi->dev);
976 netif_carrier_off(vi->dev);
977 netif_stop_queue(vi->dev);
983 struct virtnet_info *vi = vdev->priv;
985 virtnet_update_status(vi);
988 static int init_vqs(struct virtnet_info *vi)
997 nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
999 err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
1003 vi->rvq = vqs[0];
1004 vi->svq = vqs[1];
1006 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
1007 vi->cvq = vqs[2];
1009 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1010 vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
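init_vqs() requests all virtqueues from the transport in a single find_vqs() call: receive and send always, plus the control queue when VIRTIO_NET_F_CTRL_VQ was negotiated, in which case hardware VLAN filtering can also be advertised. A sketch consistent with the fragments above; skb_recv_done/skb_xmit_done are the rx/tx completion callbacks whose bodies appear earlier in this listing, though their names are assumed here:

static int init_vqs(struct virtnet_info *vi)
{
	struct virtqueue *vqs[3];
	/* rx/tx completion callbacks (names assumed); the control queue
	 * needs no callback because commands are polled synchronously. */
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };
	int nvqs, err;

	/* Two virtqueues, receive then send, and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
	if (err)
		return err;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
	}
	return 0;
}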
1019 struct virtnet_info *vi;
1067 vi = netdev_priv(dev);
1068 netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
1069 vi->dev = dev;
1070 vi->vdev = vdev;
1071 vdev->priv = vi;
1072 vi->pages = NULL;
1073 vi->stats = alloc_percpu(struct virtnet_stats);
1075 if (vi->stats == NULL)
1078 INIT_DELAYED_WORK(&vi->refill, refill_work);
1079 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
1080 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
1086 vi->big_packets = true;
1089 vi->mergeable_rx_bufs = true;
1091 err = init_vqs(vi);
1102 try_fill_recv(vi, GFP_KERNEL);
1105 if (vi->num == 0) {
1112 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1114 virtnet_update_status(vi);
1116 vi->status = VIRTIO_NET_S_LINK_UP;
1128 free_percpu(vi->stats);
1134 static void free_unused_bufs(struct virtnet_info *vi)
1138 buf = virtqueue_detach_unused_buf(vi->svq);
1144 buf = virtqueue_detach_unused_buf(vi->rvq);
1147 if (vi->mergeable_rx_bufs || vi->big_packets)
1148 give_pages(vi, buf);
1151 --vi->num;
1153 BUG_ON(vi->num != 0);
1156 static void remove_vq_common(struct virtnet_info *vi)
1158 vi->vdev->config->reset(vi->vdev);
1161 free_unused_bufs(vi);
1163 vi->vdev->config->del_vqs(vi->vdev);
1165 while (vi->pages)
1166 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
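remove_vq_common() tears things down in the only safe order: reset the device so it stops touching guest memory, detach and free any buffers still owned by the rings, delete the virtqueues, and finally drain the free-page pool built up by give_pages(). A sketch consistent with the fragments above:

static void remove_vq_common(struct virtnet_info *vi)
{
	/* Stop all device activity before touching the rings. */
	vi->vdev->config->reset(vi->vdev);

	/* Free buffers still queued on the send and receive rings. */
	free_unused_bufs(vi);

	vi->vdev->config->del_vqs(vi->vdev);

	/* Return the pooled pages to the allocator one by one. */
	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
}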
1171 struct virtnet_info *vi = vdev->priv;
1173 unregister_netdev(vi->dev);
1175 remove_vq_common(vi);
1177 free_percpu(vi->stats);
1178 free_netdev(vi->dev);
1184 struct virtnet_info *vi = vdev->priv;
1186 virtqueue_disable_cb(vi->rvq);
1187 virtqueue_disable_cb(vi->svq);
1188 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
1189 virtqueue_disable_cb(vi->cvq);
1191 netif_device_detach(vi->dev);
1192 cancel_delayed_work_sync(&vi->refill);
1194 if (netif_running(vi->dev))
1195 napi_disable(&vi->napi);
1197 remove_vq_common(vi);
1204 struct virtnet_info *vi = vdev->priv;
1207 err = init_vqs(vi);
1211 if (netif_running(vi->dev))
1212 virtnet_napi_enable(vi);
1214 netif_device_attach(vi->dev);
1216 if (!try_fill_recv(vi, GFP_KERNEL))
1217 queue_delayed_work(system_nrt_wq, &vi->refill, 0);