Lines Matching refs:vdev

76 void *vnic_dev_priv(struct vnic_dev *vdev)
78 return vdev->priv;
81 static int vnic_dev_discover_res(struct vnic_dev *vdev,
164 vdev->res[type].count = count;
165 vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
167 vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
173 unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
176 return vdev->res[type].count;
180 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
183 if (!vdev->res[type].vaddr)
191 return (char __iomem *)vdev->res[type].vaddr +
194 return (char __iomem *)vdev->res[type].vaddr;
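
The get_res_count()/get_res() pair above is how callers turn a discovered resource-table entry into an ioremapped pointer. A minimal sketch follows, assuming the driver's vnic_dev.h/vnic_resource.h headers; RES_TYPE_WQ and the loop context are illustrative assumptions, not taken from the matches above.

#include <linux/errno.h>
#include "vnic_dev.h"
#include "vnic_resource.h"

/* Sketch: map every control region of an assumed resource type (RES_TYPE_WQ). */
static int map_wq_regs(struct vnic_dev *vdev)
{
        unsigned int i, count = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);

        for (i = 0; i < count; i++) {
                void __iomem *ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, i);

                /* vnic_dev_get_res() returns NULL when the type was never discovered */
                if (!ctrl)
                        return -EINVAL;
                /* ... stash ctrl for later register writes ... */
        }
        return 0;
}
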
231 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
236 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
258 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
261 pci_free_consistent(vdev->pdev,
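
vnic_dev_alloc_desc_ring() and vnic_dev_free_desc_ring() wrap pci_alloc_consistent()/pci_free_consistent(), so descriptor rings come out of coherent DMA memory tied to vdev->pdev. A minimal sketch, assuming the (desc_count, desc_size) trailing parameters declared in vnic_dev.h (they are not visible in the matches) and purely illustrative sizing:

#include "vnic_dev.h"

/* Sketch: allocate a descriptor ring, then release it again.
 * The 4096 x 16-byte sizing is illustrative, not taken from the source. */
static int ring_roundtrip(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
        int err = vnic_dev_alloc_desc_ring(vdev, ring, 4096, 16);

        if (err)
                return err;

        /* ... hand the ring's descriptor area to the queue setup code ... */

        vnic_dev_free_desc_ring(vdev, ring);
        return 0;
}
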
269 static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
272 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
290 writeq(vdev->args[i], &devcmd->args[i]);
326 vdev->args[i] = readq(&devcmd->args[i]);
337 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
344 memset(vdev->args, 0, sizeof(vdev->args));
346 vdev->args[0] = vdev->proxy_index;
347 vdev->args[1] = cmd;
348 vdev->args[2] = *a0;
349 vdev->args[3] = *a1;
351 err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
355 status = (u32)vdev->args[0];
357 err = (int)vdev->args[1];
364 *a0 = vdev->args[1];
365 *a1 = vdev->args[2];
370 static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
375 vdev->args[0] = *a0;
376 vdev->args[1] = *a1;
378 err = _vnic_dev_cmd(vdev, cmd, wait);
380 *a0 = vdev->args[0];
381 *a1 = vdev->args[1];
386 void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
388 vdev->proxy = PROXY_BY_INDEX;
389 vdev->proxy_index = index;
392 void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
394 vdev->proxy = PROXY_NONE;
395 vdev->proxy_index = 0;
398 int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
401 memset(vdev->args, 0, sizeof(vdev->args));
403 switch (vdev->proxy) {
405 return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
408 return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
412 return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
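
Every command funnels through vnic_dev_cmd(), which packs a0/a1 into vdev->args and, when a proxy is active, wraps them in CMD_PROXY_BY_INDEX or CMD_PROXY_BY_BDF. A minimal sketch of the by-index proxy bracket, using the u64 a0/a1 calling convention shown above; the wait value and the choice of CMD_ENABLE are illustrative:

#include <linux/types.h>
#include "vnic_dev.h"
#include "vnic_devcmd.h"

/* Sketch: issue CMD_ENABLE on behalf of the subordinate vNIC at 'vf_index'. */
static int proxied_enable(struct vnic_dev *vdev, u16 vf_index)
{
        u64 a0 = 0, a1 = 0;
        int wait = 1000;        /* illustrative devcmd timeout */
        int err;

        vnic_dev_cmd_proxy_by_index_start(vdev, vf_index);
        err = vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
        vnic_dev_cmd_proxy_end(vdev);

        return err;
}

Clearing the proxy with vnic_dev_cmd_proxy_end() matters because vdev->proxy and vdev->proxy_index are per-device state, not per-call.
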
416 static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
422 err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
427 int vnic_dev_fw_info(struct vnic_dev *vdev,
434 if (!vdev->fw_info) {
435 vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
437 &vdev->fw_info_pa);
438 if (!vdev->fw_info)
441 a0 = vdev->fw_info_pa;
445 if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
446 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
449 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
453 *fw_info = vdev->fw_info;
458 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
468 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
481 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
486 if (!vdev->stats) {
487 vdev->stats = pci_alloc_consistent(vdev->pdev,
488 sizeof(struct vnic_stats), &vdev->stats_pa);
489 if (!vdev->stats)
493 *stats = vdev->stats;
494 a0 = vdev->stats_pa;
497 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
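
vnic_dev_stats_dump() lazily allocates one DMA-coherent struct vnic_stats per vdev, hands firmware its bus address through CMD_STATS_DUMP, and returns the same pointer on every call. A minimal sketch of a caller refreshing the counters; the tx.tx_frames_ok field name is an assumption about struct vnic_stats, not shown in the matches:

#include <linux/types.h>
#include "vnic_dev.h"

/* Sketch: refresh the firmware-maintained counters and read one of them. */
static u64 read_tx_frames_ok(struct vnic_dev *vdev)
{
        struct vnic_stats *stats;

        if (vnic_dev_stats_dump(vdev, &stats))
                return 0;       /* allocation or devcmd failure */

        return stats->tx.tx_frames_ok;  /* assumed field layout */
}
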
500 int vnic_dev_close(struct vnic_dev *vdev)
504 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
507 int vnic_dev_enable_wait(struct vnic_dev *vdev)
512 if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
513 return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
515 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
518 int vnic_dev_disable(struct vnic_dev *vdev)
522 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
525 int vnic_dev_open(struct vnic_dev *vdev, int arg)
529 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
532 int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
540 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
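
CMD_OPEN is asynchronous: vnic_dev_open() kicks it off and vnic_dev_open_done() polls CMD_OPEN_STATUS until the adapter reports completion. A minimal sketch of the wait loop a caller would wrap around the pair; the iteration count and sleep interval are illustrative, not taken from the source:

#include <linux/delay.h>
#include <linux/errno.h>
#include "vnic_dev.h"

/* Sketch: open the vNIC, then poll for completion. */
static int open_and_wait(struct vnic_dev *vdev, int arg)
{
        int done, err, i;

        err = vnic_dev_open(vdev, arg);
        if (err)
                return err;

        for (i = 0; i < 100; i++) {
                err = vnic_dev_open_done(vdev, &done);
                if (err)
                        return err;
                if (done)
                        return 0;
                usleep_range(1000, 2000);       /* illustrative pacing */
        }

        return -ETIMEDOUT;
}
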
549 static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
553 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
556 static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
564 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
573 int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
579 if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
580 return vnic_dev_cmd(vdev, CMD_HANG_RESET,
583 err = vnic_dev_soft_reset(vdev, arg);
586 return vnic_dev_init(vdev, 0);
590 int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
598 if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
599 err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
604 return vnic_dev_soft_reset_done(vdev, done);
612 int vnic_dev_hang_notify(struct vnic_dev *vdev)
616 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
619 int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
628 err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
638 int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
651 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
658 int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
668 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
675 int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
685 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
692 int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
698 if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
699 return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
705 static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
713 vdev->notify = notify_addr;
714 vdev->notify_pa = notify_pa;
720 r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
721 vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
725 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
730 if (vdev->notify || vdev->notify_pa) {
731 pr_err("notify block %p still allocated", vdev->notify);
735 notify_addr = pci_alloc_consistent(vdev->pdev,
741 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
744 static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
754 err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
755 vdev->notify = NULL;
756 vdev->notify_pa = 0;
757 vdev->notify_sz = 0;
762 int vnic_dev_notify_unset(struct vnic_dev *vdev)
764 if (vdev->notify) {
765 pci_free_consistent(vdev->pdev,
767 vdev->notify,
768 vdev->notify_pa);
771 return vnic_dev_notify_unsetcmd(vdev);
774 static int vnic_dev_notify_ready(struct vnic_dev *vdev)
777 unsigned int nwords = vdev->notify_sz / 4;
781 if (!vdev->notify || !vdev->notify_sz)
786 memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
787 words = (u32 *)&vdev->notify_copy;
795 int vnic_dev_init(struct vnic_dev *vdev, int arg)
801 if (vnic_dev_capable(vdev, CMD_INIT))
802 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
804 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
809 vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
810 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
816 int vnic_dev_deinit(struct vnic_dev *vdev)
821 return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
824 void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
827 vdev->intr_coal_timer_info.mul = 2;
828 vdev->intr_coal_timer_info.div = 3;
829 vdev->intr_coal_timer_info.max_usec =
830 vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
833 int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
838 memset(vdev->args, 0, sizeof(vdev->args));
840 if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
841 err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
849 (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
851 vnic_dev_intr_coal_timer_info_default(vdev);
856 vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
857 vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
858 vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
864 int vnic_dev_link_status(struct vnic_dev *vdev)
866 if (!vnic_dev_notify_ready(vdev))
869 return vdev->notify_copy.link_state;
872 u32 vnic_dev_port_speed(struct vnic_dev *vdev)
874 if (!vnic_dev_notify_ready(vdev))
877 return vdev->notify_copy.port_speed;
880 u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
882 if (!vnic_dev_notify_ready(vdev))
885 return vdev->notify_copy.msglvl;
888 u32 vnic_dev_mtu(struct vnic_dev *vdev)
890 if (!vnic_dev_notify_ready(vdev))
893 return vdev->notify_copy.mtu;
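
The link_status/port_speed/msglvl/mtu getters above only return live values once vnic_dev_notify_set() has installed the notify block and vnic_dev_notify_ready() sees a consistent snapshot; otherwise they fall back to defaults. A minimal sketch of the set/read/unset sequence, assuming 'intr' is the interrupt index chosen for notifications:

#include <linux/kernel.h>
#include "vnic_dev.h"

/* Sketch: install the notify block, report the link, then tear it down. */
static void report_link(struct vnic_dev *vdev, u16 intr)
{
        if (vnic_dev_notify_set(vdev, intr))
                return;         /* CMD_NOTIFY failed or a block is already in place */

        if (vnic_dev_link_status(vdev))
                pr_info("link up, speed %u, mtu %u\n",
                        vnic_dev_port_speed(vdev), vnic_dev_mtu(vdev));

        vnic_dev_notify_unset(vdev);
}
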
896 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
899 vdev->intr_mode = intr_mode;
903 struct vnic_dev *vdev)
905 return vdev->intr_mode;
908 u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
910 return (usec * vdev->intr_coal_timer_info.mul) /
911 vdev->intr_coal_timer_info.div;
914 u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
916 return (hw_cycles * vdev->intr_coal_timer_info.div) /
917 vdev->intr_coal_timer_info.mul;
920 u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
922 return vdev->intr_coal_timer_info.max_usec;
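
The two conversion helpers scale by the mul/div pair obtained from CMD_INTR_COAL_CONVERT (or the 2/3 default), and max_usec bounds what the hardware timer can represent. A minimal sketch of clamping a requested coalescing interval before programming it; with the default 2/3 factors, a 30 us request maps to 20 hardware units:

#include <linux/types.h>
#include "vnic_dev.h"

/* Sketch: convert a requested coalescing interval to hardware units,
 * clamped to the adapter's maximum. */
static u32 coal_usec_to_hw_clamped(struct vnic_dev *vdev, u32 usec)
{
        u32 max_usec = vnic_dev_get_intr_coal_timer_max(vdev);

        if (usec > max_usec)
                usec = max_usec;

        return vnic_dev_intr_coal_timer_usec_to_hw(vdev, usec);
}
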
925 void vnic_dev_unregister(struct vnic_dev *vdev)
927 if (vdev) {
928 if (vdev->notify)
929 pci_free_consistent(vdev->pdev,
931 vdev->notify,
932 vdev->notify_pa);
933 if (vdev->stats)
934 pci_free_consistent(vdev->pdev,
936 vdev->stats, vdev->stats_pa);
937 if (vdev->fw_info)
938 pci_free_consistent(vdev->pdev,
940 vdev->fw_info, vdev->fw_info_pa);
941 kfree(vdev);
946 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
950 if (!vdev) {
951 vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
952 if (!vdev)
956 vdev->priv = priv;
957 vdev->pdev = pdev;
959 if (vnic_dev_discover_res(vdev, bar, num_bars))
962 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
963 if (!vdev->devcmd)
966 return vdev;
969 vnic_dev_unregister(vdev);
974 struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
976 return vdev->pdev;
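
vnic_dev_register() optionally allocates the vnic_dev, records priv and pdev, walks the BARs via vnic_dev_discover_res(), and bails out unless a RES_TYPE_DEVCMD region is found; vnic_dev_unregister() later frees the notify, stats and fw_info blocks it may have accumulated. A probe-time sketch, assuming the (priv, pdev, bar, num_bars) parameter order implied by the assignments above and a struct vnic_dev_bar table (declared in vnic_dev.h) already filled from ioremapped PCI resources:

#include <linux/pci.h>
#include "vnic_dev.h"

/* Sketch: register against the devcmd resources during PCI probe. */
static struct vnic_dev *attach_vdev(void *priv, struct pci_dev *pdev,
                                    struct vnic_dev_bar *bar,
                                    unsigned int num_bars)
{
        struct vnic_dev *vdev;

        vdev = vnic_dev_register(NULL, priv, pdev, bar, num_bars);
        if (!vdev)
                return NULL;    /* allocation, discovery, or devcmd lookup failed */

        /* ... queue, interrupt and notify setup would follow ... */
        return vdev;
}

/* Teardown mirrors it: vnic_dev_unregister(vdev), which is NULL-safe. */
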
980 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
988 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
996 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
998 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
1003 int vnic_dev_enable2(struct vnic_dev *vdev, int active)
1010 return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
1013 static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
1020 ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
1027 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
1029 return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
1032 int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
1034 return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
1037 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
1046 return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
1050 * @vdev: vdev of the device
1063 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
1078 tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
1099 ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
1101 pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
1104 ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);