Lines Matching refs:np

794 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
818 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
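(Aside: a minimal caller-side sketch of the locking rule stated in the two comments above — either run inside the irq handler, or block the irq and take np->lock first. Only disable_irq()/enable_irq(), spin_lock()/spin_unlock(), np->pci_dev->irq and np->lock come from the listing; nv_touch_hw_state() is a hypothetical placeholder for whatever the caller does under the lock.)

	/* not in irq context, so shut the irq out before touching shared state */
	disable_irq(np->pci_dev->irq);   /* the handler can no longer race us */
	spin_lock(&np->lock);            /* serialize with other process contexts */
	nv_touch_hw_state(np);           /* hypothetical: update rings/registers */
	spin_unlock(&np->lock);
	enable_irq(np->pci_dev->irq);    /* let the handler back in */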
964 static bool nv_optimized(struct fe_priv *np)
966 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1001 struct fe_priv *np = get_nvpriv(dev);
1004 if (!nv_optimized(np)) {
1006 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1008 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1011 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1012 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
1015 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1016 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
1023 struct fe_priv *np = get_nvpriv(dev);
1025 if (!nv_optimized(np)) {
1026 if (np->rx_ring.orig)
1027 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
1028 np->rx_ring.orig, np->ring_addr);
1030 if (np->rx_ring.ex)
1031 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1032 np->rx_ring.ex, np->ring_addr);
1034 kfree(np->rx_skb);
1035 kfree(np->tx_skb);
1040 struct fe_priv *np = get_nvpriv(dev);
1042 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1043 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1044 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1052 struct fe_priv *np = get_nvpriv(dev);
1056 if (!np->mac_in_use &&
1057 (np->driver_data & DEV_HAS_POWER_CNTRL)) {
1069 struct fe_priv *np = get_nvpriv(dev);
1072 if (np->msi_flags & NV_MSI_X_ENABLED)
1073 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1075 enable_irq(np->pci_dev->irq);
1077 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1078 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1079 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1085 struct fe_priv *np = get_nvpriv(dev);
1088 if (np->msi_flags & NV_MSI_X_ENABLED)
1089 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1091 disable_irq(np->pci_dev->irq);
1093 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1094 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1095 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1109 struct fe_priv *np = get_nvpriv(dev);
1112 if (np->msi_flags & NV_MSI_X_ENABLED) {
1115 if (np->msi_flags & NV_MSI_ENABLED)
1123 struct fe_priv *np = get_nvpriv(dev);
1125 napi_enable(&np->napi);
1130 struct fe_priv *np = get_nvpriv(dev);
1132 napi_disable(&np->napi);
1178 struct fe_priv *np = netdev_priv(dev);
1183 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1192 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1200 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1217 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1224 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1239 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1241 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1243 if (mii_rw(dev, np->phyaddr,
1246 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1249 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1252 if (mii_rw(dev, np->phyaddr,
1259 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1263 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1264 phy_reserved = mii_rw(dev, np->phyaddr,
1267 if (mii_rw(dev, np->phyaddr,
1275 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1280 if (mii_rw(dev, np->phyaddr,
1283 phy_reserved = mii_rw(dev, np->phyaddr,
1287 if (mii_rw(dev, np->phyaddr,
1290 if (mii_rw(dev, np->phyaddr,
1298 static int init_cicada(struct net_device *dev, struct fe_priv *np,
1304 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1307 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1309 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1311 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1314 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1316 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1322 static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1326 if (mii_rw(dev, np->phyaddr,
1329 if (mii_rw(dev, np->phyaddr,
1332 phy_reserved = mii_rw(dev, np->phyaddr,
1334 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1336 phy_reserved = mii_rw(dev, np->phyaddr,
1340 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1342 if (mii_rw(dev, np->phyaddr,
1345 if (mii_rw(dev, np->phyaddr,
1348 phy_reserved = mii_rw(dev, np->phyaddr,
1352 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1354 phy_reserved = mii_rw(dev, np->phyaddr,
1356 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1358 if (mii_rw(dev, np->phyaddr,
1361 if (mii_rw(dev, np->phyaddr,
1364 phy_reserved = mii_rw(dev, np->phyaddr,
1366 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1368 phy_reserved = mii_rw(dev, np->phyaddr,
1372 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1374 if (mii_rw(dev, np->phyaddr,
1377 if (mii_rw(dev, np->phyaddr,
1386 struct fe_priv *np = get_nvpriv(dev);
1392 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1393 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1395 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1397 pci_name(np->pci_dev));
1401 if (np->phy_oui == PHY_OUI_REALTEK) {
1402 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1403 np->phy_rev == PHY_REV_REALTEK_8211B) {
1404 if (init_realtek_8211b(dev, np)) {
1406 pci_name(np->pci_dev));
1409 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1410 np->phy_rev == PHY_REV_REALTEK_8211C) {
1411 if (init_realtek_8211c(dev, np)) {
1413 pci_name(np->pci_dev));
1416 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1417 if (init_realtek_8201(dev, np)) {
1419 pci_name(np->pci_dev));
1426 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1430 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1432 pci_name(np->pci_dev));
1440 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1442 np->gigabit = PHY_GIGABIT;
1443 mii_control_1000 = mii_rw(dev, np->phyaddr,
1451 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1453 pci_name(np->pci_dev));
1457 np->gigabit = 0;
1459 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1462 if (np->phy_oui == PHY_OUI_REALTEK &&
1463 np->phy_model == PHY_MODEL_REALTEK_8211 &&
1464 np->phy_rev == PHY_REV_REALTEK_8211C) {
1467 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1469 pci_name(np->pci_dev));
1478 pci_name(np->pci_dev));
1484 if (np->phy_oui == PHY_OUI_CICADA) {
1485 if (init_cicada(dev, np, phyinterface)) {
1487 pci_name(np->pci_dev));
1490 } else if (np->phy_oui == PHY_OUI_VITESSE) {
1491 if (init_vitesse(dev, np)) {
1493 pci_name(np->pci_dev));
1496 } else if (np->phy_oui == PHY_OUI_REALTEK) {
1497 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1498 np->phy_rev == PHY_REV_REALTEK_8211B) {
1500 if (init_realtek_8211b(dev, np)) {
1502 pci_name(np->pci_dev));
1505 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1506 if (init_realtek_8201(dev, np) ||
1507 init_realtek_8201_cross(dev, np)) {
1509 pci_name(np->pci_dev));
1516 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1519 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1523 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1531 struct fe_priv *np = netdev_priv(dev);
1536 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1541 writel(np->linkspeed, base + NvRegLinkSpeed);
1544 if (np->mac_in_use)
1552 struct fe_priv *np = netdev_priv(dev);
1556 if (!np->mac_in_use)
1567 if (!np->mac_in_use)
1573 struct fe_priv *np = netdev_priv(dev);
1578 if (np->mac_in_use)
1586 struct fe_priv *np = netdev_priv(dev);
1590 if (!np->mac_in_use)
1601 if (!np->mac_in_use)
1620 struct fe_priv *np = netdev_priv(dev);
1623 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1626 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1632 struct fe_priv *np = netdev_priv(dev);
1636 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1656 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1663 struct fe_priv *np = netdev_priv(dev);
1670 assert_spin_locked(&np->hwstats_lock);
1673 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1674 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1675 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1676 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1677 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1678 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1679 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1680 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1681 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1682 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1683 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1684 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1685 np->estats.rx_runt += readl(base + NvRegRxRunt);
1686 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1687 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1688 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1689 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1690 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1691 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1692 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1693 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1694 np->estats.rx_packets =
1695 np->estats.rx_unicast +
1696 np->estats.rx_multicast +
1697 np->estats.rx_broadcast;
1698 np->estats.rx_errors_total =
1699 np->estats.rx_crc_errors +
1700 np->estats.rx_over_errors +
1701 np->estats.rx_frame_error +
1702 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1703 np->estats.rx_late_collision +
1704 np->estats.rx_runt +
1705 np->estats.rx_frame_too_long;
1706 np->estats.tx_errors_total =
1707 np->estats.tx_late_collision +
1708 np->estats.tx_fifo_errors +
1709 np->estats.tx_carrier_errors +
1710 np->estats.tx_excess_deferral +
1711 np->estats.tx_retry_error;
1713 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1714 np->estats.tx_deferral += readl(base + NvRegTxDef);
1715 np->estats.tx_packets += readl(base + NvRegTxFrame);
1716 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1717 np->estats.tx_pause += readl(base + NvRegTxPause);
1718 np->estats.rx_pause += readl(base + NvRegRxPause);
1719 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1720 np->estats.rx_errors_total += np->estats.rx_drop_frame;
1723 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1724 np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1725 np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1726 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1741 struct fe_priv *np = netdev_priv(dev);
1756 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1757 storage->rx_packets = np->stat_rx_packets;
1758 storage->rx_bytes = np->stat_rx_bytes;
1759 storage->rx_dropped = np->stat_rx_dropped;
1760 storage->rx_missed_errors = np->stat_rx_missed_errors;
1761 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1764 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1765 storage->tx_packets = np->stat_tx_packets;
1766 storage->tx_bytes = np->stat_tx_bytes;
1767 storage->tx_dropped = np->stat_tx_dropped;
1768 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1772 spin_lock_bh(&np->hwstats_lock);
1777 storage->rx_errors = np->estats.rx_errors_total;
1778 storage->tx_errors = np->estats.tx_errors_total;
1781 storage->multicast = np->estats.rx_multicast;
1784 storage->rx_length_errors = np->estats.rx_length_error;
1785 storage->rx_over_errors = np->estats.rx_over_errors;
1786 storage->rx_crc_errors = np->estats.rx_crc_errors;
1787 storage->rx_frame_errors = np->estats.rx_frame_align_error;
1788 storage->rx_fifo_errors = np->estats.rx_drop_frame;
1791 storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1792 storage->tx_fifo_errors = np->estats.tx_fifo_errors;
1794 spin_unlock_bh(&np->hwstats_lock);
1807 struct fe_priv *np = netdev_priv(dev);
1810 less_rx = np->get_rx.orig;
1811 if (less_rx-- == np->first_rx.orig)
1812 less_rx = np->last_rx.orig;
1814 while (np->put_rx.orig != less_rx) {
1815 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1817 np->put_rx_ctx->skb = skb;
1818 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1822 if (pci_dma_mapping_error(np->pci_dev,
1823 np->put_rx_ctx->dma)) {
1827 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1828 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1830 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1831 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1832 np->put_rx.orig = np->first_rx.orig;
1833 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1834 np->put_rx_ctx = np->first_rx_ctx;
1837 u64_stats_update_begin(&np->swstats_rx_syncp);
1838 np->stat_rx_dropped++;
1839 u64_stats_update_end(&np->swstats_rx_syncp);
1848 struct fe_priv *np = netdev_priv(dev);
1851 less_rx = np->get_rx.ex;
1852 if (less_rx-- == np->first_rx.ex)
1853 less_rx = np->last_rx.ex;
1855 while (np->put_rx.ex != less_rx) {
1856 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1858 np->put_rx_ctx->skb = skb;
1859 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1863 if (pci_dma_mapping_error(np->pci_dev,
1864 np->put_rx_ctx->dma)) {
1868 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1869 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1870 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1872 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1873 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1874 np->put_rx.ex = np->first_rx.ex;
1875 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1876 np->put_rx_ctx = np->first_rx_ctx;
1879 u64_stats_update_begin(&np->swstats_rx_syncp);
1880 np->stat_rx_dropped++;
1881 u64_stats_update_end(&np->swstats_rx_syncp);
1892 struct fe_priv *np = netdev_priv(dev);
1895 napi_schedule(&np->napi);
1900 struct fe_priv *np = netdev_priv(dev);
1903 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1905 if (!nv_optimized(np))
1906 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1908 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1909 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1910 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1912 for (i = 0; i < np->rx_ring_size; i++) {
1913 if (!nv_optimized(np)) {
1914 np->rx_ring.orig[i].flaglen = 0;
1915 np->rx_ring.orig[i].buf = 0;
1917 np->rx_ring.ex[i].flaglen = 0;
1918 np->rx_ring.ex[i].txvlan = 0;
1919 np->rx_ring.ex[i].bufhigh = 0;
1920 np->rx_ring.ex[i].buflow = 0;
1922 np->rx_skb[i].skb = NULL;
1923 np->rx_skb[i].dma = 0;
1929 struct fe_priv *np = netdev_priv(dev);
1932 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1934 if (!nv_optimized(np))
1935 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1937 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1938 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1939 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1940 netdev_reset_queue(np->dev);
1941 np->tx_pkts_in_progress = 0;
1942 np->tx_change_owner = NULL;
1943 np->tx_end_flip = NULL;
1944 np->tx_stop = 0;
1946 for (i = 0; i < np->tx_ring_size; i++) {
1947 if (!nv_optimized(np)) {
1948 np->tx_ring.orig[i].flaglen = 0;
1949 np->tx_ring.orig[i].buf = 0;
1951 np->tx_ring.ex[i].flaglen = 0;
1952 np->tx_ring.ex[i].txvlan = 0;
1953 np->tx_ring.ex[i].bufhigh = 0;
1954 np->tx_ring.ex[i].buflow = 0;
1956 np->tx_skb[i].skb = NULL;
1957 np->tx_skb[i].dma = 0;
1958 np->tx_skb[i].dma_len = 0;
1959 np->tx_skb[i].dma_single = 0;
1960 np->tx_skb[i].first_tx_desc = NULL;
1961 np->tx_skb[i].next_tx_ctx = NULL;
1967 struct fe_priv *np = netdev_priv(dev);
1972 if (!nv_optimized(np))
1978 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1982 pci_unmap_single(np->pci_dev, tx_skb->dma,
1986 pci_unmap_page(np->pci_dev, tx_skb->dma,
1993 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1995 nv_unmap_txskb(np, tx_skb);
2006 struct fe_priv *np = netdev_priv(dev);
2009 for (i = 0; i < np->tx_ring_size; i++) {
2010 if (!nv_optimized(np)) {
2011 np->tx_ring.orig[i].flaglen = 0;
2012 np->tx_ring.orig[i].buf = 0;
2014 np->tx_ring.ex[i].flaglen = 0;
2015 np->tx_ring.ex[i].txvlan = 0;
2016 np->tx_ring.ex[i].bufhigh = 0;
2017 np->tx_ring.ex[i].buflow = 0;
2019 if (nv_release_txskb(np, &np->tx_skb[i])) {
2020 u64_stats_update_begin(&np->swstats_tx_syncp);
2021 np->stat_tx_dropped++;
2022 u64_stats_update_end(&np->swstats_tx_syncp);
2024 np->tx_skb[i].dma = 0;
2025 np->tx_skb[i].dma_len = 0;
2026 np->tx_skb[i].dma_single = 0;
2027 np->tx_skb[i].first_tx_desc = NULL;
2028 np->tx_skb[i].next_tx_ctx = NULL;
2030 np->tx_pkts_in_progress = 0;
2031 np->tx_change_owner = NULL;
2032 np->tx_end_flip = NULL;
2037 struct fe_priv *np = netdev_priv(dev);
2040 for (i = 0; i < np->rx_ring_size; i++) {
2041 if (!nv_optimized(np)) {
2042 np->rx_ring.orig[i].flaglen = 0;
2043 np->rx_ring.orig[i].buf = 0;
2045 np->rx_ring.ex[i].flaglen = 0;
2046 np->rx_ring.ex[i].txvlan = 0;
2047 np->rx_ring.ex[i].bufhigh = 0;
2048 np->rx_ring.ex[i].buflow = 0;
2051 if (np->rx_skb[i].skb) {
2052 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
2053 (skb_end_pointer(np->rx_skb[i].skb) -
2054 np->rx_skb[i].skb->data),
2056 dev_kfree_skb(np->rx_skb[i].skb);
2057 np->rx_skb[i].skb = NULL;
2068 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2070 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2085 * Caller has already gained np->lock.
2187 struct fe_priv *np = netdev_priv(dev);
2189 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2212 spin_lock_irqsave(&np->lock, flags);
2213 empty_slots = nv_get_empty_tx_slots(np);
2216 np->tx_stop = 1;
2217 spin_unlock_irqrestore(&np->lock, flags);
2220 spin_unlock_irqrestore(&np->lock, flags);
2222 start_tx = put_tx = np->put_tx.orig;
2227 prev_tx_ctx = np->put_tx_ctx;
2229 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2231 if (pci_dma_mapping_error(np->pci_dev,
2232 np->put_tx_ctx->dma)) {
2235 u64_stats_update_begin(&np->swstats_tx_syncp);
2236 np->stat_tx_dropped++;
2237 u64_stats_update_end(&np->swstats_tx_syncp);
2240 np->put_tx_ctx->dma_len = bcnt;
2241 np->put_tx_ctx->dma_single = 1;
2242 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2245 tx_flags = np->tx_flags;
2248 if (unlikely(put_tx++ == np->last_tx.orig))
2249 put_tx = np->first_tx.orig;
2250 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2251 np->put_tx_ctx = np->first_tx_ctx;
2262 prev_tx_ctx = np->put_tx_ctx;
2264 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2267 np->put_tx_ctx->dma = skb_frag_dma_map(
2268 &np->pci_dev->dev,
2272 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2276 nv_unmap_txskb(np, start_tx_ctx);
2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2278 tmp_tx_ctx = np->first_tx_ctx;
2279 } while (tmp_tx_ctx != np->put_tx_ctx);
2281 np->put_tx_ctx = start_tx_ctx;
2282 u64_stats_update_begin(&np->swstats_tx_syncp);
2283 np->stat_tx_dropped++;
2284 u64_stats_update_end(&np->swstats_tx_syncp);
2288 np->put_tx_ctx->dma_len = bcnt;
2289 np->put_tx_ctx->dma_single = 0;
2290 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2295 if (unlikely(put_tx++ == np->last_tx.orig))
2296 put_tx = np->first_tx.orig;
2297 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2298 np->put_tx_ctx = np->first_tx_ctx;
2314 spin_lock_irqsave(&np->lock, flags);
2319 netdev_sent_queue(np->dev, skb->len);
2323 np->put_tx.orig = put_tx;
2325 spin_unlock_irqrestore(&np->lock, flags);
2327 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2334 struct fe_priv *np = netdev_priv(dev);
2360 spin_lock_irqsave(&np->lock, flags);
2361 empty_slots = nv_get_empty_tx_slots(np);
2364 np->tx_stop = 1;
2365 spin_unlock_irqrestore(&np->lock, flags);
2368 spin_unlock_irqrestore(&np->lock, flags);
2370 start_tx = put_tx = np->put_tx.ex;
2371 start_tx_ctx = np->put_tx_ctx;
2376 prev_tx_ctx = np->put_tx_ctx;
2378 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2380 if (pci_dma_mapping_error(np->pci_dev,
2381 np->put_tx_ctx->dma)) {
2384 u64_stats_update_begin(&np->swstats_tx_syncp);
2385 np->stat_tx_dropped++;
2386 u64_stats_update_end(&np->swstats_tx_syncp);
2389 np->put_tx_ctx->dma_len = bcnt;
2390 np->put_tx_ctx->dma_single = 1;
2391 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2392 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2398 if (unlikely(put_tx++ == np->last_tx.ex))
2399 put_tx = np->first_tx.ex;
2400 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2401 np->put_tx_ctx = np->first_tx_ctx;
2412 prev_tx_ctx = np->put_tx_ctx;
2415 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2416 np->put_tx_ctx->dma = skb_frag_dma_map(
2417 &np->pci_dev->dev,
2422 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2426 nv_unmap_txskb(np, start_tx_ctx);
2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2428 tmp_tx_ctx = np->first_tx_ctx;
2429 } while (tmp_tx_ctx != np->put_tx_ctx);
2431 np->put_tx_ctx = start_tx_ctx;
2432 u64_stats_update_begin(&np->swstats_tx_syncp);
2433 np->stat_tx_dropped++;
2434 u64_stats_update_end(&np->swstats_tx_syncp);
2437 np->put_tx_ctx->dma_len = bcnt;
2438 np->put_tx_ctx->dma_single = 0;
2439 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2440 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2445 if (unlikely(put_tx++ == np->last_tx.ex))
2446 put_tx = np->first_tx.ex;
2447 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2448 np->put_tx_ctx = np->first_tx_ctx;
2471 spin_lock_irqsave(&np->lock, flags);
2473 if (np->tx_limit) {
2479 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2480 if (!np->tx_change_owner)
2481 np->tx_change_owner = start_tx_ctx;
2486 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2487 np->tx_end_flip = np->put_tx_ctx;
2489 np->tx_pkts_in_progress++;
2496 netdev_sent_queue(np->dev, skb->len);
2500 np->put_tx.ex = put_tx;
2502 spin_unlock_irqrestore(&np->lock, flags);
2504 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2510 struct fe_priv *np = netdev_priv(dev);
2512 np->tx_pkts_in_progress--;
2513 if (np->tx_change_owner) {
2514 np->tx_change_owner->first_tx_desc->flaglen |=
2516 np->tx_pkts_in_progress++;
2518 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2519 if (np->tx_change_owner == np->tx_end_flip)
2520 np->tx_change_owner = NULL;
2522 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2529 * Caller must own np->lock.
2533 struct fe_priv *np = netdev_priv(dev);
2536 struct ring_desc *orig_get_tx = np->get_tx.orig;
2539 while ((np->get_tx.orig != np->put_tx.orig) &&
2540 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2543 nv_unmap_txskb(np, np->get_tx_ctx);
2545 if (np->desc_ver == DESC_VER_1) {
2552 u64_stats_update_begin(&np->swstats_tx_syncp);
2553 np->stat_tx_packets++;
2554 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2555 u64_stats_update_end(&np->swstats_tx_syncp);
2557 bytes_compl += np->get_tx_ctx->skb->len;
2558 dev_kfree_skb_any(np->get_tx_ctx->skb);
2559 np->get_tx_ctx->skb = NULL;
2569 u64_stats_update_begin(&np->swstats_tx_syncp);
2570 np->stat_tx_packets++;
2571 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2572 u64_stats_update_end(&np->swstats_tx_syncp);
2574 bytes_compl += np->get_tx_ctx->skb->len;
2575 dev_kfree_skb_any(np->get_tx_ctx->skb);
2576 np->get_tx_ctx->skb = NULL;
2580 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2581 np->get_tx.orig = np->first_tx.orig;
2582 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2583 np->get_tx_ctx = np->first_tx_ctx;
2586 netdev_completed_queue(np->dev, tx_work, bytes_compl);
2588 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2589 np->tx_stop = 0;
2597 struct fe_priv *np = netdev_priv(dev);
2600 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2603 while ((np->get_tx.ex != np->put_tx.ex) &&
2604 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2607 nv_unmap_txskb(np, np->get_tx_ctx);
2613 if (np->driver_data & DEV_HAS_GEAR_MODE)
2619 u64_stats_update_begin(&np->swstats_tx_syncp);
2620 np->stat_tx_packets++;
2621 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2622 u64_stats_update_end(&np->swstats_tx_syncp);
2625 bytes_cleaned += np->get_tx_ctx->skb->len;
2626 dev_kfree_skb_any(np->get_tx_ctx->skb);
2627 np->get_tx_ctx->skb = NULL;
2630 if (np->tx_limit)
2634 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2635 np->get_tx.ex = np->first_tx.ex;
2636 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2637 np->get_tx_ctx = np->first_tx_ctx;
2640 netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2642 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2643 np->tx_stop = 0;
2655 struct fe_priv *np = netdev_priv(dev);
2661 if (np->msi_flags & NV_MSI_X_ENABLED)
2671 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2673 for (i = 0; i <= np->register_size; i += 32) {
2684 for (i = 0; i < np->tx_ring_size; i += 4) {
2685 if (!nv_optimized(np)) {
2690 le32_to_cpu(np->tx_ring.orig[i].buf),
2691 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2692 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2693 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2694 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2695 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2696 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2697 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2705 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2706 le32_to_cpu(np->tx_ring.ex[i].buflow),
2707 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2708 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2709 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2710 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2711 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2712 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2713 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2714 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2715 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2716 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2721 spin_lock_irq(&np->lock);
2727 saved_tx_limit = np->tx_limit;
2728 np->tx_limit = 0; /* prevent giving HW any limited pkts */
2729 np->tx_stop = 0; /* prevent waking tx queue */
2730 if (!nv_optimized(np))
2731 nv_tx_done(dev, np->tx_ring_size);
2733 nv_tx_done_optimized(dev, np->tx_ring_size);
2736 if (np->tx_change_owner)
2737 put_tx.ex = np->tx_change_owner->first_tx_desc;
2739 put_tx = np->put_tx;
2746 np->get_tx = np->put_tx = put_tx;
2747 np->tx_limit = saved_tx_limit;
2752 spin_unlock_irq(&np->lock);
2800 struct fe_priv *np = netdev_priv(dev);
2806 while ((np->get_rx.orig != np->put_rx.orig) &&
2807 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2815 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2816 np->get_rx_ctx->dma_len,
2818 skb = np->get_rx_ctx->skb;
2819 np->get_rx_ctx->skb = NULL;
2822 if (np->desc_ver == DESC_VER_1) {
2841 u64_stats_update_begin(&np->swstats_rx_syncp);
2842 np->stat_rx_missed_errors++;
2843 u64_stats_update_end(&np->swstats_rx_syncp);
2886 napi_gro_receive(&np->napi, skb);
2887 u64_stats_update_begin(&np->swstats_rx_syncp);
2888 np->stat_rx_packets++;
2889 np->stat_rx_bytes += len;
2890 u64_stats_update_end(&np->swstats_rx_syncp);
2892 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2893 np->get_rx.orig = np->first_rx.orig;
2894 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2895 np->get_rx_ctx = np->first_rx_ctx;
2905 struct fe_priv *np = netdev_priv(dev);
2912 while ((np->get_rx.ex != np->put_rx.ex) &&
2913 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2921 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2922 np->get_rx_ctx->dma_len,
2924 skb = np->get_rx_ctx->skb;
2925 np->get_rx_ctx->skb = NULL;
2959 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2972 napi_gro_receive(&np->napi, skb);
2973 u64_stats_update_begin(&np->swstats_rx_syncp);
2974 np->stat_rx_packets++;
2975 np->stat_rx_bytes += len;
2976 u64_stats_update_end(&np->swstats_rx_syncp);
2981 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2982 np->get_rx.ex = np->first_rx.ex;
2983 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2984 np->get_rx_ctx = np->first_rx_ctx;
2994 struct fe_priv *np = netdev_priv(dev);
2997 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2999 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3008 struct fe_priv *np = netdev_priv(dev);
3011 if (new_mtu < 64 || new_mtu > np->pkt_limit)
3036 spin_lock(&np->lock);
3045 if (!np->in_shutdown)
3046 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3049 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3051 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3054 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3059 spin_unlock(&np->lock);
3087 struct fe_priv *np = netdev_priv(dev);
3099 spin_lock_irq(&np->lock);
3109 spin_unlock_irq(&np->lock);
3124 struct fe_priv *np = netdev_priv(dev);
3171 spin_lock_irq(&np->lock);
3179 spin_unlock_irq(&np->lock);
3184 struct fe_priv *np = netdev_priv(dev);
3187 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3189 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3193 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3198 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3202 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3204 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3211 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3221 struct fe_priv *np = netdev_priv(dev);
3226 np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3227 np->duplex = duplex;
3230 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3232 np->gigabit = PHY_GIGABIT;
3235 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3237 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3239 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3246 if (np->duplex == 0)
3248 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3250 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3256 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3266 if (np->desc_ver == DESC_VER_1) {
3269 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3277 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3280 writel(np->linkspeed, base + NvRegLinkSpeed);
3299 struct fe_priv *np = netdev_priv(dev);
3304 int newls = np->linkspeed;
3305 int newdup = np->duplex;
3316 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3329 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3330 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3339 if (np->autoneg == 0) {
3340 if (np->fixed_mode & LPA_100FULL) {
3343 } else if (np->fixed_mode & LPA_100HALF) {
3346 } else if (np->fixed_mode & LPA_10FULL) {
3365 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3366 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3369 if (np->gigabit == PHY_GIGABIT) {
3370 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3371 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3401 if (np->duplex == newdup && np->linkspeed == newls)
3404 np->duplex = newdup;
3405 np->linkspeed = newls;
3417 if (np->gigabit == PHY_GIGABIT) {
3420 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3421 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3423 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3430 if (np->duplex == 0)
3432 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3434 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3438 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3440 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3443 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3444 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3453 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3460 if (np->desc_ver == DESC_VER_1) {
3463 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3470 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3473 writel(np->linkspeed, base + NvRegLinkSpeed);
3478 if (netif_running(dev) && (np->duplex != 0)) {
3479 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3487 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3498 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3506 pause_flags = np->pause_flags;
3550 static void nv_msi_workaround(struct fe_priv *np)
3556 if (np->msi_flags & NV_MSI_ENABLED) {
3557 u8 __iomem *base = np->base;
3566 struct fe_priv *np = netdev_priv(dev);
3571 np->quiet_count = 0;
3572 if (np->irqmask != NVREG_IRQMASK_CPU) {
3573 np->irqmask = NVREG_IRQMASK_CPU;
3577 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3578 np->quiet_count++;
3582 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3583 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3595 struct fe_priv *np = netdev_priv(dev);
3598 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3599 np->events = readl(base + NvRegIrqStatus);
3600 writel(np->events, base + NvRegIrqStatus);
3602 np->events = readl(base + NvRegMSIXIrqStatus);
3603 writel(np->events, base + NvRegMSIXIrqStatus);
3605 if (!(np->events & np->irqmask))
3608 nv_msi_workaround(np);
3610 if (napi_schedule_prep(&np->napi)) {
3615 __napi_schedule(&np->napi);
3628 struct fe_priv *np = netdev_priv(dev);
3631 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3632 np->events = readl(base + NvRegIrqStatus);
3633 writel(np->events, base + NvRegIrqStatus);
3635 np->events = readl(base + NvRegMSIXIrqStatus);
3636 writel(np->events, base + NvRegMSIXIrqStatus);
3638 if (!(np->events & np->irqmask))
3641 nv_msi_workaround(np);
3643 if (napi_schedule_prep(&np->napi)) {
3648 __napi_schedule(&np->napi);
3657 struct fe_priv *np = netdev_priv(dev);
3667 if (!(events & np->irqmask))
3670 spin_lock_irqsave(&np->lock, flags);
3672 spin_unlock_irqrestore(&np->lock, flags);
3675 spin_lock_irqsave(&np->lock, flags);
3680 if (!np->in_shutdown) {
3681 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3682 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3684 spin_unlock_irqrestore(&np->lock, flags);
3697 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3698 struct net_device *dev = np->dev;
3705 if (!nv_optimized(np)) {
3706 spin_lock_irqsave(&np->lock, flags);
3707 tx_work += nv_tx_done(dev, np->tx_ring_size);
3708 spin_unlock_irqrestore(&np->lock, flags);
3713 spin_lock_irqsave(&np->lock, flags);
3714 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3715 spin_unlock_irqrestore(&np->lock, flags);
3725 spin_lock_irqsave(&np->lock, flags);
3726 if (!np->in_shutdown)
3727 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3728 spin_unlock_irqrestore(&np->lock, flags);
3733 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3734 spin_lock_irqsave(&np->lock, flags);
3736 spin_unlock_irqrestore(&np->lock, flags);
3738 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3739 spin_lock_irqsave(&np->lock, flags);
3741 spin_unlock_irqrestore(&np->lock, flags);
3742 np->link_timeout = jiffies + LINK_TIMEOUT;
3744 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3745 spin_lock_irqsave(&np->lock, flags);
3746 if (!np->in_shutdown) {
3747 np->nic_poll_irq = np->irqmask;
3748 np->recover_error = 1;
3749 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3751 spin_unlock_irqrestore(&np->lock, flags);
3761 writel(np->irqmask, base + NvRegIrqMask);
3769 struct fe_priv *np = netdev_priv(dev);
3779 if (!(events & np->irqmask))
3784 spin_lock_irqsave(&np->lock, flags);
3785 if (!np->in_shutdown)
3786 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3787 spin_unlock_irqrestore(&np->lock, flags);
3792 spin_lock_irqsave(&np->lock, flags);
3797 if (!np->in_shutdown) {
3798 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3799 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3801 spin_unlock_irqrestore(&np->lock, flags);
3814 struct fe_priv *np = netdev_priv(dev);
3824 if (!(events & np->irqmask))
3828 spin_lock_irqsave(&np->lock, flags);
3830 spin_unlock_irqrestore(&np->lock, flags);
3833 spin_lock_irqsave(&np->lock, flags);
3835 spin_unlock_irqrestore(&np->lock, flags);
3837 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3838 spin_lock_irqsave(&np->lock, flags);
3840 spin_unlock_irqrestore(&np->lock, flags);
3841 np->link_timeout = jiffies + LINK_TIMEOUT;
3844 spin_lock_irqsave(&np->lock, flags);
3849 if (!np->in_shutdown) {
3850 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3851 np->recover_error = 1;
3852 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3854 spin_unlock_irqrestore(&np->lock, flags);
3858 spin_lock_irqsave(&np->lock, flags);
3863 if (!np->in_shutdown) {
3864 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3865 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3867 spin_unlock_irqrestore(&np->lock, flags);
3881 struct fe_priv *np = netdev_priv(dev);
3885 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3896 nv_msi_workaround(np);
3898 spin_lock(&np->lock);
3899 np->intr_test = 1;
3900 spin_unlock(&np->lock);
3931 struct fe_priv *np = get_nvpriv(dev);
3940 if (nv_optimized(np))
3946 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3948 np->msi_x_entry[i].entry = i;
3949 ret = pci_enable_msix_range(np->pci_dev,
3950 np->msi_x_entry,
3951 np->msi_flags & NV_MSI_X_VECTORS_MASK,
3952 np->msi_flags & NV_MSI_X_VECTORS_MASK);
3954 np->msi_flags |= NV_MSI_X_ENABLED;
3957 sprintf(np->name_rx, "%s-rx", dev->name);
3958 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3959 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3964 pci_disable_msix(np->pci_dev);
3965 np->msi_flags &= ~NV_MSI_X_ENABLED;
3969 sprintf(np->name_tx, "%s-tx", dev->name);
3970 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3971 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3976 pci_disable_msix(np->pci_dev);
3977 np->msi_flags &= ~NV_MSI_X_ENABLED;
3981 sprintf(np->name_other, "%s-other", dev->name);
3982 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3983 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3988 pci_disable_msix(np->pci_dev);
3989 np->msi_flags &= ~NV_MSI_X_ENABLED;
4000 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4006 pci_disable_msix(np->pci_dev);
4007 np->msi_flags &= ~NV_MSI_X_ENABLED;
4019 if (np->msi_flags & NV_MSI_CAPABLE) {
4020 ret = pci_enable_msi(np->pci_dev);
4022 np->msi_flags |= NV_MSI_ENABLED;
4023 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4027 pci_disable_msi(np->pci_dev);
4028 np->msi_flags &= ~NV_MSI_ENABLED;
4042 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4047 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4049 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4056 struct fe_priv *np = get_nvpriv(dev);
4059 if (np->msi_flags & NV_MSI_X_ENABLED) {
4060 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4061 free_irq(np->msi_x_entry[i].vector, dev);
4062 pci_disable_msix(np->pci_dev);
4063 np->msi_flags &= ~NV_MSI_X_ENABLED;
4065 free_irq(np->pci_dev->irq, dev);
4066 if (np->msi_flags & NV_MSI_ENABLED) {
4067 pci_disable_msi(np->pci_dev);
4068 np->msi_flags &= ~NV_MSI_ENABLED;
4076 struct fe_priv *np = netdev_priv(dev);
4087 if (np->msi_flags & NV_MSI_X_ENABLED)
4088 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4090 disable_irq_lockdep(np->pci_dev->irq);
4091 mask = np->irqmask;
4093 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4094 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4097 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4098 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4101 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4102 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4108 if (np->recover_error) {
4109 np->recover_error = 0;
4114 spin_lock(&np->lock);
4117 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4125 if (!np->in_shutdown)
4126 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4129 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4131 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4134 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4137 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4144 spin_unlock(&np->lock);
4154 np->nic_poll_irq = 0;
4155 if (nv_optimized(np))
4159 if (np->msi_flags & NV_MSI_X_ENABLED)
4160 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4162 enable_irq_lockdep(np->pci_dev->irq);
4164 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4165 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4167 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4169 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4170 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4172 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4174 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4175 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4177 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4195 struct fe_priv *np = netdev_priv(dev);
4199 if (spin_trylock(&np->hwstats_lock)) {
4201 spin_unlock(&np->hwstats_lock);
4204 if (!np->in_shutdown)
4205 mod_timer(&np->stats_poll,
4211 struct fe_priv *np = netdev_priv(dev);
4214 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4219 struct fe_priv *np = netdev_priv(dev);
4222 spin_lock_irq(&np->lock);
4223 if (np->wolenabled)
4225 spin_unlock_irq(&np->lock);
4230 struct fe_priv *np = netdev_priv(dev);
4235 np->wolenabled = 0;
4237 np->wolenabled = 1;
4241 spin_lock_irq(&np->lock);
4243 spin_unlock_irq(&np->lock);
4245 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4251 struct fe_priv *np = netdev_priv(dev);
4255 spin_lock_irq(&np->lock);
4270 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4285 if (np->duplex)
4292 ecmd->autoneg = np->autoneg;
4295 if (np->autoneg) {
4297 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4306 if (np->gigabit == PHY_GIGABIT) {
4307 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4316 if (np->gigabit == PHY_GIGABIT)
4319 ecmd->phy_address = np->phyaddr;
4323 spin_unlock_irq(&np->lock);
4329 struct fe_priv *np = netdev_priv(dev);
4336 if (ecmd->phy_address != np->phyaddr) {
4346 if (np->gigabit == PHY_GIGABIT)
4372 spin_lock_irqsave(&np->lock, flags);
4383 spin_unlock_irqrestore(&np->lock, flags);
4391 np->autoneg = 1;
4394 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4404 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4406 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4408 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4410 if (np->gigabit == PHY_GIGABIT) {
4411 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4415 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4420 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4421 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4431 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4436 np->autoneg = 0;
4438 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4448 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4449 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4451 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4453 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4455 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4457 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4458 np->fixed_mode = adv;
4460 if (np->gigabit == PHY_GIGABIT) {
4461 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4463 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4466 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4468 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4470 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4472 if (np->phy_oui == PHY_OUI_MARVELL) {
4479 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4500 struct fe_priv *np = netdev_priv(dev);
4501 return np->register_size;
4506 struct fe_priv *np = netdev_priv(dev);
4512 spin_lock_irq(&np->lock);
4513 for (i = 0; i < np->register_size/sizeof(u32); i++)
4515 spin_unlock_irq(&np->lock);
4520 struct fe_priv *np = netdev_priv(dev);
4523 if (np->autoneg) {
4531 spin_lock(&np->lock);
4534 spin_unlock(&np->lock);
4540 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4541 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4550 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4567 struct fe_priv *np = netdev_priv(dev);
4569 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4570 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4572 ring->rx_pending = np->rx_ring_size;
4573 ring->tx_pending = np->tx_ring_size;
4578 struct fe_priv *np = netdev_priv(dev);
4587 (np->desc_ver == DESC_VER_1 &&
4590 (np->desc_ver != DESC_VER_1 &&
4597 if (!nv_optimized(np)) {
4598 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4602 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4610 if (!nv_optimized(np)) {
4612 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4616 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4630 spin_lock(&np->lock);
4641 np->rx_ring_size = ring->rx_pending;
4642 np->tx_ring_size = ring->tx_pending;
4644 if (!nv_optimized(np)) {
4645 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4646 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4648 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4649 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4651 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4652 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4653 np->ring_addr = ring_addr;
4655 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4656 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4662 if (!np->in_shutdown)
4663 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4667 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4669 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4672 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4677 spin_unlock(&np->lock);
4690 struct fe_priv *np = netdev_priv(dev);
4692 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4693 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4694 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4699 struct fe_priv *np = netdev_priv(dev);
4702 if ((!np->autoneg && np->duplex == 0) ||
4703 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4707 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4717 spin_lock(&np->lock);
4720 spin_unlock(&np->lock);
4725 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4727 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4729 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4731 if (np->autoneg && pause->autoneg) {
4732 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4734 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4736 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4738 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4740 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4744 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4746 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4748 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4750 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4752 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4757 nv_update_pause(dev, np->pause_flags);
4769 struct fe_priv *np = netdev_priv(dev);
4774 spin_lock_irqsave(&np->lock, flags);
4775 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4778 spin_unlock_irqrestore(&np->lock, flags);
4785 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4788 spin_unlock_irqrestore(&np->lock, flags);
4798 spin_unlock_irqrestore(&np->lock, flags);
4804 spin_unlock_irqrestore(&np->lock, flags);
4810 spin_unlock_irqrestore(&np->lock, flags);
4815 spin_lock_irqsave(&np->lock, flags);
4817 spin_unlock_irqrestore(&np->lock, flags);
4834 struct fe_priv *np = get_nvpriv(dev);
4836 spin_lock_irq(&np->lock);
4839 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4841 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4844 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4846 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4848 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4850 spin_unlock_irq(&np->lock);
4855 struct fe_priv *np = netdev_priv(dev);
4867 spin_lock_irq(&np->lock);
4870 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4872 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4875 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4877 spin_unlock_irq(&np->lock);
4888 struct fe_priv *np = netdev_priv(dev);
4892 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4897 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4899 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4901 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4915 struct fe_priv *np = netdev_priv(dev);
4917 spin_lock_bh(&np->hwstats_lock);
4919 memcpy(buffer, &np->estats,
4921 spin_unlock_bh(&np->hwstats_lock);
4926 struct fe_priv *np = netdev_priv(dev);
4929 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4930 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4969 struct fe_priv *np = netdev_priv(dev);
4982 np->intr_test = 0;
4985 save_msi_flags = np->msi_flags;
4986 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4987 np->msi_flags |= 0x001; /* setup 1 vector */
5000 spin_lock_irq(&np->lock);
5003 testcnt = np->intr_test;
5008 if (!(np->msi_flags & NV_MSI_X_ENABLED))
5013 spin_unlock_irq(&np->lock);
5017 np->msi_flags = save_msi_flags;
5032 struct fe_priv *np = netdev_priv(dev);
5036 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5061 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5063 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5077 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
5080 if (pci_dma_mapping_error(np->pci_dev,
5089 if (!nv_optimized(np)) {
5090 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5091 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5093 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5094 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5095 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5097 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5103 if (!nv_optimized(np)) {
5104 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5105 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5108 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5109 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5114 } else if (np->desc_ver == DESC_VER_1) {
5126 rx_skb = np->rx_skb[0].skb;
5136 pci_unmap_single(np->pci_dev, test_dma_addr,
5158 struct fe_priv *np = netdev_priv(dev);
5176 spin_lock_irq(&np->lock);
5177 nv_disable_hw_interrupts(dev, np->irqmask);
5178 if (!(np->msi_flags & NV_MSI_X_ENABLED))
5187 spin_unlock_irq(&np->lock);
5216 if (!np->in_shutdown)
5217 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5220 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5222 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5225 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5231 nv_enable_hw_interrupts(dev, np->irqmask);
5272 struct fe_priv *np = netdev_priv(dev);
5296 np->mgmt_sema = 1;
5307 struct fe_priv *np = netdev_priv(dev);
5311 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5312 if (np->mgmt_sema) {
5323 struct fe_priv *np = netdev_priv(dev);
5345 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5352 struct fe_priv *np = netdev_priv(dev);
5359 mii_rw(dev, np->phyaddr, MII_BMCR,
5360 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5364 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5377 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5389 np->in_shutdown = 0;
5393 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5396 writel(np->linkspeed, base + NvRegLinkSpeed);
5397 if (np->desc_ver == DESC_VER_1)
5401 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5402 writel(np->vlanctl_bits, base + NvRegVlanControl);
5404 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5418 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5424 if (np->desc_ver == DESC_VER_1) {
5427 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5445 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5449 if (np->wolenabled)
5460 nv_disable_hw_interrupts(dev, np->irqmask);
5470 nv_enable_hw_interrupts(dev, np->irqmask);
5472 spin_lock_irq(&np->lock);
5488 np->linkspeed = 0;
5501 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5504 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5505 mod_timer(&np->stats_poll,
5508 spin_unlock_irq(&np->lock);
5524 struct fe_priv *np = netdev_priv(dev);
5527 spin_lock_irq(&np->lock);
5528 np->in_shutdown = 1;
5529 spin_unlock_irq(&np->lock);
5531 synchronize_irq(np->pci_dev->irq);
5533 del_timer_sync(&np->oom_kick);
5534 del_timer_sync(&np->nic_poll);
5535 del_timer_sync(&np->stats_poll);
5538 spin_lock_irq(&np->lock);
5545 nv_disable_hw_interrupts(dev, np->irqmask);
5548 spin_unlock_irq(&np->lock);
5554 if (np->wolenabled || !phy_power_down) {
5560 mii_rw(dev, np->phyaddr, MII_BMCR,
5561 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5607 struct fe_priv *np;
5625 np = netdev_priv(dev);
5626 np->dev = dev;
5627 np->pci_dev = pci_dev;
5628 spin_lock_init(&np->lock);
5629 spin_lock_init(&np->hwstats_lock);
5631 u64_stats_init(&np->swstats_rx_syncp);
5632 u64_stats_init(&np->swstats_tx_syncp);
5634 init_timer(&np->oom_kick);
5635 np->oom_kick.data = (unsigned long) dev;
5636 np->oom_kick.function = nv_do_rx_refill; /* timer handler */
5637 init_timer(&np->nic_poll);
5638 np->nic_poll.data = (unsigned long) dev;
5639 np->nic_poll.function = nv_do_nic_poll; /* timer handler */
5640 init_timer_deferrable(&np->stats_poll);
5641 np->stats_poll.data = (unsigned long) dev;
5642 np->stats_poll.function = nv_do_stats_poll; /* timer handler */
5655 np->register_size = NV_PCI_REGSZ_VER3;
5657 np->register_size = NV_PCI_REGSZ_VER2;
5659 np->register_size = NV_PCI_REGSZ_VER1;
5665 pci_resource_len(pci_dev, i) >= np->register_size) {
5676 np->driver_data = id->driver_data;
5678 np->device_id = id->device;
5683 np->desc_ver = DESC_VER_3;
5684 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5698 np->desc_ver = DESC_VER_2;
5699 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5702 np->desc_ver = DESC_VER_1;
5703 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5706 np->pkt_limit = NV_PKTLIMIT_1;
5708 np->pkt_limit = NV_PKTLIMIT_2;
5711 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5716 np->vlanctl_bits = 0;
5718 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5728 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5732 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5736 np->base = ioremap(addr, np->register_size);
5737 if (!np->base)
5740 np->rx_ring_size = RX_RING_DEFAULT;
5741 np->tx_ring_size = TX_RING_DEFAULT;
5743 if (!nv_optimized(np)) {
5744 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5745 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5746 &np->ring_addr);
5747 if (!np->rx_ring.orig)
5749 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5751 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5752 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5753 &np->ring_addr);
5754 if (!np->rx_ring.ex)
5756 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5758 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5759 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5760 if (!np->rx_skb || !np->tx_skb)
5763 if (!nv_optimized(np))
5768 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5776 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5777 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5783 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5784 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5785 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5786 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5787 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5788 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5791 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5792 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5793 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5794 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5795 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5796 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5802 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5804 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5807 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5808 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5809 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5810 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5811 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5812 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5837 np->wolenabled = 0;
5851 if (np->desc_ver == DESC_VER_1)
5852 np->tx_flags = NV_TX_VALID;
5854 np->tx_flags = NV_TX2_VALID;
5856 np->msi_flags = 0;
5858 np->msi_flags |= NV_MSI_CAPABLE;
5865 np->msi_flags |= NV_MSI_X_CAPABLE;
5870 np->irqmask = NVREG_IRQMASK_CPU;
5871 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5872 np->msi_flags |= 0x0001;
5876 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5878 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5881 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5882 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5883 np->msi_flags |= 0x0003;
5887 np->irqmask |= NVREG_IRQ_TIMER;
5889 np->need_linktimer = 1;
5890 np->link_timeout = jiffies + LINK_TIMEOUT;
5892 np->need_linktimer = 0;
5897 np->tx_limit = 1;
5900 np->tx_limit = 0;
5919 np->mac_in_use = 1;
5920 if (np->mgmt_version > 0)
5921 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5923 if (np->mac_in_use &&
5939 spin_lock_irq(&np->lock);
5941 spin_unlock_irq(&np->lock);
5944 spin_lock_irq(&np->lock);
5946 spin_unlock_irq(&np->lock);
5950 np->phy_model = id2 & PHYID2_MODEL_MASK;
5953 np->phyaddr = phyaddr;
5954 np->phy_oui = id1 | id2;
5957 if (np->phy_oui == PHY_OUI_REALTEK2)
5958 np->phy_oui = PHY_OUI_REALTEK;
5960 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5961 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5975 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5977 np->gigabit = PHY_GIGABIT;
5981 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5982 np->duplex = 0;
5983 np->autoneg = 1;
6005 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6019 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6020 np->need_linktimer ? "lnktim " : "",
6021 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6022 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6023 np->desc_ver);
6046 struct fe_priv *np = netdev_priv(dev);
6049 if (np->phy_oui == PHY_OUI_REALTEK &&
6050 np->phy_model == PHY_MODEL_REALTEK_8201 &&
6052 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6053 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6056 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6057 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6060 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6062 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6069 struct fe_priv *np = netdev_priv(dev);
6075 writel(np->orig_mac[0], base + NvRegMacAddrA);
6076 writel(np->orig_mac[1], base + NvRegMacAddrB);
6107 struct fe_priv *np = netdev_priv(dev);
6118 for (i = 0; i <= np->register_size/sizeof(u32); i++)
6119 np->saved_config_space[i] = readl(base + i*sizeof(u32));
6128 struct fe_priv *np = netdev_priv(dev);
6133 for (i = 0; i <= np->register_size/sizeof(u32); i++)
6134 writel(np->saved_config_space[i], base+i*sizeof(u32));
6136 if (np->driver_data & DEV_NEED_MSI_FIX)
6161 struct fe_priv *np = netdev_priv(dev);
6180 pci_wake_from_d3(pdev, np->wolenabled);