Lines Matching refs:np

71 #define nr64(reg)		readq(np->regs + (reg))
72 #define nw64(reg, val)		writeq((val), np->regs + (reg))
74 #define nr64_mac(reg)		readq(np->mac_regs + (reg))
75 #define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))
77 #define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
78 #define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))
80 #define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
81 #define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))
83 #define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
84 #define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
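
Every register access in the rest of this listing funnels through these wrappers, so the base pointer (np->regs, np->mac_regs) and the per-block offsets (ipp_off, pcs_off, xpcs_off) live in one place. A minimal read-modify-write sketch, using register and bit names that do appear later in this listing (IPP_CFIG, IPP_CFIG_SOFT_RST at line 5216):

	u64 val = nr64_ipp(IPP_CFIG);	/* readq at np->regs + np->ipp_off + IPP_CFIG */
	val |= IPP_CFIG_SOFT_RST;	/* set the soft-reset bit */
	nw64_ipp(IPP_CFIG, val);	/* write it back through the same window */
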
93 #define niu_lock_parent(np, flags) \
94 spin_lock_irqsave(&np->parent->lock, flags)
95 #define niu_unlock_parent(np, flags) \
96 spin_unlock_irqrestore(&np->parent->lock, flags)
98 static int serdes_init_10g_serdes(struct niu *np);
100 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
115 static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
122 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
124 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
135 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
150 static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
161 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
163 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
174 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
194 static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
201 err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
203 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
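
The three __niu_wait_bits_clear* variants (MAC, IPP, plain) differ only in which read wrapper they use; each __niu_set_and_wait_clear* helper sets the bits, delegates to the matching poll, and logs the "would not clear" message on timeout. A minimal sketch of the plain-register poll, assuming only the nr64() wrapper above and udelay():

	static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
					 u64 bits, int limit, int delay)
	{
		while (--limit >= 0) {
			u64 val = nr64(reg);

			if (!(val & bits))
				return 0;	/* hardware cleared the bits */
			udelay(delay);
		}
		return -ENODEV;			/* caller logs "would not clear" */
	}
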
214 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
224 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
250 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
252 struct niu_parent *parent = np->parent;
261 err = niu_ldn_irq_enable(np, i, on);
268 static int niu_enable_interrupts(struct niu *np, int on)
272 for (i = 0; i < np->num_ldg; i++) {
273 struct niu_ldg *lp = &np->ldg[i];
276 err = niu_enable_ldn_in_ldg(np, lp, on);
280 for (i = 0; i < np->num_ldg; i++)
281 niu_ldg_rearm(np, &np->ldg[i], on);
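
A sketch of the logical-device-group rearm named at line 214; LDG_IMGMT and LDG_IMGMT_ARM are assumed register/bit names, not taken from this listing:

	static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
	{
		u64 val = (u64) lp->timer;	/* reload the group's interrupt timer */

		if (on)
			val |= LDG_IMGMT_ARM;	/* re-enable delivery for the group */

		nw64(LDG_IMGMT(lp->ldg_num), val);
	}

Per lines 272-281, niu_enable_interrupts() first enables every logical device in each group, then makes a second pass rearming each group.
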
296 static int mdio_wait(struct niu *np)
312 static int mdio_read(struct niu *np, int port, int dev, int reg)
317 err = mdio_wait(np);
322 return mdio_wait(np);
325 static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
330 err = mdio_wait(np);
335 err = mdio_wait(np);
342 static int mii_read(struct niu *np, int port, int reg)
345 return mdio_wait(np);
348 static int mii_write(struct niu *np, int port, int reg, int data)
353 err = mdio_wait(np);
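
mdio_read()/mdio_write() bracket each MIF frame with mdio_wait() (lines 317, 322, 330, 335). A sketch of the read side, where MIF_FRAME_OUTPUT and the MDIO_*_OP() frame builders are assumed names: the address frame is latched first, then the read frame, with mdio_wait() polling for completion and returning the 16-bit result as a non-negative int.

	static int mdio_read(struct niu *np, int port, int dev, int reg)
	{
		int err;

		nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
		err = mdio_wait(np);		/* wait for the address frame */
		if (err < 0)
			return err;

		nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
		return mdio_wait(np);		/* non-negative = 16-bit read data */
	}
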
360 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
364 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
368 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
374 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
378 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
382 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
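
esr2_set_tx_cfg()/esr2_set_rx_cfg() push a 32-bit SERDES channel configuration over the 16-bit MDIO bus as a low/high pair, matching the _L/_H register naming visible in the error strings at lines 461 and 471. A sketch of the TX side; the ESR2_TI_PLL_TX_CFG_{L,H} names follow that convention but are assumed:

	static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
	{
		int err;

		/* one 32-bit value goes out as two 16-bit MDIO writes */
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_L(channel), val & 0xffff);
		if (!err)
			err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
					 ESR2_TI_PLL_TX_CFG_H(channel), val >> 16);
		return err;
	}
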
389 static int serdes_init_niu_10g_fiber(struct niu *np)
391 struct niu_link_config *lp = &np->link_config;
403 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
412 int err = esr2_set_tx_cfg(np, i, tx_cfg);
418 int err = esr2_set_rx_cfg(np, i, rx_cfg);
426 static int serdes_init_niu_1g_serdes(struct niu *np)
428 struct niu_link_config *lp = &np->link_config;
442 if (np->port == 0)
448 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
458 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
461 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
462 np->port, __func__);
468 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
471 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
472 np->port, __func__);
480 err = esr2_set_tx_cfg(np, i, tx_cfg);
486 err = esr2_set_rx_cfg(np, i, rx_cfg);
491 switch (np->port) {
515 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
516 np->port, (int)(sig & mask), (int)val);
523 static int serdes_init_niu_10g_serdes(struct niu *np)
525 struct niu_link_config *lp = &np->link_config;
540 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
550 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
553 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
554 np->port, __func__);
560 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
563 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
564 np->port, __func__);
572 err = esr2_set_tx_cfg(np, i, tx_cfg);
578 err = esr2_set_rx_cfg(np, i, rx_cfg);
585 switch (np->port) {
622 np->port, (int)(sig & mask), (int)val);
625 err = serdes_init_niu_1g_serdes(np);
627 np->flags &= ~NIU_FLAGS_10G;
628 np->mac_xcvr = MAC_XCVR_PCS;
630 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
631 np->port);
638 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
642 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
645 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
654 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
658 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
662 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
672 static int esr_read_reset(struct niu *np, u32 *val)
676 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
680 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
690 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
694 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
697 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
702 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
706 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
709 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
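
The esr_read_*/esr_write_* helpers mirror the same split: two 16-bit MDIO transfers per 32-bit value. A sketch of the read side, which leans on mdio_read() returning the data as a non-negative int; ESR_RXTX_CTRL_L appears at line 642, the _H name is assumed from the convention:

	static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
	{
		int err;

		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_L(chan));
		if (err >= 0) {
			*val = (err & 0xffff);		/* low half */
			err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
					ESR_RXTX_CTRL_H(chan));
			if (err >= 0) {
				*val |= (err & 0xffff) << 16;	/* high half */
				err = 0;
			}
		}
		return err;
	}
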
714 static int esr_reset(struct niu *np)
719 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
723 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
729 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
735 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
741 err = esr_read_reset(np, &reset);
745 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
746 np->port, reset);
753 static int serdes_init_10g(struct niu *np)
755 struct niu_link_config *lp = &np->link_config;
760 switch (np->port) {
805 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
808 err = esr_read_glue0(np, i, &glue0);
825 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
828 err = esr_write_glue0(np, i, glue0);
833 err = esr_reset(np);
838 switch (np->port) {
866 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
867 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
870 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
871 np->port, (int)(sig & mask), (int)val);
874 if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
875 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
879 static int serdes_init_1g(struct niu *np)
885 switch (np->port) {
906 static int serdes_init_1g_serdes(struct niu *np)
908 struct niu_link_config *lp = &np->link_config;
917 switch (np->port) {
973 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
976 err = esr_read_glue0(np, i, &glue0);
993 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
996 err = esr_write_glue0(np, i, glue0);
1003 switch (np->port) {
1019 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
1020 np->port, (int)(sig & mask), (int)val);
1027 static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1029 struct niu_link_config *lp = &np->link_config;
1040 spin_lock_irqsave(&np->lock, flags);
1052 spin_unlock_irqrestore(&np->lock, flags);
1058 static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1061 struct niu_link_config *lp = &np->link_config;
1068 if (!(np->flags & NIU_FLAGS_10G))
1069 return link_status_1g_serdes(np, link_up_p);
1073 spin_lock_irqsave(&np->lock, flags);
1087 spin_unlock_irqrestore(&np->lock, flags);
1092 static int link_status_mii(struct niu *np, int *link_up_p)
1094 struct niu_link_config *lp = &np->link_config;
1099 err = mii_read(np, np->phy_addr, MII_BMCR);
1104 err = mii_read(np, np->phy_addr, MII_BMSR);
1109 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1114 err = mii_read(np, np->phy_addr, MII_LPA);
1120 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1125 err = mii_read(np, np->phy_addr, MII_CTRL1000);
1130 err = mii_read(np, np->phy_addr, MII_STAT1000);
1205 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1207 struct niu_link_config *lp = &np->link_config;
1217 spin_lock_irqsave(&np->lock, flags);
1221 err = mii_read(np, np->phy_addr, MII_BMSR);
1229 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1234 err = mii_read(np, np->phy_addr, MII_LPA);
1239 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1252 spin_unlock_irqrestore(&np->lock, flags);
1258 static int link_status_1g(struct niu *np, int *link_up_p)
1260 struct niu_link_config *lp = &np->link_config;
1264 spin_lock_irqsave(&np->lock, flags);
1266 err = link_status_mii(np, link_up_p);
1270 spin_unlock_irqrestore(&np->lock, flags);
1274 static int bcm8704_reset(struct niu *np)
1278 err = mdio_read(np, np->phy_addr,
1283 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1290 err = mdio_read(np, np->phy_addr,
1298 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
1299 np->port, (err & 0xffff));
1308 static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1310 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1313 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1319 static int bcm8706_init_user_dev3(struct niu *np)
1324 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1331 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1341 static int bcm8704_init_user_dev3(struct niu *np)
1345 err = mdio_write(np, np->phy_addr,
1359 err = mdio_write(np, np->phy_addr,
1368 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1371 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1375 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1381 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1391 static int mrvl88x2011_act_led(struct niu *np, int val)
1395 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1403 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1407 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1411 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1417 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1424 static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1429 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
1434 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
1438 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1445 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1450 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1455 if (np->link_config.loopback_mode == LOOPBACK_MAC)
1460 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1466 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1471 static int xcvr_diag_bcm870x(struct niu *np)
1477 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1481 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
1483 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1486 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
1488 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1492 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
1496 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1500 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1506 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1510 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1519 np->port);
1522 np->port);
1529 static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1531 struct niu_link_config *lp = &np->link_config;
1534 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1544 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1552 static int xcvr_init_10g_bcm8706(struct niu *np)
1557 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1558 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1570 err = bcm8704_reset(np);
1574 err = xcvr_10g_set_lb_bcm870x(np);
1578 err = bcm8706_init_user_dev3(np);
1582 err = xcvr_diag_bcm870x(np);
1589 static int xcvr_init_10g_bcm8704(struct niu *np)
1593 err = bcm8704_reset(np);
1597 err = bcm8704_init_user_dev3(np);
1601 err = xcvr_10g_set_lb_bcm870x(np);
1605 err = xcvr_diag_bcm870x(np);
1612 static int xcvr_init_10g(struct niu *np)
1627 phy_id = phy_decode(np->parent->port_phy, np->port);
1628 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1633 err = xcvr_init_10g_mrvl88x2011(np);
1637 err = xcvr_init_10g_bcm8704(np);
1644 static int mii_reset(struct niu *np)
1648 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1655 err = mii_read(np, np->phy_addr, MII_BMCR);
1662 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
1663 np->port, err);
1670 static int xcvr_init_1g_rgmii(struct niu *np)
1680 err = mii_reset(np);
1684 err = mii_read(np, np->phy_addr, MII_BMSR);
1691 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1698 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1707 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1714 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1718 err = mii_read(np, np->phy_addr, MII_BMCR);
1721 bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1723 err = mii_read(np, np->phy_addr, MII_BMSR);
1730 static int mii_init_common(struct niu *np)
1732 struct niu_link_config *lp = &np->link_config;
1736 err = mii_reset(np);
1740 err = mii_read(np, np->phy_addr, MII_BMSR);
1747 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1754 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1771 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1792 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1804 err = mii_write(np, np->phy_addr,
1843 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1848 err = mii_read(np, np->phy_addr, MII_BMCR);
1853 err = mii_read(np, np->phy_addr, MII_BMSR);
1859 np->port, bmcr, bmsr);
1865 static int xcvr_init_1g(struct niu *np)
1874 return mii_init_common(np);
1877 static int niu_xcvr_init(struct niu *np)
1879 const struct niu_phy_ops *ops = np->phy_ops;
1884 err = ops->xcvr_init(np);
1889 static int niu_serdes_init(struct niu *np)
1891 const struct niu_phy_ops *ops = np->phy_ops;
1896 err = ops->serdes_init(np);
1904 static int niu_link_status_common(struct niu *np, int link_up)
1906 struct niu_link_config *lp = &np->link_config;
1907 struct net_device *dev = np->dev;
1911 netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
1918 spin_lock_irqsave(&np->lock, flags);
1919 niu_init_xif(np);
1920 niu_handle_led(np, 1);
1921 spin_unlock_irqrestore(&np->lock, flags);
1925 netif_warn(np, link, dev, "Link is down\n");
1926 spin_lock_irqsave(&np->lock, flags);
1927 niu_handle_led(np, 0);
1928 spin_unlock_irqrestore(&np->lock, flags);
1935 static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
1941 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1947 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1955 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1960 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1968 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1979 np->link_config.active_speed = SPEED_10000;
1980 np->link_config.active_duplex = DUPLEX_FULL;
1983 mrvl88x2011_act_led(np, (link_up ?
1991 static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1996 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
2005 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2015 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2027 np->link_config.active_speed = SPEED_INVALID;
2028 np->link_config.active_duplex = DUPLEX_INVALID;
2033 np->link_config.active_speed = SPEED_10000;
2034 np->link_config.active_duplex = DUPLEX_FULL;
2042 static int link_status_10g_bcom(struct niu *np, int *link_up_p)
2048 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
2057 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2066 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2082 np->link_config.active_speed = SPEED_10000;
2083 np->link_config.active_duplex = DUPLEX_FULL;
2091 static int link_status_10g(struct niu *np, int *link_up_p)
2096 spin_lock_irqsave(&np->lock, flags);
2098 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2101 phy_id = phy_decode(np->parent->port_phy, np->port);
2102 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
2107 err = link_status_10g_mrvl(np, link_up_p);
2111 err = link_status_10g_bcom(np, link_up_p);
2116 spin_unlock_irqrestore(&np->lock, flags);
2121 static int niu_10g_phy_present(struct niu *np)
2126 switch (np->port) {
2158 static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2165 spin_lock_irqsave(&np->lock, flags);
2167 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2168 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2170 phy_present = niu_10g_phy_present(np);
2175 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2176 if (np->phy_ops->xcvr_init)
2177 err = np->phy_ops->xcvr_init(np);
2179 err = mdio_read(np, np->phy_addr,
2186 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2189 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2191 netif_warn(np, link, np->dev,
2196 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
2197 err = link_status_10g_bcm8706(np, link_up_p);
2201 np->link_config.active_speed = SPEED_10000;
2202 np->link_config.active_duplex = DUPLEX_FULL;
2207 spin_unlock_irqrestore(&np->lock, flags);
2212 static int niu_link_status(struct niu *np, int *link_up_p)
2214 const struct niu_phy_ops *ops = np->phy_ops;
2219 err = ops->link_status(np, link_up_p);
2226 struct niu *np = (struct niu *) __opaque;
2230 err = niu_link_status(np, &link_up);
2232 niu_link_status_common(np, link_up);
2234 if (netif_carrier_ok(np->dev))
2238 np->timer.expires = jiffies + off;
2240 add_timer(&np->timer);
2367 static int serdes_init_10g_serdes(struct niu *np)
2369 struct niu_link_config *lp = &np->link_config;
2373 switch (np->port) {
2413 esr_reset(np);
2423 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2426 err = esr_read_glue0(np, i, &glue0);
2443 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2446 err = esr_write_glue0(np, i, glue0);
2453 switch (np->port) {
2482 err = serdes_init_1g_serdes(np);
2484 np->flags &= ~NIU_FLAGS_10G;
2485 np->mac_xcvr = MAC_XCVR_PCS;
2487 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2488 np->port);
2496 static int niu_determine_phy_disposition(struct niu *np)
2498 struct niu_parent *parent = np->parent;
2504 switch (np->flags &
2519 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2521 if (np->port == 0)
2523 if (np->port == 1)
2527 phy_addr_off += np->port;
2532 switch (np->flags &
2544 phy_addr_off += (np->port ^ 0x3);
2563 phy_addr_off += np->port;
2564 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2566 if (np->port == 0)
2568 if (np->port == 1)
2576 switch (np->port) {
2588 phy_addr_off = niu_atca_port_num[np->port];
2596 np->phy_ops = tp->ops;
2597 np->phy_addr = tp->phy_addr_base + phy_addr_off;
2602 static int niu_init_link(struct niu *np)
2604 struct niu_parent *parent = np->parent;
2608 err = niu_xcvr_init(np);
2613 err = niu_serdes_init(np);
2614 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
2617 err = niu_xcvr_init(np);
2618 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
2619 niu_link_status(np, &ignore);
2623 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2629 if (np->flags & NIU_FLAGS_XMAC) {
2640 static int niu_num_alt_addr(struct niu *np)
2642 if (np->flags & NIU_FLAGS_XMAC)
2648 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2654 if (index >= niu_num_alt_addr(np))
2657 if (np->flags & NIU_FLAGS_XMAC) {
2670 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2675 if (index >= niu_num_alt_addr(np))
2678 if (np->flags & NIU_FLAGS_XMAC) {
2696 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2707 static int __set_rdc_table_num(struct niu *np,
2715 if (np->flags & NIU_FLAGS_XMAC)
2719 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2723 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2726 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2729 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2732 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2735 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2738 if (idx >= niu_num_alt_addr(np))
2740 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2764 static void vlan_tbl_write(struct niu *np, unsigned long index,
2782 static void vlan_tbl_clear(struct niu *np)
2790 static int tcam_wait_bit(struct niu *np, u64 bit)
2805 static int tcam_flush(struct niu *np, int index)
2811 return tcam_wait_bit(np, TCAM_CTL_STAT);
2815 static int tcam_read(struct niu *np, int index,
2821 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2836 static int tcam_write(struct niu *np, int index,
2849 return tcam_wait_bit(np, TCAM_CTL_STAT);
2853 static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2858 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2866 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2871 return tcam_wait_bit(np, TCAM_CTL_STAT);
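
tcam_read/tcam_write/tcam_flush and the assoc variants all serialize on tcam_wait_bit(np, TCAM_CTL_STAT). A minimal sketch of that poll, with TCAM_CTL as an assumed control-register name:

	static int tcam_wait_bit(struct niu *np, u64 bit)
	{
		int limit = 1000;

		while (--limit > 0) {
			if (nr64(TCAM_CTL) & bit)	/* operation complete? */
				return 0;
			udelay(1);
		}
		return -ENODEV;
	}

Each TCAM operation then reduces to loading key/mask (or an invalidating entry, for flush), kicking the control register, and waiting on the same status bit.
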
2874 static void tcam_enable(struct niu *np, int on)
2885 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2901 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2923 static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2944 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2965 static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2993 static int tcam_early_init(struct niu *np)
2998 tcam_enable(np, 0);
2999 tcam_set_lat_and_ratio(np,
3003 err = tcam_user_eth_class_enable(np, i, 0);
3008 err = tcam_user_ip_class_enable(np, i, 0);
3016 static int tcam_flush_all(struct niu *np)
3020 for (i = 0; i < np->parent->tcam_num_entries; i++) {
3021 int err = tcam_flush(np, i);
3034 static int hash_read(struct niu *np, unsigned long partition,
3053 static int hash_write(struct niu *np, unsigned long partition,
3071 static void fflp_reset(struct niu *np)
3083 static void fflp_set_timings(struct niu *np)
3102 static int fflp_set_partition(struct niu *np, u64 partition,
3126 static int fflp_disable_all_partitions(struct niu *np)
3131 int err = fflp_set_partition(np, 0, 0, 0, 0);
3138 static void fflp_llcsnap_enable(struct niu *np, int on)
3149 static void fflp_errors_enable(struct niu *np, int on)
3160 static int fflp_hash_clear(struct niu *np)
3170 int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3177 static int fflp_early_init(struct niu *np)
3183 niu_lock_parent(np, flags);
3185 parent = np->parent;
3188 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3189 fflp_reset(np);
3190 fflp_set_timings(np);
3191 err = fflp_disable_all_partitions(np);
3193 netif_printk(np, probe, KERN_DEBUG, np->dev,
3200 err = tcam_early_init(np);
3202 netif_printk(np, probe, KERN_DEBUG, np->dev,
3206 fflp_llcsnap_enable(np, 1);
3207 fflp_errors_enable(np, 0);
3211 err = tcam_flush_all(np);
3213 netif_printk(np, probe, KERN_DEBUG, np->dev,
3217 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3218 err = fflp_hash_clear(np);
3220 netif_printk(np, probe, KERN_DEBUG, np->dev,
3227 vlan_tbl_clear(np);
3232 niu_unlock_parent(np, flags);
3236 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3246 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3257 static u16 tcam_get_index(struct niu *np, u16 idx)
3260 if (idx >= (np->clas.tcam_sz - 1))
3262 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3265 static u16 tcam_get_size(struct niu *np)
3268 return np->clas.tcam_sz - 1;
3271 static u16 tcam_get_valid_entry_cnt(struct niu *np)
3274 return np->clas.tcam_valid_entries - 1;
3324 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3335 addr = np->ops->map_page(np->device, page, 0,
3357 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3363 int err = niu_rbr_add_page(np, rp, mask, index);
3382 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3404 np->ops->unmap_page(np->device, page->index,
3422 static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3430 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3432 return niu_rx_pkt_ignore(np, rp);
3474 np->ops->unmap_page(np->device, page->index,
3494 if (np->dev->features & NETIF_F_RXHASH)
3506 skb->protocol = eth_type_trans(skb, np->dev);
3513 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3520 err = niu_rbr_add_page(np, rp, mask, index);
3531 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3543 np->ops->unmap_page(np->device, base, PAGE_SIZE,
3559 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3575 np->ops->unmap_single(np->device, tb->mapping,
3590 np->ops->unmap_page(np->device, tb->mapping,
3603 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3610 index = (rp - np->tx_rings);
3611 txq = netdev_get_tx_queue(np->dev, index);
3625 netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3629 cons = release_tx_packet(np, rp, cons);
3645 static inline void niu_sync_rx_discard_stats(struct niu *np,
3674 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3677 netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3689 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3691 netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3697 static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3714 netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3721 rcr_done += niu_process_rx_pkt(napi, np, rp);
3729 niu_rbr_refill(np, rp, GFP_ATOMIC);
3741 niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3746 static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3753 netif_printk(np, intr, KERN_DEBUG, np->dev,
3756 for (i = 0; i < np->num_tx_rings; i++) {
3757 struct tx_ring_info *rp = &np->tx_rings[i];
3759 niu_tx_work(np, rp);
3763 for (i = 0; i < np->num_rx_rings; i++) {
3764 struct rx_ring_info *rp = &np->rx_rings[i];
3769 this_work_done = niu_rx_work(&lp->napi, np, rp,
3784 struct niu *np = lp->np;
3787 work_done = niu_poll_core(np, lp, budget);
3791 niu_ldg_rearm(np, lp, 1);
3796 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3799 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3833 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3844 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3848 niu_log_rxchan_errors(np, rp, stat);
3857 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3860 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3882 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3890 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3896 niu_log_txchan_errors(np, rp, cs);
3901 static int niu_mif_interrupt(struct niu *np)
3906 if (np->flags & NIU_FLAGS_XMAC) {
3913 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3919 static void niu_xmac_interrupt(struct niu *np)
3921 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3991 static void niu_bmac_interrupt(struct niu *np)
3993 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
4027 static int niu_mac_interrupt(struct niu *np)
4029 if (np->flags & NIU_FLAGS_XMAC)
4030 niu_xmac_interrupt(np);
4032 niu_bmac_interrupt(np);
4037 static void niu_log_device_error(struct niu *np, u64 stat)
4039 netdev_err(np->dev, "Core device errors ( ");
4067 static int niu_device_error(struct niu *np)
4071 netdev_err(np->dev, "Core device error, stat[%llx]\n",
4074 niu_log_device_error(np, stat);
4079 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
4092 for (i = 0; i < np->num_rx_rings; i++) {
4093 struct rx_ring_info *rp = &np->rx_rings[i];
4096 int r = niu_rx_error(np, rp);
4110 for (i = 0; i < np->num_tx_rings; i++) {
4111 struct tx_ring_info *rp = &np->tx_rings[i];
4114 int r = niu_tx_error(np, rp);
4121 int r = niu_mif_interrupt(np);
4127 int r = niu_mac_interrupt(np);
4132 int r = niu_device_error(np);
4139 niu_enable_interrupts(np, 0);
4144 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4154 netif_printk(np, intr, KERN_DEBUG, np->dev,
4158 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4163 netif_printk(np, intr, KERN_DEBUG, np->dev,
4167 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4169 struct niu_parent *parent = np->parent;
4176 for (i = 0; i < np->num_rx_rings; i++) {
4177 struct rx_ring_info *rp = &np->rx_rings[i];
4185 niu_rxchan_intr(np, rp, ldn);
4188 for (i = 0; i < np->num_tx_rings; i++) {
4189 struct tx_ring_info *rp = &np->tx_rings[i];
4197 niu_txchan_intr(np, rp, ldn);
4201 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4208 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4216 struct niu *np = lp->np;
4221 if (netif_msg_intr(np))
4225 spin_lock_irqsave(&np->lock, flags);
4231 if (netif_msg_intr(np))
4238 spin_unlock_irqrestore(&np->lock, flags);
4243 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4248 niu_schedule_napi(np, lp, v0, v1, v2);
4250 niu_ldg_rearm(np, lp, 1);
4252 spin_unlock_irqrestore(&np->lock, flags);
4257 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4260 np->ops->free_coherent(np->device,
4266 np->ops->free_coherent(np->device,
4274 niu_rbr_free(np, rp);
4276 np->ops->free_coherent(np->device,
4287 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4290 np->ops->free_coherent(np->device,
4300 (void) release_tx_packet(np, rp, i);
4303 np->ops->free_coherent(np->device,
4314 static void niu_free_channels(struct niu *np)
4318 if (np->rx_rings) {
4319 for (i = 0; i < np->num_rx_rings; i++) {
4320 struct rx_ring_info *rp = &np->rx_rings[i];
4322 niu_free_rx_ring_info(np, rp);
4324 kfree(np->rx_rings);
4325 np->rx_rings = NULL;
4326 np->num_rx_rings = 0;
4329 if (np->tx_rings) {
4330 for (i = 0; i < np->num_tx_rings; i++) {
4331 struct tx_ring_info *rp = &np->tx_rings[i];
4333 niu_free_tx_ring_info(np, rp);
4335 kfree(np->tx_rings);
4336 np->tx_rings = NULL;
4337 np->num_tx_rings = 0;
4341 static int niu_alloc_rx_ring_info(struct niu *np,
4351 rp->mbox = np->ops->alloc_coherent(np->device,
4357 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4362 rp->rcr = np->ops->alloc_coherent(np->device,
4368 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4375 rp->rbr = np->ops->alloc_coherent(np->device,
4381 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4392 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4394 int mtu = np->dev->mtu;
4404 static int niu_alloc_tx_ring_info(struct niu *np,
4409 rp->mbox = np->ops->alloc_coherent(np->device,
4415 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4420 rp->descr = np->ops->alloc_coherent(np->device,
4426 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4439 niu_set_max_burst(np, rp);
4444 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4455 if (np->dev->mtu > ETH_DATA_LEN) {
4471 static int niu_alloc_channels(struct niu *np)
4473 struct niu_parent *parent = np->parent;
4480 port = np->port;
4496 np->num_rx_rings = num_rx_rings;
4498 np->rx_rings = rx_rings;
4500 netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4502 for (i = 0; i < np->num_rx_rings; i++) {
4503 struct rx_ring_info *rp = &np->rx_rings[i];
4505 rp->np = np;
4508 err = niu_alloc_rx_ring_info(np, rp);
4512 niu_size_rbr(np, rp);
4525 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4536 np->num_tx_rings = num_tx_rings;
4538 np->tx_rings = tx_rings;
4540 netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4542 for (i = 0; i < np->num_tx_rings; i++) {
4543 struct tx_ring_info *rp = &np->tx_rings[i];
4545 rp->np = np;
4548 err = niu_alloc_tx_ring_info(np, rp);
4556 niu_free_channels(np);
4560 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4572 static int niu_tx_channel_stop(struct niu *np, int channel)
4579 return niu_tx_cs_sng_poll(np, channel);
4582 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4594 static int niu_tx_channel_reset(struct niu *np, int channel)
4602 err = niu_tx_cs_reset_poll(np, channel);
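
Stopping a TX channel is a two-step handshake: request stop-and-go, then poll for the stopped ("SNG") state; reset follows the same shape with a reset bit and niu_tx_cs_reset_poll(). A sketch assuming TX_CS()/TX_CS_STOP_N_GO/TX_CS_SNG_STATE register and bit names:

	static int niu_tx_cs_sng_poll(struct niu *np, int channel)
	{
		int limit = 1000;

		while (--limit > 0) {
			u64 val = nr64(TX_CS(channel));
			if (val & TX_CS_SNG_STATE)
				return 0;	/* DMA has quiesced */
			udelay(100);
		}
		return -ENODEV;
	}

	static int niu_tx_channel_stop(struct niu *np, int channel)
	{
		u64 val = nr64(TX_CS(channel));

		val |= TX_CS_STOP_N_GO;		/* ask the channel to quiesce */
		nw64(TX_CS(channel), val);

		return niu_tx_cs_sng_poll(np, channel);
	}
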
4609 static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4621 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4630 static void niu_txc_enable_port(struct niu *np, int on)
4635 niu_lock_parent(np, flags);
4637 mask = (u64)1 << np->port;
4646 niu_unlock_parent(np, flags);
4649 static void niu_txc_set_imask(struct niu *np, u64 imask)
4654 niu_lock_parent(np, flags);
4656 val &= ~TXC_INT_MASK_VAL(np->port);
4657 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4658 niu_unlock_parent(np, flags);
4661 static void niu_txc_port_dma_enable(struct niu *np, int on)
4668 for (i = 0; i < np->num_tx_rings; i++)
4669 val |= (1 << np->tx_rings[i].tx_channel);
4671 nw64(TXC_PORT_DMA(np->port), val);
4674 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4679 err = niu_tx_channel_stop(np, channel);
4683 err = niu_tx_channel_reset(np, channel);
4687 err = niu_tx_channel_lpage_init(np, channel);
4696 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4714 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4728 static void niu_init_rdc_groups(struct niu *np)
4730 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4743 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4746 static void niu_init_drr_weight(struct niu *np)
4748 int type = phy_decode(np->parent->port_phy, np->port);
4761 nw64(PT_DRR_WT(np->port), val);
4764 static int niu_init_hostinfo(struct niu *np)
4766 struct niu_parent *parent = np->parent;
4767 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4768 int i, err, num_alt = niu_num_alt_addr(np);
4771 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4775 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4780 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4788 static int niu_rx_channel_reset(struct niu *np, int channel)
4790 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4795 static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4807 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4814 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4905 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4927 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4932 err = niu_rx_channel_reset(np, channel);
4936 err = niu_rx_channel_lpage_init(np, channel);
4940 niu_rx_channel_wred_init(np, rp);
4967 err = niu_enable_rx_channel(np, channel, 1);
4980 static int niu_init_rx_channels(struct niu *np)
4986 niu_lock_parent(np, flags);
4987 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4989 niu_unlock_parent(np, flags);
4993 niu_init_rdc_groups(np);
4994 niu_init_drr_weight(np);
4996 err = niu_init_hostinfo(np);
5000 for (i = 0; i < np->num_rx_rings; i++) {
5001 struct rx_ring_info *rp = &np->rx_rings[i];
5003 err = niu_init_one_rx_channel(np, rp);
5011 static int niu_set_ip_frag_rule(struct niu *np)
5013 struct niu_parent *parent = np->parent;
5014 struct niu_classifier *cp = &np->clas;
5029 err = tcam_write(np, index, tp->key, tp->key_mask);
5032 err = tcam_assoc_write(np, index, tp->assoc_data);
5041 static int niu_init_classifier_hw(struct niu *np)
5043 struct niu_parent *parent = np->parent;
5044 struct niu_classifier *cp = &np->clas;
5050 err = niu_init_hostinfo(np);
5057 vlan_tbl_write(np, i, np->port,
5064 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
5073 err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5076 err = niu_set_flow_key(np, i, parent->flow_key[index]);
5081 err = niu_set_ip_frag_rule(np);
5085 tcam_enable(np, 1);
5090 static int niu_zcp_write(struct niu *np, int index, u64 *data)
5101 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5103 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5107 static int niu_zcp_read(struct niu *np, int index, u64 *data)
5111 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5114 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5122 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5124 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5127 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5141 static void niu_zcp_cfifo_reset(struct niu *np)
5145 val |= RESET_CFIFO_RST(np->port);
5149 val &= ~RESET_CFIFO_RST(np->port);
5153 static int niu_init_zcp(struct niu *np)
5158 if (np->parent->plat_type != PLAT_TYPE_NIU) {
5159 if (np->port == 0 || np->port == 1)
5173 err = niu_zcp_write(np, i, data);
5176 err = niu_zcp_read(np, i, rbuf);
5181 niu_zcp_cfifo_reset(np);
5182 nw64(CFIFO_ECC(np->port), 0);
5190 static void niu_ipp_write(struct niu *np, int index, u64 *data)
5204 static void niu_ipp_read(struct niu *np, int index, u64 *data)
5214 static int niu_ipp_reset(struct niu *np)
5216 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5220 static int niu_init_ipp(struct niu *np)
5225 if (np->parent->plat_type != PLAT_TYPE_NIU) {
5226 if (np->port == 0 || np->port == 1)
5240 niu_ipp_write(np, i, data);
5241 niu_ipp_read(np, i, rbuf);
5247 err = niu_ipp_reset(np);
5271 static void niu_handle_led(struct niu *np, int status)
5276 if ((np->flags & NIU_FLAGS_10G) != 0 &&
5277 (np->flags & NIU_FLAGS_FIBER) != 0) {
5290 static void niu_init_xif_xmac(struct niu *np)
5292 struct niu_link_config *lp = &np->link_config;
5295 if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5313 if (np->flags & NIU_FLAGS_10G) {
5317 if (!(np->flags & NIU_FLAGS_FIBER) &&
5318 !(np->flags & NIU_FLAGS_XCVR_SERDES))
5335 if (np->flags & NIU_FLAGS_10G) {
5347 static void niu_init_xif_bmac(struct niu *np)
5349 struct niu_link_config *lp = &np->link_config;
5367 if (!(np->flags & NIU_FLAGS_10G) &&
5368 !(np->flags & NIU_FLAGS_FIBER) &&
5377 static void niu_init_xif(struct niu *np)
5379 if (np->flags & NIU_FLAGS_XMAC)
5380 niu_init_xif_xmac(np);
5382 niu_init_xif_bmac(np);
5385 static void niu_pcs_mii_reset(struct niu *np)
5397 static void niu_xpcs_reset(struct niu *np)
5409 static int niu_init_pcs(struct niu *np)
5411 struct niu_link_config *lp = &np->link_config;
5414 switch (np->flags & (NIU_FLAGS_10G |
5421 niu_pcs_mii_reset(np);
5428 if (!(np->flags & NIU_FLAGS_XMAC))
5436 niu_xpcs_reset(np);
5453 niu_pcs_mii_reset(np);
5463 niu_pcs_mii_reset(np);
5473 static int niu_reset_tx_xmac(struct niu *np)
5475 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5481 static int niu_reset_tx_bmac(struct niu *np)
5493 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5494 np->port,
5502 static int niu_reset_tx_mac(struct niu *np)
5504 if (np->flags & NIU_FLAGS_XMAC)
5505 return niu_reset_tx_xmac(np);
5507 return niu_reset_tx_bmac(np);
5510 static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5526 if (np->flags & NIU_FLAGS_10G) {
5546 static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5563 static void niu_init_tx_mac(struct niu *np)
5568 if (np->dev->mtu > ETH_DATA_LEN)
5578 if (np->flags & NIU_FLAGS_XMAC)
5579 niu_init_tx_xmac(np, min, max);
5581 niu_init_tx_bmac(np, min, max);
5584 static int niu_reset_rx_xmac(struct niu *np)
5598 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5599 np->port,
5607 static int niu_reset_rx_bmac(struct niu *np)
5619 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5620 np->port,
5628 static int niu_reset_rx_mac(struct niu *np)
5630 if (np->flags & NIU_FLAGS_XMAC)
5631 return niu_reset_rx_xmac(np);
5633 return niu_reset_rx_bmac(np);
5636 static void niu_init_rx_xmac(struct niu *np)
5638 struct niu_parent *parent = np->parent;
5639 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5652 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5653 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5688 static void niu_init_rx_bmac(struct niu *np)
5690 struct niu_parent *parent = np->parent;
5691 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5703 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5704 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5723 static void niu_init_rx_mac(struct niu *np)
5725 niu_set_primary_mac(np, np->dev->dev_addr);
5727 if (np->flags & NIU_FLAGS_XMAC)
5728 niu_init_rx_xmac(np);
5730 niu_init_rx_bmac(np);
5733 static void niu_enable_tx_xmac(struct niu *np, int on)
5744 static void niu_enable_tx_bmac(struct niu *np, int on)
5755 static void niu_enable_tx_mac(struct niu *np, int on)
5757 if (np->flags & NIU_FLAGS_XMAC)
5758 niu_enable_tx_xmac(np, on);
5760 niu_enable_tx_bmac(np, on);
5763 static void niu_enable_rx_xmac(struct niu *np, int on)
5770 if (np->flags & NIU_FLAGS_MCAST)
5772 if (np->flags & NIU_FLAGS_PROMISC)
5782 static void niu_enable_rx_bmac(struct niu *np, int on)
5789 if (np->flags & NIU_FLAGS_MCAST)
5791 if (np->flags & NIU_FLAGS_PROMISC)
5801 static void niu_enable_rx_mac(struct niu *np, int on)
5803 if (np->flags & NIU_FLAGS_XMAC)
5804 niu_enable_rx_xmac(np, on);
5806 niu_enable_rx_bmac(np, on);
5809 static int niu_init_mac(struct niu *np)
5813 niu_init_xif(np);
5814 err = niu_init_pcs(np);
5818 err = niu_reset_tx_mac(np);
5821 niu_init_tx_mac(np);
5822 err = niu_reset_rx_mac(np);
5825 niu_init_rx_mac(np);
5832 niu_init_tx_mac(np);
5833 niu_enable_tx_mac(np, 1);
5835 niu_enable_rx_mac(np, 1);
5840 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5842 (void) niu_tx_channel_stop(np, rp->tx_channel);
5845 static void niu_stop_tx_channels(struct niu *np)
5849 for (i = 0; i < np->num_tx_rings; i++) {
5850 struct tx_ring_info *rp = &np->tx_rings[i];
5852 niu_stop_one_tx_channel(np, rp);
5856 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5858 (void) niu_tx_channel_reset(np, rp->tx_channel);
5861 static void niu_reset_tx_channels(struct niu *np)
5865 for (i = 0; i < np->num_tx_rings; i++) {
5866 struct tx_ring_info *rp = &np->tx_rings[i];
5868 niu_reset_one_tx_channel(np, rp);
5872 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5874 (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5877 static void niu_stop_rx_channels(struct niu *np)
5881 for (i = 0; i < np->num_rx_rings; i++) {
5882 struct rx_ring_info *rp = &np->rx_rings[i];
5884 niu_stop_one_rx_channel(np, rp);
5888 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5892 (void) niu_rx_channel_reset(np, channel);
5895 (void) niu_enable_rx_channel(np, channel, 0);
5898 static void niu_reset_rx_channels(struct niu *np)
5902 for (i = 0; i < np->num_rx_rings; i++) {
5903 struct rx_ring_info *rp = &np->rx_rings[i];
5905 niu_reset_one_rx_channel(np, rp);
5909 static void niu_disable_ipp(struct niu *np)
5923 netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5935 (void) niu_ipp_reset(np);
5938 static int niu_init_hw(struct niu *np)
5942 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5943 niu_txc_enable_port(np, 1);
5944 niu_txc_port_dma_enable(np, 1);
5945 niu_txc_set_imask(np, 0);
5947 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5948 for (i = 0; i < np->num_tx_rings; i++) {
5949 struct tx_ring_info *rp = &np->tx_rings[i];
5951 err = niu_init_one_tx_channel(np, rp);
5956 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5957 err = niu_init_rx_channels(np);
5961 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
5962 err = niu_init_classifier_hw(np);
5966 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
5967 err = niu_init_zcp(np);
5971 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
5972 err = niu_init_ipp(np);
5976 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
5977 err = niu_init_mac(np);
5984 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
5985 niu_disable_ipp(np);
5988 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
5989 niu_stop_rx_channels(np);
5990 niu_reset_rx_channels(np);
5993 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
5994 niu_stop_tx_channels(np);
5995 niu_reset_tx_channels(np);
6000 static void niu_stop_hw(struct niu *np)
6002 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
6003 niu_enable_interrupts(np, 0);
6005 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
6006 niu_enable_rx_mac(np, 0);
6008 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
6009 niu_disable_ipp(np);
6011 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
6012 niu_stop_tx_channels(np);
6014 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
6015 niu_stop_rx_channels(np);
6017 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
6018 niu_reset_tx_channels(np);
6020 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
6021 niu_reset_rx_channels(np);
6024 static void niu_set_irq_name(struct niu *np)
6026 int port = np->port;
6029 sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
6032 sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
6033 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
6037 for (i = 0; i < np->num_ldg - j; i++) {
6038 if (i < np->num_rx_rings)
6039 sprintf(np->irq_name[i+j], "%s-rx-%d",
6040 np->dev->name, i);
6041 else if (i < np->num_tx_rings + np->num_rx_rings)
6042 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
6043 i - np->num_rx_rings);
6047 static int niu_request_irq(struct niu *np)
6051 niu_set_irq_name(np);
6054 for (i = 0; i < np->num_ldg; i++) {
6055 struct niu_ldg *lp = &np->ldg[i];
6058 np->irq_name[i], lp);
6068 struct niu_ldg *lp = &np->ldg[j];
6075 static void niu_free_irq(struct niu *np)
6079 for (i = 0; i < np->num_ldg; i++) {
6080 struct niu_ldg *lp = &np->ldg[i];
6086 static void niu_enable_napi(struct niu *np)
6090 for (i = 0; i < np->num_ldg; i++)
6091 napi_enable(&np->ldg[i].napi);
6094 static void niu_disable_napi(struct niu *np)
6098 for (i = 0; i < np->num_ldg; i++)
6099 napi_disable(&np->ldg[i].napi);
6104 struct niu *np = netdev_priv(dev);
6109 err = niu_alloc_channels(np);
6113 err = niu_enable_interrupts(np, 0);
6117 err = niu_request_irq(np);
6121 niu_enable_napi(np);
6123 spin_lock_irq(&np->lock);
6125 err = niu_init_hw(np);
6127 init_timer(&np->timer);
6128 np->timer.expires = jiffies + HZ;
6129 np->timer.data = (unsigned long) np;
6130 np->timer.function = niu_timer;
6132 err = niu_enable_interrupts(np, 1);
6134 niu_stop_hw(np);
6137 spin_unlock_irq(&np->lock);
6140 niu_disable_napi(np);
6146 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6149 add_timer(&np->timer);
6154 niu_free_irq(np);
6157 niu_free_channels(np);
6163 static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6165 cancel_work_sync(&np->reset_task);
6167 niu_disable_napi(np);
6170 del_timer_sync(&np->timer);
6172 spin_lock_irq(&np->lock);
6174 niu_stop_hw(np);
6176 spin_unlock_irq(&np->lock);
6181 struct niu *np = netdev_priv(dev);
6183 niu_full_shutdown(np, dev);
6185 niu_free_irq(np);
6187 niu_free_channels(np);
6189 niu_handle_led(np, 0);
6194 static void niu_sync_xmac_stats(struct niu *np)
6196 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6219 static void niu_sync_bmac_stats(struct niu *np)
6221 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6232 static void niu_sync_mac_stats(struct niu *np)
6234 if (np->flags & NIU_FLAGS_XMAC)
6235 niu_sync_xmac_stats(np);
6237 niu_sync_bmac_stats(np);
6240 static void niu_get_rx_stats(struct niu *np,
6249 rx_rings = ACCESS_ONCE(np->rx_rings);
6253 for (i = 0; i < np->num_rx_rings; i++) {
6256 niu_sync_rx_discard_stats(np, rp, 0);
6271 static void niu_get_tx_stats(struct niu *np,
6280 tx_rings = ACCESS_ONCE(np->tx_rings);
6284 for (i = 0; i < np->num_tx_rings; i++) {
6301 struct niu *np = netdev_priv(dev);
6304 niu_get_rx_stats(np, stats);
6305 niu_get_tx_stats(np, stats);
6311 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6319 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6327 static void niu_load_hash(struct niu *np, u16 *hash)
6329 if (np->flags & NIU_FLAGS_XMAC)
6330 niu_load_hash_xmac(np, hash);
6332 niu_load_hash_bmac(np, hash);
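
niu_load_hash() dispatches the multicast hash filter to whichever MAC the port has. A sketch of the XMAC side, assuming an XMAC_HASH_TBL(i) register array of sixteen 16-bit words (a 256-bit filter):

	static void niu_load_hash_xmac(struct niu *np, u16 *hash)
	{
		int i;

		for (i = 0; i < 16; i++)	/* 16 x 16 bits = 256-bit filter */
			nw64_mac(XMAC_HASH_TBL(i), hash[i]);
	}
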
6337 struct niu *np = netdev_priv(dev);
6343 spin_lock_irqsave(&np->lock, flags);
6344 niu_enable_rx_mac(np, 0);
6346 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6348 np->flags |= NIU_FLAGS_PROMISC;
6350 np->flags |= NIU_FLAGS_MCAST;
6353 if (alt_cnt > niu_num_alt_addr(np)) {
6355 np->flags |= NIU_FLAGS_PROMISC;
6362 err = niu_set_alt_mac(np, index, ha->addr);
6366 err = niu_enable_alt_mac(np, index, 1);
6375 if (np->flags & NIU_FLAGS_XMAC)
6379 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6380 err = niu_enable_alt_mac(np, i, 0);
6398 if (np->flags & NIU_FLAGS_MCAST)
6399 niu_load_hash(np, hash);
6401 niu_enable_rx_mac(np, 1);
6402 spin_unlock_irqrestore(&np->lock, flags);
6407 struct niu *np = netdev_priv(dev);
6419 spin_lock_irqsave(&np->lock, flags);
6420 niu_enable_rx_mac(np, 0);
6421 niu_set_primary_mac(np, dev->dev_addr);
6422 niu_enable_rx_mac(np, 1);
6423 spin_unlock_irqrestore(&np->lock, flags);
6433 static void niu_netif_stop(struct niu *np)
6435 np->dev->trans_start = jiffies; /* prevent tx timeout */
6437 niu_disable_napi(np);
6439 netif_tx_disable(np->dev);
6442 static void niu_netif_start(struct niu *np)
6448 netif_tx_wake_all_queues(np->dev);
6450 niu_enable_napi(np);
6452 niu_enable_interrupts(np, 1);
6455 static void niu_reset_buffers(struct niu *np)
6459 if (np->rx_rings) {
6460 for (i = 0; i < np->num_rx_rings; i++) {
6461 struct rx_ring_info *rp = &np->rx_rings[i];
6477 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6488 if (np->tx_rings) {
6489 for (i = 0; i < np->num_tx_rings; i++) {
6490 struct tx_ring_info *rp = &np->tx_rings[i];
6494 (void) release_tx_packet(np, rp, j);
6507 struct niu *np = container_of(work, struct niu, reset_task);
6511 spin_lock_irqsave(&np->lock, flags);
6512 if (!netif_running(np->dev)) {
6513 spin_unlock_irqrestore(&np->lock, flags);
6517 spin_unlock_irqrestore(&np->lock, flags);
6519 del_timer_sync(&np->timer);
6521 niu_netif_stop(np);
6523 spin_lock_irqsave(&np->lock, flags);
6525 niu_stop_hw(np);
6527 spin_unlock_irqrestore(&np->lock, flags);
6529 niu_reset_buffers(np);
6531 spin_lock_irqsave(&np->lock, flags);
6533 err = niu_init_hw(np);
6535 np->timer.expires = jiffies + HZ;
6536 add_timer(&np->timer);
6537 niu_netif_start(np);
6540 spin_unlock_irqrestore(&np->lock, flags);
6545 struct niu *np = netdev_priv(dev);
6547 dev_err(np->device, "%s: Transmit timed out, resetting\n",
6550 schedule_work(&np->reset_task);
6633 struct niu *np = netdev_priv(dev);
6644 rp = &np->tx_rings[i];
6649 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6687 mapping = np->ops->map_single(np->device, skb->data,
6727 mapping = np->ops->map_page(np->device, skb_frag_page(frag),
6762 struct niu *np = netdev_priv(dev);
6777 niu_full_shutdown(np, dev);
6779 niu_free_channels(np);
6781 niu_enable_napi(np);
6783 err = niu_alloc_channels(np);
6787 spin_lock_irq(&np->lock);
6789 err = niu_init_hw(np);
6791 init_timer(&np->timer);
6792 np->timer.expires = jiffies + HZ;
6793 np->timer.data = (unsigned long) np;
6794 np->timer.function = niu_timer;
6796 err = niu_enable_interrupts(np, 1);
6798 niu_stop_hw(np);
6801 spin_unlock_irq(&np->lock);
6805 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6808 add_timer(&np->timer);
6817 struct niu *np = netdev_priv(dev);
6818 struct niu_vpd *vpd = &np->vpd;
6824 if (np->parent->plat_type != PLAT_TYPE_NIU)
6825 strlcpy(info->bus_info, pci_name(np->pdev),
6831 struct niu *np = netdev_priv(dev);
6834 lp = &np->link_config;
6837 cmd->phy_address = np->phy_addr;
6843 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6844 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
6852 struct niu *np = netdev_priv(dev);
6853 struct niu_link_config *lp = &np->link_config;
6859 return niu_init_link(np);
6864 struct niu *np = netdev_priv(dev);
6865 return np->msg_enable;
6870 struct niu *np = netdev_priv(dev);
6871 np->msg_enable = value;
6876 struct niu *np = netdev_priv(dev);
6878 if (np->link_config.autoneg)
6879 return niu_init_link(np);
6886 struct niu *np = netdev_priv(dev);
6888 return np->eeprom_len;
6894 struct niu *np = netdev_priv(dev);
6902 if (offset >= np->eeprom_len)
6904 if (offset + len > np->eeprom_len)
6905 len = eeprom->len = np->eeprom_len - offset;
7090 static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7099 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7103 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
7184 static int niu_get_ethtool_tcam_entry(struct niu *np,
7187 struct niu_parent *parent = np->parent;
7194 idx = tcam_get_index(np, (u16)nfc->fs.location);
7198 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7209 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7260 nfc->data = tcam_get_size(np);
7265 static int niu_get_ethtool_tcam_all(struct niu *np,
7269 struct niu_parent *parent = np->parent;
7276 nfc->data = tcam_get_size(np);
7278 niu_lock_parent(np, flags);
7280 idx = tcam_get_index(np, i);
7291 niu_unlock_parent(np, flags);
7301 struct niu *np = netdev_priv(dev);
7306 ret = niu_get_hash_opts(np, cmd);
7309 cmd->data = np->num_rx_rings;
7312 cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
7315 ret = niu_get_ethtool_tcam_entry(np, cmd);
7318 ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7328 static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7342 niu_lock_parent(np, flags);
7343 flow_key = np->parent->tcam_key[class -
7347 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7348 niu_unlock_parent(np, flags);
7352 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7354 niu_lock_parent(np, flags);
7355 flow_key = np->parent->tcam_key[class -
7360 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
7362 niu_unlock_parent(np, flags);
7369 niu_lock_parent(np, flags);
7371 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7372 niu_unlock_parent(np, flags);
7445 static int niu_add_ethtool_tcam_entry(struct niu *np,
7448 struct niu_parent *parent = np->parent;
7451 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
7461 if (idx >= tcam_get_size(np))
7473 niu_lock_parent(np, flags);
7501 ret = tcam_user_ip_class_set(np, class, 0,
7508 ret = tcam_user_ip_class_enable(np, class, 1);
7519 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7524 niu_unlock_parent(np, flags);
7531 niu_lock_parent(np, flags);
7533 idx = tcam_get_index(np, idx);
7553 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7561 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7571 if (fsp->ring_cookie >= np->num_rx_rings) {
7572 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7583 err = tcam_write(np, idx, tp->key, tp->key_mask);
7588 err = tcam_assoc_write(np, idx, tp->assoc_data);
7596 np->clas.tcam_valid_entries++;
7598 niu_unlock_parent(np, flags);
7603 static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7605 struct niu_parent *parent = np->parent;
7612 if (loc >= tcam_get_size(np))
7615 niu_lock_parent(np, flags);
7617 idx = tcam_get_index(np, loc);
7631 ret = tcam_user_ip_class_enable(np,
7643 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7651 ret = tcam_flush(np, idx);
7657 np->clas.tcam_valid_entries--;
7659 niu_unlock_parent(np, flags);
7666 struct niu *np = netdev_priv(dev);
7671 ret = niu_set_hash_opts(np, cmd);
7674 ret = niu_add_ethtool_tcam_entry(np, cmd);
7677 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7767 struct niu *np = netdev_priv(dev);
7773 if (np->flags & NIU_FLAGS_XMAC) {
7782 for (i = 0; i < np->num_rx_rings; i++) {
7787 for (i = 0; i < np->num_tx_rings; i++) {
7796 struct niu *np = netdev_priv(dev);
7801 return (np->flags & NIU_FLAGS_XMAC ?
7804 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7805 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
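Lines 7801-7805 compute the ethtool stats count as one fixed block of MAC keys (the XMAC or BMAC variant, chosen by NIU_FLAGS_XMAC) plus one block per RX and TX ring. A sketch of that arithmetic; the *_STAT_KEYS values below are made-up placeholders, not the driver's real counts:

#include <stdio.h>

/* Placeholder key counts; the real NUM_*_STAT_KEYS live in the driver. */
#define NUM_XMAC_STAT_KEYS    20
#define NUM_BMAC_STAT_KEYS    10
#define NUM_RXCHAN_STAT_KEYS   5
#define NUM_TXCHAN_STAT_KEYS   3

/* Total ethtool stats entries: one MAC block plus one block per ring,
 * mirroring the return expression at lines 7801-7805. */
static int sset_count(int is_xmac, int num_rx_rings, int num_tx_rings)
{
    return (is_xmac ? NUM_XMAC_STAT_KEYS : NUM_BMAC_STAT_KEYS) +
           (num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
           (num_tx_rings * NUM_TXCHAN_STAT_KEYS);
}

int main(void)
{
    printf("%d stats\n", sset_count(1, 8, 8));  /* XMAC port, 8 RX + 8 TX rings */
    return 0;
}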
7811 struct niu *np = netdev_priv(dev);
7814 niu_sync_mac_stats(np);
7815 if (np->flags & NIU_FLAGS_XMAC) {
7816 memcpy(data, &np->mac_stats.xmac,
7820 memcpy(data, &np->mac_stats.bmac,
7824 for (i = 0; i < np->num_rx_rings; i++) {
7825 struct rx_ring_info *rp = &np->rx_rings[i];
7827 niu_sync_rx_discard_stats(np, rp, 0);
7836 for (i = 0; i < np->num_tx_rings; i++) {
7837 struct tx_ring_info *rp = &np->tx_rings[i];
7847 static u64 niu_led_state_save(struct niu *np)
7849 if (np->flags & NIU_FLAGS_XMAC)
7855 static void niu_led_state_restore(struct niu *np, u64 val)
7857 if (np->flags & NIU_FLAGS_XMAC)
7863 static void niu_force_led(struct niu *np, int on)
7867 if (np->flags & NIU_FLAGS_XMAC) {
7887 struct niu *np = netdev_priv(dev);
7894 np->orig_led_state = niu_led_state_save(np);
7898 niu_force_led(np, 1);
7902 niu_force_led(np, 0);
7906 niu_led_state_restore(np, np->orig_led_state);
7930 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7940 if (np->parent->plat_type == PLAT_TYPE_NIU) {
7947 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7948 np->port, ldn, ldg,
7958 static int niu_set_ldg_timer_res(struct niu *np, int res)
7969 static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7981 static int niu_pci_eeprom_read(struct niu *np, u32 addr)
8000 dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8015 dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8024 static int niu_pci_eeprom_read16(struct niu *np, u32 off)
8026 int err = niu_pci_eeprom_read(np, off);
8032 err = niu_pci_eeprom_read(np, off + 1);
8040 static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8042 int err = niu_pci_eeprom_read(np, off);
8049 err = niu_pci_eeprom_read(np, off + 1);
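Lines 8024-8049 pair two helpers that assemble a 16-bit value from two successive byte reads, differing only in which offset supplies the high byte. A sketch of both compositions over a plain byte buffer; eeprom[] stands in for the per-byte niu_pci_eeprom_read() calls, and which variant is big-endian is inferred from the _swp ("swapped") suffix:

#include <stdint.h>
#include <stdio.h>

static uint16_t read16(const uint8_t *eeprom, uint32_t off)
{
    return (uint16_t)((eeprom[off] << 8) | eeprom[off + 1]);  /* off = high byte */
}

static uint16_t read16_swp(const uint8_t *eeprom, uint32_t off)
{
    return (uint16_t)(eeprom[off] | (eeprom[off + 1] << 8));  /* off = low byte */
}

int main(void)
{
    const uint8_t eeprom[] = { 0x12, 0x34 };
    printf("%04x %04x\n", read16(eeprom, 0), read16_swp(eeprom, 0));  /* 1234 3412 */
    return 0;
}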
8058 static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
8064 int err = niu_pci_eeprom_read(np, off + i);
8077 static void niu_vpd_parse_version(struct niu *np)
8079 struct niu_vpd *vpd = &np->vpd;
8094 netif_printk(np, probe, KERN_DEBUG, np->dev,
8100 np->flags |= NIU_FLAGS_VPD_VALID;
8104 static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
8115 netif_printk(np, probe, KERN_DEBUG, np->dev,
8124 niu_vpd_parse_version(np);
8128 err = niu_pci_eeprom_read(np, start + 2);
8134 prop_len = niu_pci_eeprom_read(np, start + 4);
8135 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8142 prop_buf = np->vpd.model;
8146 prop_buf = np->vpd.board_model;
8150 prop_buf = np->vpd.version;
8154 prop_buf = np->vpd.local_mac;
8158 prop_buf = &np->vpd.mac_num;
8162 prop_buf = np->vpd.phy_type;
8168 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8176 netif_printk(np, probe, KERN_DEBUG, np->dev,
8180 *prop_buf++ = niu_pci_eeprom_read(np, off + i);
8190 static void niu_pci_vpd_fetch(struct niu *np, u32 start)
8195 err = niu_pci_eeprom_read16_swp(np, start + 1);
8205 err = niu_pci_eeprom_read(np, here);
8209 err = niu_pci_eeprom_read16_swp(np, here + 1);
8218 err = niu_pci_vpd_scan_props(np, here, end);
8225 static u32 niu_pci_vpd_offset(struct niu *np)
8234 err = niu_pci_eeprom_read16(np, start + 0);
8239 err = niu_pci_eeprom_read16(np, start + 23);
8245 err = niu_pci_eeprom_read16(np, start + 0);
8248 err = niu_pci_eeprom_read16(np, start + 2);
8253 err = niu_pci_eeprom_read(np, start + 20);
8257 err = niu_pci_eeprom_read(np, ret + 2);
8265 err = niu_pci_eeprom_read16_swp(np, start + 8);
8270 err = niu_pci_eeprom_read(np, ret + 0);
8280 static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
8284 np->flags &= ~(NIU_FLAGS_FIBER |
8286 np->mac_xcvr = MAC_XCVR_MII;
8289 np->flags |= (NIU_FLAGS_10G |
8291 np->mac_xcvr = MAC_XCVR_XPCS;
8294 np->flags &= ~NIU_FLAGS_10G;
8295 np->flags |= NIU_FLAGS_FIBER;
8296 np->mac_xcvr = MAC_XCVR_PCS;
8299 np->flags |= NIU_FLAGS_10G;
8300 np->flags &= ~NIU_FLAGS_FIBER;
8301 np->mac_xcvr = MAC_XCVR_XPCS;
8304 np->flags |= NIU_FLAGS_10G;
8305 np->flags &= ~NIU_FLAGS_FIBER;
8306 np->flags |= NIU_FLAGS_XCVR_SERDES;
8307 np->mac_xcvr = MAC_XCVR_XPCS;
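Lines 8280-8307 decode the VPD phy_type string into three independent facts: speed (NIU_FLAGS_10G), medium (NIU_FLAGS_FIBER / NIU_FLAGS_XCVR_SERDES), and which transceiver block the MAC drives (mac_xcvr). A sketch of that shape; the property strings below are hypothetical, since the listing does not show the strcmp targets:

#include <stdio.h>
#include <string.h>

enum mac_xcvr { MAC_XCVR_MII, MAC_XCVR_PCS, MAC_XCVR_XPCS };

#define FLAG_10G    0x1   /* stands in for NIU_FLAGS_10G */
#define FLAG_FIBER  0x2   /* stands in for NIU_FLAGS_FIBER */
#define FLAG_SERDES 0x4   /* stands in for NIU_FLAGS_XCVR_SERDES */

struct decoded {
    unsigned int flags;
    enum mac_xcvr xcvr;
};

/* Map a phy-type property string to flags plus transceiver block; the
 * "copper-1g" style names are placeholders, not the driver's strings. */
static int decode_phy_prop(const char *prop, struct decoded *d)
{
    if (!strcmp(prop, "copper-1g")) {
        d->flags = 0;                       /* 1G copper: MII */
        d->xcvr = MAC_XCVR_MII;
    } else if (!strcmp(prop, "fiber-1g")) {
        d->flags = FLAG_FIBER;              /* 1G fiber: PCS */
        d->xcvr = MAC_XCVR_PCS;
    } else if (!strcmp(prop, "fiber-10g")) {
        d->flags = FLAG_10G | FLAG_FIBER;   /* 10G fiber: XPCS */
        d->xcvr = MAC_XCVR_XPCS;
    } else if (!strcmp(prop, "copper-10g")) {
        d->flags = FLAG_10G;                /* 10G copper: XPCS */
        d->xcvr = MAC_XCVR_XPCS;
    } else if (!strcmp(prop, "serdes-10g")) {
        d->flags = FLAG_10G | FLAG_SERDES;  /* 10G serdes: XPCS */
        d->xcvr = MAC_XCVR_XPCS;
    } else {
        return -1;  /* unknown string: caller falls back to SPROM */
    }
    return 0;
}

int main(void)
{
    struct decoded d;
    if (decode_phy_prop("fiber-10g", &d) == 0)
        printf("flags=%#x xcvr=%d\n", d.flags, (int)d.xcvr);
    return 0;
}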
8314 static int niu_pci_vpd_get_nports(struct niu *np)
8318 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
8319 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
8320 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
8321 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
8322 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
8324 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
8325 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
8326 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
8327 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
8334 static void niu_pci_vpd_validate(struct niu *np)
8336 struct net_device *dev = np->dev;
8337 struct niu_vpd *vpd = &np->vpd;
8341 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8343 np->flags &= ~NIU_FLAGS_VPD_VALID;
8347 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8348 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8349 np->flags |= NIU_FLAGS_10G;
8350 np->flags &= ~NIU_FLAGS_FIBER;
8351 np->flags |= NIU_FLAGS_XCVR_SERDES;
8352 np->mac_xcvr = MAC_XCVR_PCS;
8353 if (np->port > 1) {
8354 np->flags |= NIU_FLAGS_FIBER;
8355 np->flags &= ~NIU_FLAGS_10G;
8357 if (np->flags & NIU_FLAGS_10G)
8358 np->mac_xcvr = MAC_XCVR_XPCS;
8359 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8360 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8362 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8363 dev_err(np->device, "Illegal phy string [%s]\n",
8364 np->vpd.phy_type);
8365 dev_err(np->device, "Falling back to SPROM\n");
8366 np->flags &= ~NIU_FLAGS_VPD_VALID;
8373 dev->dev_addr[5] += np->port;
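Line 8373 derives the port's MAC address from the card-wide base by adding the port index to the last octet (the SPROM path repeats this at line 8490). A standalone sketch, assuming the base address leaves enough room in its low octet for all ports:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Per-port MAC = base address with the last octet bumped by the port
 * index, as at line 8373. */
static void port_mac(const uint8_t base[6], unsigned int port, uint8_t out[6])
{
    memcpy(out, base, 6);
    out[5] += port;
}

int main(void)
{
    const uint8_t base[6] = { 0x00, 0x14, 0x4f, 0x01, 0x02, 0x00 };  /* example base */
    uint8_t mac[6];
    unsigned int p;

    for (p = 0; p < 4; p++) {
        port_mac(base, p, mac);
        printf("port %u: %02x:%02x:%02x:%02x:%02x:%02x\n", p,
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }
    return 0;
}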
8378 static int niu_pci_probe_sprom(struct niu *np)
8380 struct net_device *dev = np->dev;
8389 np->eeprom_len = len;
8391 netif_printk(np, probe, KERN_DEBUG, np->dev,
8402 netif_printk(np, probe, KERN_DEBUG, np->dev,
8405 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8410 switch (np->port) {
8428 dev_err(np->device, "Bogus port number %u\n",
8429 np->port);
8432 netif_printk(np, probe, KERN_DEBUG, np->dev,
8438 np->flags &= ~(NIU_FLAGS_FIBER |
8440 np->mac_xcvr = MAC_XCVR_MII;
8445 np->flags &= ~NIU_FLAGS_10G;
8446 np->flags |= NIU_FLAGS_FIBER;
8447 np->mac_xcvr = MAC_XCVR_PCS;
8452 np->flags |= NIU_FLAGS_10G;
8453 np->flags &= ~NIU_FLAGS_FIBER;
8454 np->mac_xcvr = MAC_XCVR_XPCS;
8459 np->flags |= (NIU_FLAGS_10G |
8461 np->mac_xcvr = MAC_XCVR_XPCS;
8465 dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8470 netif_printk(np, probe, KERN_DEBUG, np->dev,
8478 netif_printk(np, probe, KERN_DEBUG, np->dev,
8484 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8490 dev->dev_addr[5] += np->port;
8495 netif_printk(np, probe, KERN_DEBUG, np->dev,
8503 np->vpd.model[i + 3] = (tmp >> 0) & 0xff;
8504 np->vpd.model[i + 2] = (tmp >> 8) & 0xff;
8505 np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
8506 np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
8508 np->vpd.model[val] = '\0';
8511 netif_printk(np, probe, KERN_DEBUG, np->dev,
8519 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff;
8520 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff;
8521 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8522 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8524 np->vpd.board_model[val] = '\0';
8526 np->vpd.mac_num =
8528 netif_printk(np, probe, KERN_DEBUG, np->dev,
8529 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
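Lines 8503-8508 and 8519-8524 unpack 32-bit SPROM words into the model and board_model strings, most-significant byte first. A sketch of that unpacking; the word buffer stands in for the per-word SPROM reads, and the length is assumed to be a multiple of four as the 4-byte stride requires:

#include <stdint.h>
#include <stdio.h>

/* Unpack 32-bit SPROM words into a C string, most-significant byte
 * first, following the byte order at lines 8503-8508. */
static void unpack_sprom_string(const uint32_t *words, int nbytes, char *out)
{
    int i;

    for (i = 0; i < nbytes; i += 4) {
        uint32_t tmp = words[i / 4];

        out[i + 3] = (tmp >>  0) & 0xff;
        out[i + 2] = (tmp >>  8) & 0xff;
        out[i + 1] = (tmp >> 16) & 0xff;
        out[i + 0] = (tmp >> 24) & 0xff;
    }
    out[nbytes] = '\0';
}

int main(void)
{
    const uint32_t words[] = { 0x4e495532, 0x58474600 };  /* "NIU2XGF" */
    char model[9];

    unpack_sprom_string(words, 8, model);
    printf("%s\n", model);
    return 0;
}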
8534 static int niu_get_and_validate_port(struct niu *np)
8536 struct niu_parent *parent = np->parent;
8538 if (np->port <= 1)
8539 np->flags |= NIU_FLAGS_XMAC;
8545 parent->num_ports = niu_pci_vpd_get_nports(np);
8562 if (np->port >= parent->num_ports)
8770 static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
8779 niu_lock_parent(np, flags);
8784 dev_id_1 = mdio_read(np, port,
8786 dev_id_2 = mdio_read(np, port,
8792 dev_id_1 = mdio_read(np, port,
8794 dev_id_2 = mdio_read(np, port,
8800 dev_id_1 = mii_read(np, port, MII_PHYSID1);
8801 dev_id_2 = mii_read(np, port, MII_PHYSID2);
8807 niu_unlock_parent(np, flags);
8812 static int walk_phys(struct niu *np, struct niu_parent *parent)
8822 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8823 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8832 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8838 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8841 if (np->flags & NIU_FLAGS_10G) {
8849 err = fill_phy_probe_info(np, parent, info);
8879 val = phy_encode(PORT_TYPE_10G, np->port);
8941 static int niu_probe_ports(struct niu *np)
8943 struct niu_parent *parent = np->parent;
8947 err = walk_phys(np, parent);
8951 niu_set_ldg_timer_res(np, 2);
8953 niu_ldn_irq_enable(np, i, 0);
8962 static int niu_classifier_swstate_init(struct niu *np)
8964 struct niu_classifier *cp = &np->clas;
8966 cp->tcam_top = (u16) np->port;
8967 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
8971 return fflp_early_init(np);
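Lines 8966-8967 split the shared TCAM across ports: each port's software view starts at tcam_top = port and owns tcam_num_entries / num_ports slots, and ethtool locations are bounds-checked against tcam_get_size() (lines 7461, 7612) before tcam_get_index() translates them to hardware indices. A sketch of one plausible translation; the port-interleaved stride is an assumption, since the listing does not show tcam_get_index() itself:

#include <stdio.h>

/* Translate a per-port software TCAM location to a hardware index,
 * assuming a port-interleaved layout: base = port (tcam_top, line 8966),
 * stride = num_ports. Only the base and the per-port size
 * (tcam_num_entries / num_ports, line 8967) come from the listing. */
static int tcam_hw_index(int port, int num_ports, int soft_idx)
{
    return port + soft_idx * num_ports;
}

int main(void)
{
    int i;

    for (i = 0; i < 4; i++)  /* port 1 of 4: hw indices 1, 5, 9, 13 */
        printf("entry %d -> hw index %d\n", i, tcam_hw_index(1, 4, i));
    return 0;
}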
8974 static void niu_link_config_init(struct niu *np)
8976 struct niu_link_config *lp = &np->link_config;
8999 static int niu_init_mac_ipp_pcs_base(struct niu *np)
9001 switch (np->port) {
9003 np->mac_regs = np->regs + XMAC_PORT0_OFF;
9004 np->ipp_off = 0x00000;
9005 np->pcs_off = 0x04000;
9006 np->xpcs_off = 0x02000;
9010 np->mac_regs = np->regs + XMAC_PORT1_OFF;
9011 np->ipp_off = 0x08000;
9012 np->pcs_off = 0x0a000;
9013 np->xpcs_off = 0x08000;
9017 np->mac_regs = np->regs + BMAC_PORT2_OFF;
9018 np->ipp_off = 0x04000;
9019 np->pcs_off = 0x0e000;
9020 np->xpcs_off = ~0UL;
9024 np->mac_regs = np->regs + BMAC_PORT3_OFF;
9025 np->ipp_off = 0x0c000;
9026 np->pcs_off = 0x12000;
9027 np->xpcs_off = ~0UL;
9031 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
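Lines 9001-9031 select the per-port register geometry: ports 0-1 use the XMAC blocks, ports 2-3 the BMAC blocks, and the BMAC ports have no XPCS (xpcs_off = ~0UL). The same data as a lookup table; the ipp/pcs/xpcs offsets are copied from lines 9004-9027, while the MAC base offsets (XMAC_PORT*_OFF, BMAC_PORT*_OFF) are omitted because the listing does not show their values:

#include <stdio.h>

/* Per-port IPP/PCS/XPCS offsets as selected by the switch at lines
 * 9001-9031; ~0UL marks "no XPCS block" on the BMAC ports. */
struct port_offsets {
    unsigned long ipp_off, pcs_off, xpcs_off;
};

static const struct port_offsets port_map[4] = {
    { 0x00000, 0x04000, 0x02000 },  /* port 0: XMAC */
    { 0x08000, 0x0a000, 0x08000 },  /* port 1: XMAC */
    { 0x04000, 0x0e000, ~0UL    },  /* port 2: BMAC, no XPCS */
    { 0x0c000, 0x12000, ~0UL    },  /* port 3: BMAC, no XPCS */
};

int main(void)
{
    unsigned int port;

    for (port = 0; port < 4; port++)
        printf("port %u: ipp=%#lx pcs=%#lx xpcs=%#lx\n", port,
               port_map[port].ipp_off, port_map[port].pcs_off,
               port_map[port].xpcs_off);
    return 0;
}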
9038 static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9041 struct niu_parent *parent = np->parent;
9042 struct pci_dev *pdev = np->pdev;
9046 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
9050 num_irqs = (parent->rxchan_per_port[np->port] +
9051 parent->txchan_per_port[np->port] +
9052 (np->port == 0 ? 3 : 1));
9062 np->flags &= ~NIU_FLAGS_MSIX;
9066 np->flags |= NIU_FLAGS_MSIX;
9068 np->ldg[i].irq = msi_vec[i].vector;
9069 np->num_ldg = num_irqs;
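Lines 9046-9052 size the MSI-X request: a contiguous slice of the LDG space per port, and one vector per RX and TX channel plus extras for the non-channel sources that the later LDG assignment loop rotates over (port 0 carries the device-wide ones, hence 3 instead of 1). A sketch of that budget; NIU_NUM_LDG's value here is a placeholder:

#include <stdio.h>

#define NIU_NUM_LDG 64  /* placeholder value for the device's LDG count */

/* Per-port MSI-X budget, mirroring lines 9046-9052: a contiguous LDG
 * slice per port, one vector per RX/TX channel, and extra vectors for
 * the non-channel sources (3 on port 0, 1 elsewhere). */
static int msix_budget(int port, int num_ports, int rxchan, int txchan,
                       int *first_ldg)
{
    *first_ldg = (NIU_NUM_LDG / num_ports) * port;
    return rxchan + txchan + (port == 0 ? 3 : 1);
}

int main(void)
{
    int first;
    int n = msix_budget(0, 2, 8, 8, &first);

    printf("port 0: first_ldg=%d, %d vectors\n", first, n);  /* 0, 19 */
    return 0;
}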
9072 static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9075 struct platform_device *op = np->op;
9085 np->ldg[i].irq = op->archdata.irqs[i];
9088 np->num_ldg = op->archdata.num_irqs;
9096 static int niu_ldg_init(struct niu *np)
9098 struct niu_parent *parent = np->parent;
9104 np->num_ldg = 1;
9105 np->ldg[0].irq = np->dev->irq;
9107 err = niu_n2_irq_init(np, ldg_num_map);
9111 niu_try_msix(np, ldg_num_map);
9113 port = np->port;
9114 for (i = 0; i < np->num_ldg; i++) {
9115 struct niu_ldg *lp = &np->ldg[i];
9117 netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
9119 lp->np = np;
9127 if (np->parent->plat_type != PLAT_TYPE_NIU) {
9128 err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9147 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9153 if (ldg_rotor == np->num_ldg)
9157 err = niu_ldg_assign_ldn(np, parent,
9164 if (ldg_rotor == np->num_ldg)
9167 err = niu_ldg_assign_ldn(np, parent,
9174 if (ldg_rotor == np->num_ldg)
9185 err = niu_ldg_assign_ldn(np, parent,
9191 if (ldg_rotor == np->num_ldg)
9200 err = niu_ldg_assign_ldn(np, parent,
9206 if (ldg_rotor == np->num_ldg)
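Lines 9147-9206 hand out logical devices (LDNs) to interrupt groups with a rotor: each source takes the current LDG, then the rotor advances and wraps at num_ldg, so channels and error sources spread evenly over however many vectors were granted. A standalone sketch of the rotor:

#include <stdio.h>

/* Round-robin ("rotor") distribution of logical device numbers over the
 * available interrupt groups, following the pattern repeated at lines
 * 9147-9206: take the current group, advance, wrap at num_ldg. */
static void distribute_ldns(const int *ldg_map, int num_ldg, int num_ldn)
{
    int rotor = 0, ldn;

    for (ldn = 0; ldn < num_ldn; ldn++) {
        printf("ldn %d -> ldg %d\n", ldn, ldg_map[rotor]);
        if (++rotor == num_ldg)
            rotor = 0;
    }
}

int main(void)
{
    const int ldg_map[] = { 4, 5, 6 };  /* hypothetical granted LDGs */

    distribute_ldns(ldg_map, 3, 7);     /* 7 sources over 3 groups */
    return 0;
}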
9213 static void niu_ldg_free(struct niu *np)
9215 if (np->flags & NIU_FLAGS_MSIX)
9216 pci_disable_msix(np->pdev);
9219 static int niu_get_of_props(struct niu *np)
9222 struct net_device *dev = np->dev;
9229 if (np->parent->plat_type == PLAT_TYPE_NIU)
9230 dp = np->op->dev.of_node;
9232 dp = pci_device_to_OF_node(np->pdev);
9244 strcpy(np->vpd.phy_type, phy_type);
9246 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9248 dp->full_name, np->vpd.phy_type);
9273 strcpy(np->vpd.model, model);
9276 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
9286 static int niu_get_invariants(struct niu *np)
9291 err = niu_get_of_props(np);
9297 err = niu_init_mac_ipp_pcs_base(np);
9302 err = niu_get_and_validate_port(np);
9307 if (np->parent->plat_type == PLAT_TYPE_NIU)
9311 offset = niu_pci_vpd_offset(np);
9312 netif_printk(np, probe, KERN_DEBUG, np->dev,
9315 niu_pci_vpd_fetch(np, offset);
9318 if (np->flags & NIU_FLAGS_VPD_VALID) {
9319 niu_pci_vpd_validate(np);
9320 err = niu_get_and_validate_port(np);
9325 if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
9326 err = niu_get_and_validate_port(np);
9329 err = niu_pci_probe_sprom(np);
9335 err = niu_probe_ports(np);
9339 niu_ldg_init(np);
9341 niu_classifier_swstate_init(np);
9342 niu_link_config_init(np);
9344 err = niu_determine_phy_disposition(np);
9346 err = niu_init_link(np);
9465 static struct niu_parent *niu_new_parent(struct niu *np,
9529 static struct niu_parent *niu_get_parent(struct niu *np,
9533 int port = np->port;
9544 p = niu_new_parent(np, id, ptype);
9552 &np->device->kobj,
9555 p->ports[port] = np;
9564 static void niu_put_parent(struct niu *np)
9566 struct niu_parent *p = np->parent;
9567 u8 port = np->port;
9570 BUG_ON(!p || p->ports[port] != np);
9572 netif_printk(np, probe, KERN_DEBUG, np->dev,
9582 np->parent = NULL;
9660 struct niu *np;
9668 np = netdev_priv(dev);
9669 np->dev = dev;
9670 np->pdev = pdev;
9671 np->op = op;
9672 np->device = gen_dev;
9673 np->ops = ops;
9675 np->msg_enable = niu_debug;
9677 spin_lock_init(&np->lock);
9678 INIT_WORK(&np->reset_task, niu_reset_task);
9680 np->port = port;
9705 static void niu_device_announce(struct niu *np)
9707 struct net_device *dev = np->dev;
9711 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9714 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9715 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9716 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9717 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9718 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9719 np->vpd.phy_type);
9723 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9724 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9725 (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9726 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9728 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9729 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9730 np->vpd.phy_type);
9745 struct niu *np;
9782 np = netdev_priv(dev);
9789 np->parent = niu_get_parent(np, &parent_id,
9791 if (!np->parent) {
9824 np->regs = pci_ioremap_bar(pdev, 0);
9825 if (!np->regs) {
9838 err = niu_get_invariants(np);
9853 niu_device_announce(np);
9858 if (np->regs) {
9859 iounmap(np->regs);
9860 np->regs = NULL;
9864 niu_put_parent(np);
9883 struct niu *np = netdev_priv(dev);
9886 if (np->regs) {
9887 iounmap(np->regs);
9888 np->regs = NULL;
9891 niu_ldg_free(np);
9893 niu_put_parent(np);
9904 struct niu *np = netdev_priv(dev);
9910 flush_work(&np->reset_task);
9911 niu_netif_stop(np);
9913 del_timer_sync(&np->timer);
9915 spin_lock_irqsave(&np->lock, flags);
9916 niu_enable_interrupts(np, 0);
9917 spin_unlock_irqrestore(&np->lock, flags);
9921 spin_lock_irqsave(&np->lock, flags);
9922 niu_stop_hw(np);
9923 spin_unlock_irqrestore(&np->lock, flags);
9933 struct niu *np = netdev_priv(dev);
9944 spin_lock_irqsave(&np->lock, flags);
9946 err = niu_init_hw(np);
9948 np->timer.expires = jiffies + HZ;
9949 add_timer(&np->timer);
9950 niu_netif_start(np);
9953 spin_unlock_irqrestore(&np->lock, flags);
10030 struct niu *np;
10049 np = netdev_priv(dev);
10054 np->parent = niu_get_parent(np, &parent_id,
10056 if (!np->parent) {
10063 np->regs = of_ioremap(&op->resource[1], 0,
10066 if (!np->regs) {
10072 np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10075 if (!np->vir_regs_1) {
10081 np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10084 if (!np->vir_regs_2) {
10092 err = niu_get_invariants(np);
10107 niu_device_announce(np);
10112 if (np->vir_regs_1) {
10113 of_iounmap(&op->resource[2], np->vir_regs_1,
10115 np->vir_regs_1 = NULL;
10118 if (np->vir_regs_2) {
10119 of_iounmap(&op->resource[3], np->vir_regs_2,
10121 np->vir_regs_2 = NULL;
10124 if (np->regs) {
10125 of_iounmap(&op->resource[1], np->regs,
10127 np->regs = NULL;
10131 niu_put_parent(np);
10145 struct niu *np = netdev_priv(dev);
10149 if (np->vir_regs_1) {
10150 of_iounmap(&op->resource[2], np->vir_regs_1,
10152 np->vir_regs_1 = NULL;
10155 if (np->vir_regs_2) {
10156 of_iounmap(&op->resource[3], np->vir_regs_2,
10158 np->vir_regs_2 = NULL;
10161 if (np->regs) {
10162 of_iounmap(&op->resource[1], np->regs,
10164 np->regs = NULL;
10167 niu_ldg_free(np);
10169 niu_put_parent(np);