Lines Matching refs:mp (identifier cross-reference for the mv643xx_eth driver, drivers/net/ethernet/marvell/mv643xx_eth.c)

429 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
431 return readl(mp->shared->base + offset);
434 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
436 return readl(mp->base + offset);
439 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
441 writel(data, mp->shared->base + offset);
444 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
446 writel(data, mp->base + offset);
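
The four helpers above (lines 429-446) are the driver's only register accessors: rdl()/wrl() address the controller-wide register file through mp->shared->base, while rdlp()/wrlp() address this port's own window through mp->base (derived from the shared base at line 3028 further down). A minimal sketch of the assumed struct layout and a hypothetical caller, for orientation only:

/* Sketch only: the two iomem bases assumed by rdl/rdlp/wrl/wrlp. */
struct mv643xx_eth_shared_private {
        void __iomem *base;             /* whole-controller registers */
        /* ... */
};

struct mv643xx_eth_private {
        struct mv643xx_eth_shared_private *shared;
        void __iomem *base;             /* this port's register window */
        /* ... */
};

/* Hypothetical caller: one per-port read, one shared read. */
static void example_access(struct mv643xx_eth_private *mp)
{
        u32 port_status = rdlp(mp, PORT_STATUS);   /* per-port register */
        u32 phy_addr    = rdl(mp, PHY_ADDR);       /* shared register */

        (void)port_status;
        (void)phy_addr;
}
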
463 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
464 wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
469 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
472 wrlp(mp, RXQ_COMMAND, mask << 8);
473 while (rdlp(mp, RXQ_COMMAND) & mask)
479 struct mv643xx_eth_private *mp = txq_to_mp(txq);
484 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
489 struct mv643xx_eth_private *mp = txq_to_mp(txq);
490 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
495 struct mv643xx_eth_private *mp = txq_to_mp(txq);
498 wrlp(mp, TXQ_COMMAND, mask << 8);
499 while (rdlp(mp, TXQ_COMMAND) & mask)
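
The queue start/stop writes above share one encoding: in RXQ_COMMAND and TXQ_COMMAND, bit N enables queue N, and writing the same mask shifted left by 8 requests a disable, which is complete once the enable bit reads back as zero (the body of the polling loop is not among the matched lines; a short delay is assumed below). A hedged sketch, with a hypothetical helper name:

/* Sketch, based on the rxq_enable()/rxq_disable() lines above. */
static void example_rxq_restart(struct mv643xx_eth_private *mp, int index)
{
        u8 mask = 1 << index;

        /* Request disable and wait for the enable bit to clear. */
        wrlp(mp, RXQ_COMMAND, mask << 8);
        while (rdlp(mp, RXQ_COMMAND) & mask)
                udelay(10);     /* assumed delay; loop body not in the listing */

        /* Re-enable the queue. */
        wrlp(mp, RXQ_COMMAND, mask);
}
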
505 struct mv643xx_eth_private *mp = txq_to_mp(txq);
506 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
518 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
519 struct net_device_stats *stats = &mp->dev->stats;
543 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
548 mp->work_rx_refill |= 1 << rxq->index;
580 skb->protocol = eth_type_trans(skb, mp->dev);
582 napi_gro_receive(&mp->napi, skb);
592 netdev_err(mp->dev,
603 mp->work_rx &= ~(1 << rxq->index);
610 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
620 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
623 mp->oom = 1;
640 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
658 mp->work_rx_refill &= ~(1 << rxq->index);
685 static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
701 if (length - hdr_len > mp->shared->tx_csum_limit ||
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
789 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
811 struct mv643xx_eth_private *mp = txq_to_mp(txq);
858 mp->work_tx_end &= ~(1 << txq->index);
874 struct mv643xx_eth_private *mp = txq_to_mp(txq);
905 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
913 struct mv643xx_eth_private *mp = txq_to_mp(txq);
930 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
950 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
962 mp->work_tx_end &= ~(1 << txq->index);
975 struct mv643xx_eth_private *mp = netdev_priv(dev);
981 txq = mp->txq + queue;
1014 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1015 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1021 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1024 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1034 mp->work_tx_end &= ~(1 << txq->index);
1039 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1040 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1069 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1080 netdev_info(mp->dev, "tx error\n");
1081 mp->dev->stats.tx_errors++;
1089 mp->work_tx &= ~(1 << txq->index);
1100 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1106 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1110 mtu = (mp->dev->mtu + 255) >> 8;
1118 switch (mp->shared->tx_bw_control) {
1120 wrlp(mp, TX_BW_RATE, token_rate);
1121 wrlp(mp, TX_BW_MTU, mtu);
1122 wrlp(mp, TX_BW_BURST, bucket_size);
1125 wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1126 wrlp(mp, TX_BW_MTU_MOVED, mtu);
1127 wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1134 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1138 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1146 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1147 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
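
tx_set_rate() and txq_set_rate() share the token-rate conversion above; dividing both the bit rate and t_clk by 1000 first keeps the intermediate product inside 32 bits. A standalone worked example using the 1 Gb/s rate passed to tx_set_rate() at line 2292 and the 133 MHz default t_clk from line 3046 (values illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned int rate  = 1000000000;   /* 1 Gb/s, as in tx_set_rate(mp, 1000000000, ...) */
        unsigned int t_clk = 133000000;    /* default t_clk from the probe path */
        unsigned int mtu   = 1500;

        /* Same arithmetic as tx_set_rate()/txq_set_rate() above. */
        unsigned int token_rate = ((rate / 1000) * 64) / (t_clk / 1000);
        unsigned int mtu_reg    = (mtu + 255) >> 8;

        printf("token_rate = %u\n", token_rate);          /* prints 481 */
        printf("mtu (256-byte units) = %u\n", mtu_reg);   /* prints 6 */
        return 0;
}
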
1152 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1160 switch (mp->shared->tx_bw_control) {
1170 val = rdlp(mp, off);
1172 wrlp(mp, off, val);
1180 struct mv643xx_eth_private *mp = netdev_priv(dev);
1181 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1187 if (mp->phy->autoneg == AUTONEG_ENABLE) {
1195 if (mp->phy->speed == SPEED_1000) {
1204 if (mp->phy->speed == SPEED_100)
1209 if (mp->phy->duplex == DUPLEX_FULL)
1215 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1221 struct mv643xx_eth_private *mp = netdev_priv(dev);
1228 for (i = 0; i < mp->txq_count; i++) {
1229 struct tx_queue *txq = mp->txq + i;
1243 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1245 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1248 static void mib_counters_clear(struct mv643xx_eth_private *mp)
1253 mib_read(mp, i);
1256 rdlp(mp, RX_DISCARD_FRAME_CNT);
1257 rdlp(mp, RX_OVERRUN_FRAME_CNT);
1260 static void mib_counters_update(struct mv643xx_eth_private *mp)
1262 struct mib_counters *p = &mp->mib_counters;
1264 spin_lock_bh(&mp->mib_counters_lock);
1265 p->good_octets_received += mib_read(mp, 0x00);
1266 p->bad_octets_received += mib_read(mp, 0x08);
1267 p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1268 p->good_frames_received += mib_read(mp, 0x10);
1269 p->bad_frames_received += mib_read(mp, 0x14);
1270 p->broadcast_frames_received += mib_read(mp, 0x18);
1271 p->multicast_frames_received += mib_read(mp, 0x1c);
1272 p->frames_64_octets += mib_read(mp, 0x20);
1273 p->frames_65_to_127_octets += mib_read(mp, 0x24);
1274 p->frames_128_to_255_octets += mib_read(mp, 0x28);
1275 p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1276 p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1277 p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1278 p->good_octets_sent += mib_read(mp, 0x38);
1279 p->good_frames_sent += mib_read(mp, 0x40);
1280 p->excessive_collision += mib_read(mp, 0x44);
1281 p->multicast_frames_sent += mib_read(mp, 0x48);
1282 p->broadcast_frames_sent += mib_read(mp, 0x4c);
1283 p->unrec_mac_control_received += mib_read(mp, 0x50);
1284 p->fc_sent += mib_read(mp, 0x54);
1285 p->good_fc_received += mib_read(mp, 0x58);
1286 p->bad_fc_received += mib_read(mp, 0x5c);
1287 p->undersize_received += mib_read(mp, 0x60);
1288 p->fragments_received += mib_read(mp, 0x64);
1289 p->oversize_received += mib_read(mp, 0x68);
1290 p->jabber_received += mib_read(mp, 0x6c);
1291 p->mac_receive_error += mib_read(mp, 0x70);
1292 p->bad_crc_event += mib_read(mp, 0x74);
1293 p->collision += mib_read(mp, 0x78);
1294 p->late_collision += mib_read(mp, 0x7c);
1296 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1297 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1298 spin_unlock_bh(&mp->mib_counters_lock);
1303 struct mv643xx_eth_private *mp = (void *)_mp;
1304 mib_counters_update(mp);
1305 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
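
mib_counters_clear() simply reads every MIB offset and discards the result, which only works if the counters are clear-on-read (an assumption here; the matched lines do not state it), and mib_counters_update() re-reads them under mib_counters_lock every 30 seconds from the timer above, folding the hardware counters into the software copies before they can wrap. A sketch of the clear loop, with the bounds assumed from the 0x00..0x7c offsets used by mib_counters_update():

/* Assumed reconstruction of mib_counters_clear(): reading each counter
 * once discards its current value (clear-on-read). */
static void example_mib_clear(struct mv643xx_eth_private *mp)
{
        int i;

        for (i = 0; i < 0x80; i += 4)
                mib_read(mp, i);

        /* The two RX drop counters live outside the MIB window. */
        rdlp(mp, RX_DISCARD_FRAME_CNT);
        rdlp(mp, RX_OVERRUN_FRAME_CNT);
}
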
1321 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1323 u32 val = rdlp(mp, SDMA_CONFIG);
1326 if (mp->shared->extended_rx_coal_limit)
1332 do_div(temp, mp->t_clk);
1337 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1342 temp = (u64)usec * mp->t_clk;
1346 val = rdlp(mp, SDMA_CONFIG);
1347 if (mp->shared->extended_rx_coal_limit) {
1359 wrlp(mp, SDMA_CONFIG, val);
1362 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1366 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1368 do_div(temp, mp->t_clk);
1373 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1377 temp = (u64)usec * mp->t_clk;
1384 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
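
The coalescing helpers convert between microseconds and t_clk cycles: set_rx_coal()/set_tx_coal() multiply usec by t_clk in a 64-bit intermediate, and the get_* helpers divide the stored value back by t_clk with do_div(). The exact scaling packed into SDMA_CONFIG and TX_FIFO_URGENT_THRESHOLD is not visible in the matched lines, so the standalone example below shows only the microsecond-to-cycle conversion, with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t t_clk = 133000000;  /* default t_clk, as in the probe path */
        uint64_t usec  = 250;        /* default RX coalescing set later in probe */

        /* usec * t_clk needs 64 bits, as in set_rx_coal()/set_tx_coal(). */
        uint64_t cycles = usec * t_clk / 1000000;

        printf("%llu us at %llu Hz = %llu t_clk cycles\n",
               (unsigned long long)usec, (unsigned long long)t_clk,
               (unsigned long long)cycles);   /* 33250 cycles */
        return 0;
}
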
1448 mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
1453 err = phy_read_status(mp->phy);
1455 err = phy_ethtool_gset(mp->phy, cmd);
1467 mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1472 port_status = rdlp(mp, PORT_STATUS);
1504 struct mv643xx_eth_private *mp = netdev_priv(dev);
1507 if (mp->phy)
1508 phy_ethtool_get_wol(mp->phy, wol);
1514 struct mv643xx_eth_private *mp = netdev_priv(dev);
1517 if (mp->phy == NULL)
1520 err = phy_ethtool_set_wol(mp->phy, wol);
1532 struct mv643xx_eth_private *mp = netdev_priv(dev);
1534 if (mp->phy != NULL)
1535 return mv643xx_eth_get_settings_phy(mp, cmd);
1537 return mv643xx_eth_get_settings_phyless(mp, cmd);
1543 struct mv643xx_eth_private *mp = netdev_priv(dev);
1546 if (mp->phy == NULL)
1554 ret = phy_ethtool_sset(mp->phy, cmd);
1574 struct mv643xx_eth_private *mp = netdev_priv(dev);
1576 if (mp->phy == NULL)
1579 return genphy_restart_aneg(mp->phy);
1585 struct mv643xx_eth_private *mp = netdev_priv(dev);
1587 ec->rx_coalesce_usecs = get_rx_coal(mp);
1588 ec->tx_coalesce_usecs = get_tx_coal(mp);
1596 struct mv643xx_eth_private *mp = netdev_priv(dev);
1598 set_rx_coal(mp, ec->rx_coalesce_usecs);
1599 set_tx_coal(mp, ec->tx_coalesce_usecs);
1607 struct mv643xx_eth_private *mp = netdev_priv(dev);
1612 er->rx_pending = mp->rx_ring_size;
1613 er->tx_pending = mp->tx_ring_size;
1619 struct mv643xx_eth_private *mp = netdev_priv(dev);
1624 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1625 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1627 if (mp->tx_ring_size != er->tx_pending)
1629 mp->tx_ring_size, er->tx_pending);
1647 struct mv643xx_eth_private *mp = netdev_priv(dev);
1650 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1673 struct mv643xx_eth_private *mp = netdev_priv(dev);
1677 mib_counters_update(mp);
1686 p = ((void *)mp->dev) + stat->netdev_off;
1688 p = ((void *)mp) + stat->mp_off;
1723 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1725 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1726 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1736 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1738 wrlp(mp, MAC_ADDR_HIGH,
1740 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
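
uc_addr_set() stores the last two MAC bytes in MAC_ADDR_LOW as (addr[4] << 8) | addr[5]; the MAC_ADDR_HIGH write is truncated in this listing, but by symmetry the first four bytes presumably occupy it most-significant byte first. A sketch of the matching decode for uc_addr_get(), under that assumption (helper name hypothetical):

/* Assumed decode, mirroring the MAC_ADDR_LOW packing shown above. */
static void example_uc_addr_get(struct mv643xx_eth_private *mp,
                                unsigned char *addr)
{
        unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
        unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}
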
1766 struct mv643xx_eth_private *mp = netdev_priv(dev);
1771 uc_addr_set(mp, dev->dev_addr);
1773 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1782 int off = UNICAST_TABLE(mp->port_num) + i;
1796 wrl(mp, off, v);
1799 wrlp(mp, PORT_CONFIG, port_config);
1822 struct mv643xx_eth_private *mp = netdev_priv(dev);
1833 port_num = mp->port_num;
1836 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1837 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1867 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1868 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
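
The unicast and multicast filters are small tables written through wrl(); the mc_spec[i >> 2] indexing above suggests each 32-bit table register packs four one-byte entries, and on this hardware family bit 0 of an entry byte is typically the accept bit. A loosely hedged sketch of packing one such register word (helper name and the exact bit meaning are assumptions):

/* Hypothetical: pack four filter-table entries into one 32-bit register
 * word, assuming one byte per entry with bit 0 = accept. */
static u32 example_pack_filter_word(const u8 accept[4])
{
        u32 word = 0;
        int j;

        for (j = 0; j < 4; j++)
                if (accept[j])
                        word |= 0x01 << (j * 8);
        return word;
}
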
1898 static int rxq_init(struct mv643xx_eth_private *mp, int index)
1900 struct rx_queue *rxq = mp->rxq + index;
1907 rxq->rx_ring_size = mp->rx_ring_size;
1915 if (index == 0 && size <= mp->rx_desc_sram_size) {
1916 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1917 mp->rx_desc_sram_size);
1918 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1920 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1926 netdev_err(mp->dev,
1954 if (index == 0 && size <= mp->rx_desc_sram_size)
1957 dma_free_coherent(mp->dev->dev.parent, size,
1967 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1980 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1985 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1988 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
1994 static int txq_init(struct mv643xx_eth_private *mp, int index)
1996 struct tx_queue *txq = mp->txq + index;
2003 txq->tx_ring_size = mp->tx_ring_size;
2018 if (index == 0 && size <= mp->tx_desc_sram_size) {
2019 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2020 mp->tx_desc_sram_size);
2021 txq->tx_desc_dma = mp->tx_desc_sram_addr;
2023 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2029 netdev_err(mp->dev,
2052 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2056 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2067 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2075 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2078 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2081 dma_free_coherent(mp->dev->dev.parent,
2088 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2093 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2100 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2104 wrlp(mp, INT_CAUSE, ~int_cause);
2105 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2106 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
2107 mp->work_rx |= (int_cause & INT_RX) >> 2;
2112 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2114 mp->work_link = 1;
2115 mp->work_tx |= int_cause_ext & INT_EXT_TX;
2124 struct mv643xx_eth_private *mp = netdev_priv(dev);
2126 if (unlikely(!mv643xx_eth_collect_events(mp)))
2129 wrlp(mp, INT_MASK, 0);
2130 napi_schedule(&mp->napi);
2135 static void handle_link_event(struct mv643xx_eth_private *mp)
2137 struct net_device *dev = mp->dev;
2143 port_status = rdlp(mp, PORT_STATUS);
2152 for (i = 0; i < mp->txq_count; i++) {
2153 struct tx_queue *txq = mp->txq + i;
2188 struct mv643xx_eth_private *mp;
2191 mp = container_of(napi, struct mv643xx_eth_private, napi);
2193 if (unlikely(mp->oom)) {
2194 mp->oom = 0;
2195 del_timer(&mp->rx_oom);
2204 if (mp->work_link) {
2205 mp->work_link = 0;
2206 handle_link_event(mp);
2211 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2212 if (likely(!mp->oom))
2213 queue_mask |= mp->work_rx_refill;
2216 if (mv643xx_eth_collect_events(mp))
2228 if (mp->work_tx_end & queue_mask) {
2229 txq_kick(mp->txq + queue);
2230 } else if (mp->work_tx & queue_mask) {
2231 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2232 txq_maybe_wake(mp->txq + queue);
2233 } else if (mp->work_rx & queue_mask) {
2234 work_done += rxq_process(mp->rxq + queue, work_tbd);
2235 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2236 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2243 if (mp->oom)
2244 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2246 wrlp(mp, INT_MASK, mp->int_mask);
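
The interrupt and poll lines above follow the usual NAPI handshake: the hard interrupt handler masks the port (wrlp(mp, INT_MASK, 0)) and schedules the poller, and mv643xx_eth_poll() works through the per-queue bitmaps (work_tx, work_tx_end, work_rx, work_rx_refill) before rewriting INT_MASK with mp->int_mask at line 2246. A condensed sketch of that handshake, not the driver's actual control flow (helper names hypothetical):

/* Condensed NAPI handshake, for orientation only. */
static irqreturn_t example_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (!mv643xx_eth_collect_events(mp))
                return IRQ_NONE;

        wrlp(mp, INT_MASK, 0);          /* mask until the poller is done */
        napi_schedule(&mp->napi);
        return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
        struct mv643xx_eth_private *mp =
                container_of(napi, struct mv643xx_eth_private, napi);
        int work_done = 0;

        /* ... drain work_rx / work_tx / work_tx_end up to budget ... */

        if (work_done < budget) {
                napi_complete(napi);
                wrlp(mp, INT_MASK, mp->int_mask);   /* re-enable interrupts */
        }
        return work_done;
}
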
2254 struct mv643xx_eth_private *mp = (void *)data;
2256 napi_schedule(&mp->napi);
2259 static void port_start(struct mv643xx_eth_private *mp)
2267 if (mp->phy != NULL) {
2270 mv643xx_eth_get_settings(mp->dev, &cmd);
2271 phy_init_hw(mp->phy);
2272 mv643xx_eth_set_settings(mp->dev, &cmd);
2273 phy_start(mp->phy);
2279 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2282 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2285 if (mp->phy == NULL)
2287 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2292 tx_set_rate(mp, 1000000000, 16777216);
2293 for (i = 0; i < mp->txq_count; i++) {
2294 struct tx_queue *txq = mp->txq + i;
2306 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2311 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2316 mv643xx_eth_program_unicast_filter(mp->dev);
2321 for (i = 0; i < mp->rxq_count; i++) {
2322 struct rx_queue *rxq = mp->rxq + i;
2327 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2333 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2343 skb_size = mp->dev->mtu + 36;
2350 mp->skb_size = (skb_size + 7) & ~7;
2358 mp->skb_size += SKB_DMA_REALIGN;
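
mv643xx_eth_recalc_skb_size() adds 36 bytes to the MTU, presumably covering the hardware-prepended padding, VLAN tags and FCS, rounds up to an 8-byte multiple for the DMA engine, and then adds SKB_DMA_REALIGN when needed (its value is defined elsewhere and not visible here). A standalone worked example of the visible arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int mtu = 9000;                 /* example jumbo MTU */
        unsigned int skb_size = mtu + 36;        /* MTU plus fixed overhead */

        skb_size = (skb_size + 7) & ~7;          /* round up to 8-byte multiple */

        printf("mtu %u -> skb_size %u\n", mtu, skb_size);   /* 9000 -> 9040 */
        return 0;
}
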
2363 struct mv643xx_eth_private *mp = netdev_priv(dev);
2367 wrlp(mp, INT_CAUSE, 0);
2368 wrlp(mp, INT_CAUSE_EXT, 0);
2369 rdlp(mp, INT_CAUSE_EXT);
2378 mv643xx_eth_recalc_skb_size(mp);
2380 napi_enable(&mp->napi);
2382 mp->int_mask = INT_EXT;
2384 for (i = 0; i < mp->rxq_count; i++) {
2385 err = rxq_init(mp, i);
2388 rxq_deinit(mp->rxq + i);
2392 rxq_refill(mp->rxq + i, INT_MAX);
2393 mp->int_mask |= INT_RX_0 << i;
2396 if (mp->oom) {
2397 mp->rx_oom.expires = jiffies + (HZ / 10);
2398 add_timer(&mp->rx_oom);
2401 for (i = 0; i < mp->txq_count; i++) {
2402 err = txq_init(mp, i);
2405 txq_deinit(mp->txq + i);
2408 mp->int_mask |= INT_TX_END_0 << i;
2411 add_timer(&mp->mib_counters_timer);
2412 port_start(mp);
2414 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2415 wrlp(mp, INT_MASK, mp->int_mask);
2421 for (i = 0; i < mp->rxq_count; i++)
2422 rxq_deinit(mp->rxq + i);
2429 static void port_reset(struct mv643xx_eth_private *mp)
2434 for (i = 0; i < mp->rxq_count; i++)
2435 rxq_disable(mp->rxq + i);
2436 for (i = 0; i < mp->txq_count; i++)
2437 txq_disable(mp->txq + i);
2440 u32 ps = rdlp(mp, PORT_STATUS);
2448 data = rdlp(mp, PORT_SERIAL_CONTROL);
2452 wrlp(mp, PORT_SERIAL_CONTROL, data);
2457 struct mv643xx_eth_private *mp = netdev_priv(dev);
2460 wrlp(mp, INT_MASK_EXT, 0x00000000);
2461 wrlp(mp, INT_MASK, 0x00000000);
2462 rdlp(mp, INT_MASK);
2464 napi_disable(&mp->napi);
2466 del_timer_sync(&mp->rx_oom);
2469 if (mp->phy)
2470 phy_stop(mp->phy);
2473 port_reset(mp);
2475 mib_counters_update(mp);
2476 del_timer_sync(&mp->mib_counters_timer);
2478 for (i = 0; i < mp->rxq_count; i++)
2479 rxq_deinit(mp->rxq + i);
2480 for (i = 0; i < mp->txq_count; i++)
2481 txq_deinit(mp->txq + i);
2488 struct mv643xx_eth_private *mp = netdev_priv(dev);
2491 if (mp->phy == NULL)
2494 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2502 struct mv643xx_eth_private *mp = netdev_priv(dev);
2508 mv643xx_eth_recalc_skb_size(mp);
2509 tx_set_rate(mp, 1000000000, 16777216);
2531 struct mv643xx_eth_private *mp;
2533 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2534 if (netif_running(mp->dev)) {
2535 netif_tx_stop_all_queues(mp->dev);
2536 port_reset(mp);
2537 port_start(mp);
2538 netif_tx_wake_all_queues(mp->dev);
2544 struct mv643xx_eth_private *mp = netdev_priv(dev);
2548 schedule_work(&mp->tx_timeout_task);
2554 struct mv643xx_eth_private *mp = netdev_priv(dev);
2556 wrlp(mp, INT_MASK, 0x00000000);
2557 rdlp(mp, INT_MASK);
2561 wrlp(mp, INT_MASK, mp->int_mask);
2847 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2849 int addr_shift = 5 * mp->port_num;
2852 data = rdl(mp, PHY_ADDR);
2855 wrl(mp, PHY_ADDR, data);
2858 static int phy_addr_get(struct mv643xx_eth_private *mp)
2862 data = rdl(mp, PHY_ADDR);
2864 return (data >> (5 * mp->port_num)) & 0x1f;
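
phy_addr_set()/phy_addr_get() treat the shared PHY_ADDR register as an array of 5-bit fields, one per port, so port N's PHY address sits in bits [5N+4:5N]. A standalone worked example of the extraction (register value illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int phy_addr_reg = 0x00000441;  /* illustrative register value */
        int port;

        for (port = 0; port < 3; port++) {
                int addr = (phy_addr_reg >> (5 * port)) & 0x1f;
                printf("port %d -> phy addr %d\n", port, addr);
        }
        return 0;
}
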
2867 static void set_params(struct mv643xx_eth_private *mp,
2870 struct net_device *dev = mp->dev;
2876 uc_addr_get(mp, dev->dev_addr);
2878 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2880 mp->rx_ring_size = pd->rx_queue_size;
2881 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2882 mp->rx_desc_sram_size = pd->rx_sram_size;
2884 mp->rxq_count = pd->rx_queue_count ? : 1;
2890 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2892 if (mp->tx_ring_size != tx_ring_size)
2894 mp->tx_ring_size, tx_ring_size);
2896 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2897 mp->tx_desc_sram_size = pd->tx_sram_size;
2899 mp->txq_count = pd->tx_queue_count ? : 1;
2902 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2912 start = phy_addr_get(mp) & 0x1f;
2927 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
2930 phy_addr_set(mp, addr);
2938 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2940 struct phy_device *phy = mp->phy;
2956 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2960 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2963 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2967 if (mp->phy == NULL) {
2981 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3004 struct mv643xx_eth_private *mp;
3024 mp = netdev_priv(dev);
3025 platform_set_drvdata(pdev, mp);
3027 mp->shared = platform_get_drvdata(pd->shared);
3028 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
3029 mp->port_num = pd->port_number;
3031 mp->dev = dev;
3039 wrlp(mp, PORT_SERIAL_CONTROL1,
3040 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
3046 mp->t_clk = 133000000;
3047 mp->clk = devm_clk_get(&pdev->dev, NULL);
3048 if (!IS_ERR(mp->clk)) {
3049 clk_prepare_enable(mp->clk);
3050 mp->t_clk = clk_get_rate(mp->clk);
3051 } else if (!IS_ERR(mp->shared->clk)) {
3052 mp->t_clk = clk_get_rate(mp->shared->clk);
3055 set_params(mp, pd);
3056 netif_set_real_num_tx_queues(dev, mp->txq_count);
3057 netif_set_real_num_rx_queues(dev, mp->rxq_count);
3061 mp->phy = of_phy_connect(mp->dev, pd->phy_node,
3064 if (!mp->phy)
3067 phy_addr_set(mp, mp->phy->addr);
3069 mp->phy = phy_scan(mp, pd->phy_addr);
3071 if (IS_ERR(mp->phy))
3072 err = PTR_ERR(mp->phy);
3074 phy_init(mp, pd->speed, pd->duplex);
3085 init_pscr(mp, pd->speed, pd->duplex);
3088 mib_counters_clear(mp);
3090 init_timer(&mp->mib_counters_timer);
3091 mp->mib_counters_timer.data = (unsigned long)mp;
3092 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
3093 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
3095 spin_lock_init(&mp->mib_counters_lock);
3097 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
3099 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
3101 init_timer(&mp->rx_oom);
3102 mp->rx_oom.data = (unsigned long)mp;
3103 mp->rx_oom.function = oom_timer_wrapper;
3126 if (mp->shared->win_protect)
3127 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
3131 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
3133 set_rx_coal(mp, 250);
3134 set_tx_coal(mp, 0);
3141 mp->port_num, dev->dev_addr);
3143 if (mp->tx_desc_sram_size > 0)
3149 if (!IS_ERR(mp->clk))
3150 clk_disable_unprepare(mp->clk);
3158 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3160 unregister_netdev(mp->dev);
3161 if (mp->phy != NULL)
3162 phy_disconnect(mp->phy);
3163 cancel_work_sync(&mp->tx_timeout_task);
3165 if (!IS_ERR(mp->clk))
3166 clk_disable_unprepare(mp->clk);
3168 free_netdev(mp->dev);
3175 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3178 wrlp(mp, INT_MASK, 0);
3179 rdlp(mp, INT_MASK);
3181 if (netif_running(mp->dev))
3182 port_reset(mp);