Lines matching references to "mp" in the mv643xx_eth driver (mp is the per-port private data, struct mv643xx_eth_private *). The number at the start of each line is the line number of the match in the driver source.

438 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
440 return readl(mp->shared->base + offset);
443 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
445 return readl(mp->base + offset);
448 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
450 writel(data, mp->shared->base + offset);
453 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
455 writel(data, mp->base + offset);
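
The four accessors above split register access into two windows: rdl()/wrl() go through the shared, per-controller register block, while rdlp()/wrlp() go through the per-port block, which probe sets up as mp->shared->base + 0x0400 + (port_number << 10) (line 2888). A minimal sketch of the pattern, with the structs abridged to the fields the matches actually show (the shared struct's name is not visible here and is assumed):

    /* Sketch only: the two register windows implied by rdl/rdlp/wrl/wrlp.
     * Both structs are abridged; the real definitions carry many more fields. */
    struct mv643xx_eth_shared_private {
            void __iomem *base;             /* registers shared by all ports */
            /* ... */
    };

    struct mv643xx_eth_private {
            struct mv643xx_eth_shared_private *shared;
            void __iomem *base;             /* this port's register window */
            /* ... */
    };

    static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
    {
            return readl(mp->shared->base + offset);        /* shared block */
    }

    static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
    {
            return readl(mp->base + offset);                /* per-port block */
    }
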
472 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
473 wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
478 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
481 wrlp(mp, RXQ_COMMAND, mask << 8);
482 while (rdlp(mp, RXQ_COMMAND) & mask)
488 struct mv643xx_eth_private *mp = txq_to_mp(txq);
493 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
498 struct mv643xx_eth_private *mp = txq_to_mp(txq);
499 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
504 struct mv643xx_eth_private *mp = txq_to_mp(txq);
507 wrlp(mp, TXQ_COMMAND, mask << 8);
508 while (rdlp(mp, TXQ_COMMAND) & mask)
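
The rxq/txq enable and disable pairs above drive the same command registers: writing 1 << index into the low byte of RXQ_COMMAND/TXQ_COMMAND starts a queue, writing the mask shifted into bits 8..15 requests a stop, and the caller then polls the low byte until the hardware clears the running bit. A sketch of the disable side; the poll delay is illustrative, not taken from the driver:

    /* Sketch of the stop-and-poll pattern from lines 478-482 and 504-508.
     * The udelay() interval is an assumption for illustration. */
    static void rxq_disable_sketch(struct rx_queue *rxq)
    {
            struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
            u8 mask = 1 << rxq->index;

            wrlp(mp, RXQ_COMMAND, mask << 8);       /* request stop */
            while (rdlp(mp, RXQ_COMMAND) & mask)    /* still running? */
                    udelay(10);
    }
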
514 struct mv643xx_eth_private *mp = txq_to_mp(txq);
515 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
555 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
556 struct net_device_stats *stats = &mp->dev->stats;
582 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
587 mp->work_rx_refill |= 1 << rxq->index;
619 skb->protocol = eth_type_trans(skb, mp->dev);
636 netdev_err(mp->dev,
650 mp->work_rx &= ~(1 << rxq->index);
657 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
667 skb = __skb_dequeue(&mp->rx_recycle);
669 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
672 mp->oom = 1;
689 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
707 mp->work_rx_refill &= ~(1 << rxq->index);
731 struct mv643xx_eth_private *mp = txq_to_mp(txq);
760 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
774 struct mv643xx_eth_private *mp = txq_to_mp(txq);
794 if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
843 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
855 mp->work_tx_end &= ~(1 << txq->index);
868 struct mv643xx_eth_private *mp = netdev_priv(dev);
874 txq = mp->txq + queue;
911 struct mv643xx_eth_private *mp = txq_to_mp(txq);
912 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
918 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
921 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
931 mp->work_tx_end &= ~(1 << txq->index);
936 struct mv643xx_eth_private *mp = txq_to_mp(txq);
937 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
971 netdev_info(mp->dev, "tx error\n");
972 mp->dev->stats.tx_errors++;
976 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
979 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
984 if (skb_queue_len(&mp->rx_recycle) <
985 mp->rx_ring_size &&
986 skb_recycle_check(skb, mp->skb_size))
987 __skb_queue_head(&mp->rx_recycle, skb);
996 mp->work_tx &= ~(1 << txq->index);
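
Lines 984-987 are the tx-reclaim half of the driver's skb recycling: freed transmit skbs that still pass skb_recycle_check() are pushed onto the per-port rx_recycle list (capped at rx_ring_size entries), and rxq_refill (lines 667-669) pops from that list before falling back to netdev_alloc_skb(). A condensed sketch of the reclaim side; the else branch is reconstructed and should be treated as an assumption:

    /* Condensed sketch of the recycle decision in lines 984-987. */
    if (skb != NULL) {
            if (skb_queue_len(&mp->rx_recycle) < mp->rx_ring_size &&
                skb_recycle_check(skb, mp->skb_size))
                    __skb_queue_head(&mp->rx_recycle, skb); /* reuse for rx */
            else
                    dev_kfree_skb(skb);                     /* assumed fallback */
    }
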
1007 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1013 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
1017 mtu = (mp->dev->mtu + 255) >> 8;
1025 switch (mp->shared->tx_bw_control) {
1027 wrlp(mp, TX_BW_RATE, token_rate);
1028 wrlp(mp, TX_BW_MTU, mtu);
1029 wrlp(mp, TX_BW_BURST, bucket_size);
1032 wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1033 wrlp(mp, TX_BW_MTU_MOVED, mtu);
1034 wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1041 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1045 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
1053 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1054 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1059 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1067 switch (mp->shared->tx_bw_control) {
1077 val = rdlp(mp, off);
1079 wrlp(mp, off, val);
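
Lines 1013 and 1045 convert a requested bit rate into token-bucket units relative to the SDMA clock, and line 1017 expresses the MTU in 256-byte units. A worked example, assuming t_clk = 133 MHz (the real value comes from platform data) and the 1 Gbit/s rate that port_start passes on line 2252:

    token_rate = ((rate / 1000) * 64) / (t_clk / 1000)
               = ((1000000000 / 1000) * 64) / (133000000 / 1000)
               = 64000000 / 133000
               ≈ 481

    mtu = (1500 + 255) >> 8 = 6        (256-byte units, rounded up)
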
1179 struct mv643xx_eth_private *mp = netdev_priv(dev);
1186 for (i = 0; i < mp->txq_count; i++) {
1187 struct tx_queue *txq = mp->txq + i;
1201 static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
1208 for (i = 0; i < mp->rxq_count; i++) {
1209 struct rx_queue *rxq = mp->rxq + i;
1216 mp->lro_counters.lro_aggregated = lro_aggregated;
1217 mp->lro_counters.lro_flushed = lro_flushed;
1218 mp->lro_counters.lro_no_desc = lro_no_desc;
1221 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1223 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1226 static void mib_counters_clear(struct mv643xx_eth_private *mp)
1231 mib_read(mp, i);
1234 rdlp(mp, RX_DISCARD_FRAME_CNT);
1235 rdlp(mp, RX_OVERRUN_FRAME_CNT);
1238 static void mib_counters_update(struct mv643xx_eth_private *mp)
1240 struct mib_counters *p = &mp->mib_counters;
1242 spin_lock_bh(&mp->mib_counters_lock);
1243 p->good_octets_received += mib_read(mp, 0x00);
1244 p->bad_octets_received += mib_read(mp, 0x08);
1245 p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1246 p->good_frames_received += mib_read(mp, 0x10);
1247 p->bad_frames_received += mib_read(mp, 0x14);
1248 p->broadcast_frames_received += mib_read(mp, 0x18);
1249 p->multicast_frames_received += mib_read(mp, 0x1c);
1250 p->frames_64_octets += mib_read(mp, 0x20);
1251 p->frames_65_to_127_octets += mib_read(mp, 0x24);
1252 p->frames_128_to_255_octets += mib_read(mp, 0x28);
1253 p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1254 p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1255 p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1256 p->good_octets_sent += mib_read(mp, 0x38);
1257 p->good_frames_sent += mib_read(mp, 0x40);
1258 p->excessive_collision += mib_read(mp, 0x44);
1259 p->multicast_frames_sent += mib_read(mp, 0x48);
1260 p->broadcast_frames_sent += mib_read(mp, 0x4c);
1261 p->unrec_mac_control_received += mib_read(mp, 0x50);
1262 p->fc_sent += mib_read(mp, 0x54);
1263 p->good_fc_received += mib_read(mp, 0x58);
1264 p->bad_fc_received += mib_read(mp, 0x5c);
1265 p->undersize_received += mib_read(mp, 0x60);
1266 p->fragments_received += mib_read(mp, 0x64);
1267 p->oversize_received += mib_read(mp, 0x68);
1268 p->jabber_received += mib_read(mp, 0x6c);
1269 p->mac_receive_error += mib_read(mp, 0x70);
1270 p->bad_crc_event += mib_read(mp, 0x74);
1271 p->collision += mib_read(mp, 0x78);
1272 p->late_collision += mib_read(mp, 0x7c);
1274 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1275 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1276 spin_unlock_bh(&mp->mib_counters_lock);
1278 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1283 struct mv643xx_eth_private *mp = (void *)_mp;
1285 mib_counters_update(mp);
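
mib_read (line 1223) is just an offset into the per-port MIB block, and the counters are clear-on-read: that is why mib_counters_clear (lines 1231-1235) only reads every offset and discards the result, why mib_counters_update accumulates into software counters under mib_counters_lock, and why the update timer is re-armed every 30 seconds (line 1278) so the 32-bit hardware counters cannot wrap unnoticed. A sketch of the clear path; the loop bounds are reconstructed, not visible in the matches:

    /* Sketch of MIB clearing via read-to-clear (cf. lines 1226-1235).
     * The 0x00..0x7c range and 4-byte stride are assumptions based on
     * the offsets read in mib_counters_update. */
    static void mib_counters_clear_sketch(struct mv643xx_eth_private *mp)
    {
            int i;

            for (i = 0; i < 0x80; i += 4)
                    mib_read(mp, i);                /* read-to-clear */

            /* the two drop counters live outside the MIB block */
            rdlp(mp, RX_DISCARD_FRAME_CNT);
            rdlp(mp, RX_OVERRUN_FRAME_CNT);
    }
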
1301 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1303 u32 val = rdlp(mp, SDMA_CONFIG);
1306 if (mp->shared->extended_rx_coal_limit)
1312 do_div(temp, mp->shared->t_clk);
1317 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1322 temp = (u64)usec * mp->shared->t_clk;
1326 val = rdlp(mp, SDMA_CONFIG);
1327 if (mp->shared->extended_rx_coal_limit) {
1339 wrlp(mp, SDMA_CONFIG, val);
1342 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1346 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1348 do_div(temp, mp->shared->t_clk);
1353 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1357 temp = (u64)usec * mp->shared->t_clk;
1364 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
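
The coalescing helpers (lines 1301-1364) convert between microseconds and hardware tick counts: the set_* side multiplies the requested microseconds by t_clk and divides by a fixed constant, while the get_* side does the inverse with do_div() against t_clk. Assuming one coalescing tick corresponds to 64 t_clk cycles (the constant itself is not visible in the matches) and t_clk = 133 MHz, the 250 us rx default programmed at probe time (line 2952) works out roughly as:

    ticks ≈ usec * t_clk / (64 * 1000000)
          = 250 * 133000000 / 64000000
          ≈ 520
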
1435 mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
1440 err = phy_read_status(mp->phy);
1442 err = phy_ethtool_gset(mp->phy, cmd);
1454 mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1459 port_status = rdlp(mp, PORT_STATUS);
1491 struct mv643xx_eth_private *mp = netdev_priv(dev);
1493 if (mp->phy != NULL)
1494 return mv643xx_eth_get_settings_phy(mp, cmd);
1496 return mv643xx_eth_get_settings_phyless(mp, cmd);
1502 struct mv643xx_eth_private *mp = netdev_priv(dev);
1504 if (mp->phy == NULL)
1512 return phy_ethtool_sset(mp->phy, cmd);
1529 struct mv643xx_eth_private *mp = netdev_priv(dev);
1531 if (mp->phy == NULL)
1534 return genphy_restart_aneg(mp->phy);
1540 struct mv643xx_eth_private *mp = netdev_priv(dev);
1542 ec->rx_coalesce_usecs = get_rx_coal(mp);
1543 ec->tx_coalesce_usecs = get_tx_coal(mp);
1551 struct mv643xx_eth_private *mp = netdev_priv(dev);
1553 set_rx_coal(mp, ec->rx_coalesce_usecs);
1554 set_tx_coal(mp, ec->tx_coalesce_usecs);
1562 struct mv643xx_eth_private *mp = netdev_priv(dev);
1567 er->rx_pending = mp->rx_ring_size;
1568 er->tx_pending = mp->tx_ring_size;
1574 struct mv643xx_eth_private *mp = netdev_priv(dev);
1579 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1580 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
1598 struct mv643xx_eth_private *mp = netdev_priv(dev);
1601 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1624 struct mv643xx_eth_private *mp = netdev_priv(dev);
1628 mib_counters_update(mp);
1629 mv643xx_eth_grab_lro_stats(mp);
1638 p = ((void *)mp->dev) + stat->netdev_off;
1640 p = ((void *)mp) + stat->mp_off;
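
Lines 1638 and 1640 show how the ethtool statistics handler resolves each entry after refreshing the MIB and LRO counters: a stat lives either inside struct net_device (netdev_off) or inside the private struct (mp_off), and the handler adds the matching base pointer at runtime. The offset field names come from the matches; the struct name and remaining fields in this sketch are reconstructed:

    /* Reconstructed sketch of one stats-table entry used by lines 1638-1640:
     * exactly one of the two offsets is valid (the other is -1), and the
     * handler reads sizeof_stat bytes at base + offset. */
    struct mv643xx_eth_stat_sketch {
            char string[ETH_GSTRING_LEN];   /* name reported to ethtool */
            int sizeof_stat;                /* width of the counter in bytes */
            int netdev_off;                 /* offset into struct net_device, or -1 */
            int mp_off;                     /* offset into mv643xx_eth_private, or -1 */
    };
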
1672 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1674 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1675 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1685 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1687 wrlp(mp, MAC_ADDR_HIGH,
1689 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
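
uc_addr_set (lines 1687-1689) splits the six-byte station address across two registers: bytes 0-3 go into MAC_ADDR_HIGH and bytes 4-5 into the low 16 bits of MAC_ADDR_LOW; uc_addr_get (lines 1674-1675) reads them back. A sketch of both directions; the MAC_ADDR_HIGH packing and the unpacking shifts are reconstructed, since only the MAC_ADDR_LOW write is fully visible above:

    /* Sketch of the MAC address register layout implied by lines 1674-1689. */
    static void uc_addr_set_sketch(struct mv643xx_eth_private *mp,
                                   unsigned char *addr)
    {
            wrlp(mp, MAC_ADDR_HIGH,
                 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
            wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
    }

    static void uc_addr_get_sketch(struct mv643xx_eth_private *mp,
                                   unsigned char *addr)
    {
            unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
            unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

            addr[0] = (mac_h >> 24) & 0xff;
            addr[1] = (mac_h >> 16) & 0xff;
            addr[2] = (mac_h >> 8) & 0xff;
            addr[3] = mac_h & 0xff;
            addr[4] = (mac_l >> 8) & 0xff;
            addr[5] = mac_l & 0xff;
    }
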
1715 struct mv643xx_eth_private *mp = netdev_priv(dev);
1720 uc_addr_set(mp, dev->dev_addr);
1722 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1731 int off = UNICAST_TABLE(mp->port_num) + i;
1745 wrl(mp, off, v);
1748 wrlp(mp, PORT_CONFIG, port_config);
1771 struct mv643xx_eth_private *mp = netdev_priv(dev);
1782 port_num = mp->port_num;
1785 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1786 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1816 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1817 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
1847 static int rxq_init(struct mv643xx_eth_private *mp, int index)
1849 struct rx_queue *rxq = mp->rxq + index;
1856 rxq->rx_ring_size = mp->rx_ring_size;
1864 if (index == 0 && size <= mp->rx_desc_sram_size) {
1865 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1866 mp->rx_desc_sram_size);
1867 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1869 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1875 netdev_err(mp->dev,
1885 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1901 rxq->lro_mgr.dev = mp->dev;
1918 if (index == 0 && size <= mp->rx_desc_sram_size)
1921 dma_free_coherent(mp->dev->dev.parent, size,
1931 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1944 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1949 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1952 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
1958 static int txq_init(struct mv643xx_eth_private *mp, int index)
1960 struct tx_queue *txq = mp->txq + index;
1967 txq->tx_ring_size = mp->tx_ring_size;
1975 if (index == 0 && size <= mp->tx_desc_sram_size) {
1976 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1977 mp->tx_desc_sram_size);
1978 txq->tx_desc_dma = mp->tx_desc_sram_addr;
1980 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1986 netdev_err(mp->dev,
2015 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2023 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2026 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
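
rxq_init and txq_init (lines 1864-1869 and 1975-1980) follow the same descriptor-ring placement policy: queue 0 may take its ring from on-chip SRAM via ioremap() when the ring fits, everything else falls back to dma_alloc_coherent(), and the teardown paths (lines 1949-1952 and 2023-2026) mirror the same test when choosing between iounmap() and dma_free_coherent(). A condensed sketch of the rx-side branch, with error handling and the skb ring omitted:

    /* Condensed sketch of the placement branch in lines 1864-1876;
     * size is the ring length times sizeof(struct rx_desc). */
    if (index == 0 && size <= mp->rx_desc_sram_size) {
            /* ring fits: use the dedicated on-chip SRAM */
            rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
                                        mp->rx_desc_sram_size);
            rxq->rx_desc_dma = mp->rx_desc_sram_addr;
    } else {
            /* otherwise use ordinary coherent DMA memory */
            rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
                                                   size, &rxq->rx_desc_dma,
                                                   GFP_KERNEL);
    }
    if (rxq->rx_desc_area == NULL)
            return -ENOMEM;         /* real code logs and unwinds (line 1875) */
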
2032 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2037 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2044 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2048 wrlp(mp, INT_CAUSE, ~int_cause);
2049 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2050 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
2051 mp->work_rx |= (int_cause & INT_RX) >> 2;
2056 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2058 mp->work_link = 1;
2059 mp->work_tx |= int_cause_ext & INT_EXT_TX;
2068 struct mv643xx_eth_private *mp = netdev_priv(dev);
2070 if (unlikely(!mv643xx_eth_collect_events(mp)))
2073 wrlp(mp, INT_MASK, 0);
2074 napi_schedule(&mp->napi);
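
Lines 2068-2074 show the interrupt path: the hard irq handler only folds the cause registers into the mp->work_* bitmasks via mv643xx_eth_collect_events (lines 2037-2059), masks further interrupts, and hands everything to NAPI; mv643xx_eth_poll later re-enables the mask (line 2190). A sketch of the handler, with the return values reconstructed:

    /* Sketch of the irq handler around lines 2068-2074; the IRQ_NONE /
     * IRQ_HANDLED returns are reconstructed, not visible in the matches. */
    static irqreturn_t mv643xx_eth_irq_sketch(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct mv643xx_eth_private *mp = netdev_priv(dev);

            if (unlikely(!mv643xx_eth_collect_events(mp)))
                    return IRQ_NONE;                /* nothing pending */

            wrlp(mp, INT_MASK, 0);                  /* mask until poll finishes */
            napi_schedule(&mp->napi);

            return IRQ_HANDLED;
    }
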
2079 static void handle_link_event(struct mv643xx_eth_private *mp)
2081 struct net_device *dev = mp->dev;
2087 port_status = rdlp(mp, PORT_STATUS);
2096 for (i = 0; i < mp->txq_count; i++) {
2097 struct tx_queue *txq = mp->txq + i;
2132 struct mv643xx_eth_private *mp;
2135 mp = container_of(napi, struct mv643xx_eth_private, napi);
2137 if (unlikely(mp->oom)) {
2138 mp->oom = 0;
2139 del_timer(&mp->rx_oom);
2148 if (mp->work_link) {
2149 mp->work_link = 0;
2150 handle_link_event(mp);
2155 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2156 if (likely(!mp->oom))
2157 queue_mask |= mp->work_rx_refill;
2160 if (mv643xx_eth_collect_events(mp))
2172 if (mp->work_tx_end & queue_mask) {
2173 txq_kick(mp->txq + queue);
2174 } else if (mp->work_tx & queue_mask) {
2175 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2176 txq_maybe_wake(mp->txq + queue);
2177 } else if (mp->work_rx & queue_mask) {
2178 work_done += rxq_process(mp->rxq + queue, work_tbd);
2179 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2180 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2187 if (mp->oom)
2188 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2190 wrlp(mp, INT_MASK, mp->int_mask);
2198 struct mv643xx_eth_private *mp = (void *)data;
2200 napi_schedule(&mp->napi);
2203 static void phy_reset(struct mv643xx_eth_private *mp)
2207 data = phy_read(mp->phy, MII_BMCR);
2212 if (phy_write(mp->phy, MII_BMCR, data) < 0)
2216 data = phy_read(mp->phy, MII_BMCR);
2220 static void port_start(struct mv643xx_eth_private *mp)
2228 if (mp->phy != NULL) {
2231 mv643xx_eth_get_settings(mp->dev, &cmd);
2232 phy_reset(mp);
2233 mv643xx_eth_set_settings(mp->dev, &cmd);
2239 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2242 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2245 if (mp->phy == NULL)
2247 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2252 tx_set_rate(mp, 1000000000, 16777216);
2253 for (i = 0; i < mp->txq_count; i++) {
2254 struct tx_queue *txq = mp->txq + i;
2266 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2271 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2276 mv643xx_eth_program_unicast_filter(mp->dev);
2281 for (i = 0; i < mp->rxq_count; i++) {
2282 struct rx_queue *rxq = mp->rxq + i;
2287 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2293 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2303 skb_size = mp->dev->mtu + 36;
2310 mp->skb_size = (skb_size + 7) & ~7;
2318 mp->skb_size += SKB_DMA_REALIGN;
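
mv643xx_eth_recalc_skb_size (lines 2303-2318) sizes receive buffers from the MTU: add 36 bytes of header/trailer overhead, round up to a multiple of 8 for the DMA engine, then add SKB_DMA_REALIGN headroom (its value is not visible in the matches). With the standard 1500-byte MTU:

    skb_size = 1500 + 36 = 1536
    skb_size = (1536 + 7) & ~7 = 1536      (already 8-byte aligned)
    skb_size += SKB_DMA_REALIGN            (alignment headroom, value not shown above)
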
2323 struct mv643xx_eth_private *mp = netdev_priv(dev);
2327 wrlp(mp, INT_CAUSE, 0);
2328 wrlp(mp, INT_CAUSE_EXT, 0);
2329 rdlp(mp, INT_CAUSE_EXT);
2338 mv643xx_eth_recalc_skb_size(mp);
2340 napi_enable(&mp->napi);
2342 skb_queue_head_init(&mp->rx_recycle);
2344 mp->int_mask = INT_EXT;
2346 for (i = 0; i < mp->rxq_count; i++) {
2347 err = rxq_init(mp, i);
2350 rxq_deinit(mp->rxq + i);
2354 rxq_refill(mp->rxq + i, INT_MAX);
2355 mp->int_mask |= INT_RX_0 << i;
2358 if (mp->oom) {
2359 mp->rx_oom.expires = jiffies + (HZ / 10);
2360 add_timer(&mp->rx_oom);
2363 for (i = 0; i < mp->txq_count; i++) {
2364 err = txq_init(mp, i);
2367 txq_deinit(mp->txq + i);
2370 mp->int_mask |= INT_TX_END_0 << i;
2373 port_start(mp);
2375 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2376 wrlp(mp, INT_MASK, mp->int_mask);
2382 for (i = 0; i < mp->rxq_count; i++)
2383 rxq_deinit(mp->rxq + i);
2390 static void port_reset(struct mv643xx_eth_private *mp)
2395 for (i = 0; i < mp->rxq_count; i++)
2396 rxq_disable(mp->rxq + i);
2397 for (i = 0; i < mp->txq_count; i++)
2398 txq_disable(mp->txq + i);
2401 u32 ps = rdlp(mp, PORT_STATUS);
2409 data = rdlp(mp, PORT_SERIAL_CONTROL);
2413 wrlp(mp, PORT_SERIAL_CONTROL, data);
2418 struct mv643xx_eth_private *mp = netdev_priv(dev);
2421 wrlp(mp, INT_MASK_EXT, 0x00000000);
2422 wrlp(mp, INT_MASK, 0x00000000);
2423 rdlp(mp, INT_MASK);
2425 napi_disable(&mp->napi);
2427 del_timer_sync(&mp->rx_oom);
2433 port_reset(mp);
2435 mib_counters_update(mp);
2436 del_timer_sync(&mp->mib_counters_timer);
2438 skb_queue_purge(&mp->rx_recycle);
2440 for (i = 0; i < mp->rxq_count; i++)
2441 rxq_deinit(mp->rxq + i);
2442 for (i = 0; i < mp->txq_count; i++)
2443 txq_deinit(mp->txq + i);
2450 struct mv643xx_eth_private *mp = netdev_priv(dev);
2452 if (mp->phy != NULL)
2453 return phy_mii_ioctl(mp->phy, ifr, cmd);
2460 struct mv643xx_eth_private *mp = netdev_priv(dev);
2466 mv643xx_eth_recalc_skb_size(mp);
2467 tx_set_rate(mp, 1000000000, 16777216);
2489 struct mv643xx_eth_private *mp;
2491 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2492 if (netif_running(mp->dev)) {
2493 netif_tx_stop_all_queues(mp->dev);
2494 port_reset(mp);
2495 port_start(mp);
2496 netif_tx_wake_all_queues(mp->dev);
2502 struct mv643xx_eth_private *mp = netdev_priv(dev);
2506 schedule_work(&mp->tx_timeout_task);
2512 struct mv643xx_eth_private *mp = netdev_priv(dev);
2514 wrlp(mp, INT_MASK, 0x00000000);
2515 rdlp(mp, INT_MASK);
2519 wrlp(mp, INT_MASK, mp->int_mask);
2713 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2715 int addr_shift = 5 * mp->port_num;
2718 data = rdl(mp, PHY_ADDR);
2721 wrl(mp, PHY_ADDR, data);
2724 static int phy_addr_get(struct mv643xx_eth_private *mp)
2728 data = rdl(mp, PHY_ADDR);
2730 return (data >> (5 * mp->port_num)) & 0x1f;
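
phy_addr_set and phy_addr_get (lines 2713-2730) treat the PHY_ADDR register as an array of 5-bit fields, one per port, at bit position 5 * port_num. A sketch of the setter; the read-modify-write masking is reconstructed beyond what lines 2718-2721 show:

    /* Sketch of the per-port 5-bit field update implied by lines 2713-2730. */
    static void phy_addr_set_sketch(struct mv643xx_eth_private *mp, int phy_addr)
    {
            int addr_shift = 5 * mp->port_num;
            u32 data;

            data = rdl(mp, PHY_ADDR);
            data &= ~(0x1f << addr_shift);          /* clear this port's field */
            data |= (phy_addr & 0x1f) << addr_shift;
            wrl(mp, PHY_ADDR, data);
    }
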
2733 static void set_params(struct mv643xx_eth_private *mp,
2736 struct net_device *dev = mp->dev;
2741 uc_addr_get(mp, dev->dev_addr);
2743 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2745 mp->rx_ring_size = pd->rx_queue_size;
2746 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2747 mp->rx_desc_sram_size = pd->rx_sram_size;
2749 mp->rxq_count = pd->rx_queue_count ? : 1;
2751 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2753 mp->tx_ring_size = pd->tx_queue_size;
2754 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2755 mp->tx_desc_sram_size = pd->tx_sram_size;
2757 mp->txq_count = pd->tx_queue_count ? : 1;
2760 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2763 struct mii_bus *bus = mp->shared->smi->smi_bus;
2770 start = phy_addr_get(mp) & 0x1f;
2787 phy_addr_set(mp, addr);
2794 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2796 struct phy_device *phy = mp->phy;
2798 phy_reset(mp);
2800 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
2816 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2820 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2823 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2827 if (mp->phy == NULL) {
2841 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2864 struct mv643xx_eth_private *mp;
2884 mp = netdev_priv(dev);
2885 platform_set_drvdata(pdev, mp);
2887 mp->shared = platform_get_drvdata(pd->shared);
2888 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
2889 mp->port_num = pd->port_number;
2891 mp->dev = dev;
2893 set_params(mp, pd);
2894 netif_set_real_num_tx_queues(dev, mp->txq_count);
2895 netif_set_real_num_rx_queues(dev, mp->rxq_count);
2898 mp->phy = phy_scan(mp, pd->phy_addr);
2900 if (mp->phy != NULL)
2901 phy_init(mp, pd->speed, pd->duplex);
2905 init_pscr(mp, pd->speed, pd->duplex);
2908 mib_counters_clear(mp);
2910 init_timer(&mp->mib_counters_timer);
2911 mp->mib_counters_timer.data = (unsigned long)mp;
2912 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2913 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2914 add_timer(&mp->mib_counters_timer);
2916 spin_lock_init(&mp->mib_counters_lock);
2918 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2920 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2922 init_timer(&mp->rx_oom);
2923 mp->rx_oom.data = (unsigned long)mp;
2924 mp->rx_oom.function = oom_timer_wrapper;
2945 if (mp->shared->win_protect)
2946 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
2950 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2952 set_rx_coal(mp, 250);
2953 set_tx_coal(mp, 0);
2960 mp->port_num, dev->dev_addr);
2962 if (mp->tx_desc_sram_size > 0)
2975 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2977 unregister_netdev(mp->dev);
2978 if (mp->phy != NULL)
2979 phy_detach(mp->phy);
2980 cancel_work_sync(&mp->tx_timeout_task);
2981 free_netdev(mp->dev);
2990 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2993 wrlp(mp, INT_MASK, 0);
2994 rdlp(mp, INT_MASK);
2996 if (netif_running(mp->dev))
2997 port_reset(mp);