Lines matching refs:np: every reference in the driver to np, the per-device private data (struct netdev_private) returned by netdev_priv().

332 static void free_rxtx_rings(struct netdev_private *np);
336 static void free_ringdesc(struct netdev_private *np);
364 struct netdev_private *np;
383 dev = alloc_etherdev(sizeof(*np));
402 np = netdev_priv(dev);
403 np->pci_dev = pdev;
404 np->chip_id = chip_idx;
405 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
406 spin_lock_init(&np->lock);
407 np->mii_if.dev = dev;
408 np->mii_if.mdio_read = mdio_read;
409 np->mii_if.mdio_write = mdio_write;
410 np->base_addr = ioaddr;
420 np->mii_if.full_duplex = 1;
427 np->mii_if.full_duplex = 1;
429 if (np->mii_if.full_duplex)
430 np->mii_if.force_media = 1;
444 if (np->drv_flags & CanHaveMII) {
449 np->phys[phy_idx++] = phy;
450 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
451 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
455 np->mii, phy, mii_status,
456 np->mii_if.advertising);
459 np->mii_cnt = phy_idx;
460 np->mii_if.phy_id = np->phys[0];
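
Lines 444-460 above are the tail of the MII PHY scan in the probe routine. A hedged sketch of the surrounding loop, reconstructed only to show how the listed np-> fields fit together; the loop bounds, the BMSR validity test, and the MII_PHYSID2 continuation of line 451 are assumptions, not taken from the listing:

    /* Sketch of the PHY scan implied by lines 444-460.  Only the np->
     * assignments are grounded in the listing; everything else is assumed. */
    static int scan_phys(struct net_device *dev, struct netdev_private *np)
    {
        int phy, phy_idx = 0;

        for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); phy++) {
            int mii_status = mdio_read(dev, phy, MII_BMSR);

            if (mii_status == 0xffff || mii_status == 0x0000)
                continue;                       /* nothing answering at this address */
            np->phys[phy_idx++] = phy;
            np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
            np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16) +
                      mdio_read(dev, phy, MII_PHYSID2);
        }
        np->mii_cnt = phy_idx;
        if (phy_idx)
            np->mii_if.phy_id = np->phys[0];    /* first PHY found becomes the active one */
        return phy_idx;
    }
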
569 struct netdev_private *np = netdev_priv(dev);
570 void __iomem *mdio_addr = np->base_addr + MIICtrl;
599 struct netdev_private *np = netdev_priv(dev);
600 void __iomem *mdio_addr = np->base_addr + MIICtrl;
604 if (location == 4 && phy_id == np->phys[0])
605 np->mii_if.advertising = value;
631 struct netdev_private *np = netdev_priv(dev);
632 void __iomem *ioaddr = np->base_addr;
633 const int irq = np->pci_dev->irq;
649 spin_lock_irq(&np->lock);
652 spin_unlock_irq(&np->lock);
659 init_timer(&np->timer);
660 np->timer.expires = jiffies + 1*HZ;
661 np->timer.data = (unsigned long)dev;
662 np->timer.function = netdev_timer; /* timer handler */
663 add_timer(&np->timer);
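
Lines 659-663 set up the periodic link timer with the legacy API (init_timer() plus explicit .data/.function assignment). Purely as a hedged aside, the same setup on kernels that provide timer_setup()/from_timer() would look roughly like the sketch below; the np->timer field, netdev_timer, and the mii_if.dev back-pointer (line 407) come from the listing, while the conversion of netdev_timer() to the new callback signature is an assumption:

    #include <linux/timer.h>
    #include <linux/netdevice.h>

    /* Sketch: modern-API equivalent of lines 659-663, assuming netdev_timer()
     * is converted to take the timer pointer instead of an unsigned long. */
    static void netdev_timer(struct timer_list *t)
    {
        struct netdev_private *np = from_timer(np, t, timer);
        struct net_device *dev = np->mii_if.dev;   /* recovered via line 407 */

        /* ...periodic link check, then mod_timer() to rearm... */
    }

    static void start_link_timer(struct netdev_private *np)
    {
        timer_setup(&np->timer, netdev_timer, 0);  /* replaces init_timer + .data/.function */
        mod_timer(&np->timer, jiffies + 1 * HZ);   /* replaces .expires + add_timer */
    }
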
674 struct netdev_private *np = netdev_priv(dev);
678 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
681 return np->csr6;
683 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
689 np->phys[0]);
692 return np->csr6;
698 np->phys[0]);
702 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
711 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
716 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
717 negotiated = mii_reg & np->mii_if.advertising;
722 duplex |= np->mii_if.force_media;
724 result = np->csr6 & ~0x20000200;
729 if (result != np->csr6 && debug)
733 np->phys[0]);
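
Lines 711-733 decide duplex for CSR6 by reading BMCR and LPA and ANDing the partner abilities with np->mii_if.advertising (line 717). The exact expression is not in the listing, so the sketch below only illustrates the idea using the generic helper from <linux/mii.h>:

    #include <linux/mii.h>

    /* Sketch: standard duplex resolution from negotiated = lpa & advertising,
     * as lines 716-722 imply.  mii_nway_result() picks the highest-priority
     * common mode; force_media (set at line 430) overrides autonegotiation. */
    static int resolve_full_duplex(u16 advertising, u16 lpa, int force_media)
    {
        unsigned int mode = mii_nway_result(lpa & advertising);

        if (force_media)
            return 1;
        return mode == LPA_100FULL || mode == LPA_10FULL;
    }
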
740 struct netdev_private *np = netdev_priv(dev);
741 void __iomem *ioaddr = np->base_addr;
746 if (new==np->csr6)
749 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
771 np->csr6 = new;
773 iowrite32(np->csr6, ioaddr + NetworkConfig);
775 np->mii_if.full_duplex = 1;
781 struct netdev_private *np = netdev_priv(dev);
782 void __iomem *ioaddr = np->base_addr;
788 spin_lock_irq(&np->lock);
790 spin_unlock_irq(&np->lock);
791 np->timer.expires = jiffies + 10*HZ;
792 add_timer(&np->timer);
797 struct netdev_private *np = netdev_priv(dev);
800 np->rx_head_desc = &np->rx_ring[0];
801 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
805 np->rx_ring[i].length = np->rx_buf_sz;
806 np->rx_ring[i].status = 0;
807 np->rx_skbuff[i] = NULL;
810 np->rx_ring[i-1].length |= DescEndRing;
814 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
815 np->rx_skbuff[i] = skb;
818 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
819 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
821 np->rx_ring[i].buffer1 = np->rx_addr[i];
822 np->rx_ring[i].status = DescOwned;
825 np->cur_rx = 0;
826 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
830 np->tx_skbuff[i] = NULL;
831 np->tx_ring[i].status = 0;
833 np->tx_full = 0;
834 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
836 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
837 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
838 np->base_addr + TxRingPtr);
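
Lines 800-838 imply the descriptor layout: a single DMA area holds RX_RING_SIZE receive descriptors immediately followed by the transmit ring (line 801 casts &np->rx_ring[RX_RING_SIZE] to the TX ring, and line 837 programs TxRingPtr at that same offset from ring_dma_addr). A sketch of the assumed descriptor format; the field names appear in the listing, the exact widths are an assumption:

    #include <linux/types.h>

    /* Assumed 16-byte, tulip-style descriptor behind lines 800-838. */
    struct w840_rx_desc {
        s32 status;     /* DescOwned hands the slot to the NIC (line 822) */
        s32 length;     /* buffer size; DescEndRing marks the last slot (line 810) */
        u32 buffer1;    /* DMA address of the receive buffer (line 821) */
        u32 buffer2;    /* second buffer address; the TX path uses it at line 1020 */
    };

    /* Memory map of the single allocation programmed at lines 836-838:
     *
     *   ring_dma_addr                                   -> RxRingPtr
     *     rx_ring[0 .. RX_RING_SIZE-1]
     *   ring_dma_addr + RX_RING_SIZE * sizeof(struct w840_rx_desc)
     *     tx_ring[0 .. TX_RING_SIZE-1]                  -> TxRingPtr
     */
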
842 static void free_rxtx_rings(struct netdev_private* np)
847 np->rx_ring[i].status = 0;
848 if (np->rx_skbuff[i]) {
849 pci_unmap_single(np->pci_dev,
850 np->rx_addr[i],
851 np->rx_skbuff[i]->len,
853 dev_kfree_skb(np->rx_skbuff[i]);
855 np->rx_skbuff[i] = NULL;
858 if (np->tx_skbuff[i]) {
859 pci_unmap_single(np->pci_dev,
860 np->tx_addr[i],
861 np->tx_skbuff[i]->len,
863 dev_kfree_skb(np->tx_skbuff[i]);
865 np->tx_skbuff[i] = NULL;
871 struct netdev_private *np = netdev_priv(dev);
872 void __iomem *ioaddr = np->base_addr;
915 np->csr6 = 0;
929 struct netdev_private *np = netdev_priv(dev);
930 void __iomem *ioaddr = np->base_addr;
931 const int irq = np->pci_dev->irq;
938 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
940 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
942 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
944 printk(KERN_CONT " %08x", np->tx_ring[i].status);
948 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
952 spin_lock_irq(&np->lock);
959 iowrite32(1, np->base_addr+PCIBusCfg);
962 free_rxtx_rings(np);
965 spin_unlock_irq(&np->lock);
970 np->stats.tx_errors++;
976 struct netdev_private *np = netdev_priv(dev);
978 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
980 np->rx_ring = pci_alloc_consistent(np->pci_dev,
983 &np->ring_dma_addr);
984 if(!np->rx_ring)
990 static void free_ringdesc(struct netdev_private *np)
992 pci_free_consistent(np->pci_dev,
995 np->rx_ring, np->ring_dma_addr);
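
alloc_ringdesc() and free_ringdesc() (lines 976-995) obtain and release both rings in one coherent DMA buffer through the legacy pci_alloc_consistent()/pci_free_consistent() wrappers; the size argument is elided in the listing. A hedged sketch of the same pair expressed with the generic DMA API the wrappers sit on top of, with ring_bytes standing in for that elided size:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Sketch: generic-API equivalent of lines 980-995.  The legacy wrapper
     * passes GFP_ATOMIC; GFP_KERNEL would be appropriate in a sleepable
     * caller such as ndo_open. */
    static void *alloc_rings(struct pci_dev *pdev, size_t ring_bytes,
                             dma_addr_t *ring_dma)
    {
        return dma_alloc_coherent(&pdev->dev, ring_bytes, ring_dma, GFP_KERNEL);
    }

    static void free_rings(struct pci_dev *pdev, size_t ring_bytes,
                           void *rings, dma_addr_t ring_dma)
    {
        dma_free_coherent(&pdev->dev, ring_bytes, rings, ring_dma);
    }
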
1001 struct netdev_private *np = netdev_priv(dev);
1008 entry = np->cur_tx % TX_RING_SIZE;
1010 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1012 np->tx_skbuff[entry] = skb;
1014 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1016 np->tx_ring[entry].length = DescWholePkt | skb->len;
1020 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1021 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1024 np->tx_ring[entry].length |= DescEndRing;
1028 * increasing np->cur_tx and setting DescOwned:
1029 * - if np->cur_tx is increased first the interrupt
1034 * since the np->cur_tx was not yet increased.
1036 spin_lock_irq(&np->lock);
1037 np->cur_tx++;
1040 np->tx_ring[entry].status = DescOwned;
1042 iowrite32(0, np->base_addr + TxStartDemand);
1043 np->tx_q_bytes += skb->len;
1046 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1047 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1050 np->tx_full = 1;
1052 spin_unlock_irq(&np->lock);
1056 np->cur_tx, entry);
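
The comment fragments at lines 1028-1034 describe the hazard start_tx has to avoid: the interrupt handler must never observe a descriptor that is owned by the NIC but not yet counted in cur_tx, or the reverse. The listing resolves it by doing both steps under np->lock (lines 1036-1042), the same lock the interrupt handler takes before reclaiming (lines 1144, 1160). A hedged sketch of that publish pattern reduced to its essentials; the wmb() is illustrative, since the listing does not show the driver's barrier placement:

    #include <linux/spinlock.h>

    /* Sketch of the ownership handoff in lines 1036-1042: bump the producer
     * index, then flip the ownership bit, both under the lock that tx_done
     * also takes, so reclaim always sees a consistent (cur_tx, DescOwned) pair. */
    static void publish_tx_desc(struct netdev_private *np, int entry)
    {
        spin_lock_irq(&np->lock);
        np->cur_tx++;                                   /* packet is now "in flight" */
        wmb();                                          /* illustrative ordering barrier */
        np->tx_ring[entry].status = DescOwned;          /* hand the descriptor to the NIC */
        iowrite32(0, np->base_addr + TxStartDemand);    /* poke the chip, as line 1042 does */
        spin_unlock_irq(&np->lock);
    }
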
1063 struct netdev_private *np = netdev_priv(dev);
1064 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1065 int entry = np->dirty_tx % TX_RING_SIZE;
1066 int tx_status = np->tx_ring[entry].status;
1076 np->stats.tx_errors++;
1077 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1078 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1079 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1080 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1081 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1082 np->stats.tx_heartbeat_errors++;
1089 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1090 np->stats.collisions += (tx_status >> 3) & 15;
1091 np->stats.tx_packets++;
1094 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1095 np->tx_skbuff[entry]->len,
1097 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1098 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1099 np->tx_skbuff[entry] = NULL;
1101 if (np->tx_full &&
1102 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1103 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1105 np->tx_full = 0;
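
Lines 1046-1050 and 1101-1105 implement stop/wake hysteresis on the TX queue: stop at one watermark (TX_QUEUE_LEN descriptors, or TX_BUG_FIFO_LIMIT queued bytes on chips flagged HasBrokenTx) and wake only at a lower one (TX_QUEUE_LEN_RESTART), so the queue does not flap on every reclaimed descriptor. A minimal sketch with the two checks side by side; netif_stop_queue()/netif_wake_queue() are the stock kernel calls, which the listing elides:

    #include <linux/netdevice.h>

    /* Sketch of the flow-control checks implied by lines 1046-1050 (stop,
     * called from start_tx under np->lock) and 1101-1105 (wake, from tx_done). */
    static void tx_maybe_stop(struct net_device *dev, struct netdev_private *np)
    {
        if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
            ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
            netif_stop_queue(dev);
            np->tx_full = 1;
        }
    }

    static void tx_maybe_wake(struct net_device *dev, struct netdev_private *np)
    {
        if (np->tx_full &&
            np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
            np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
            np->tx_full = 0;
            netif_wake_queue(dev);
        }
    }
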
1116 struct netdev_private *np = netdev_priv(dev);
1117 void __iomem *ioaddr = np->base_addr;
1143 np->cur_tx != np->dirty_tx) {
1144 spin_lock(&np->lock);
1146 spin_unlock(&np->lock);
1160 spin_lock(&np->lock);
1165 spin_unlock(&np->lock);
1180 struct netdev_private *np = netdev_priv(dev);
1181 int entry = np->cur_rx % RX_RING_SIZE;
1182 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1186 entry, np->rx_ring[entry].status);
1191 struct w840_rx_desc *desc = np->rx_head_desc;
1205 np->cur_rx, status);
1206 np->stats.rx_length_errors++;
1213 np->stats.rx_errors++; /* end of a packet.*/
1214 if (status & 0x0890) np->stats.rx_length_errors++;
1215 if (status & 0x004C) np->stats.rx_frame_errors++;
1216 if (status & 0x0002) np->stats.rx_crc_errors++;
1233 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1234 np->rx_skbuff[entry]->len,
1236 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1238 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1239 np->rx_skbuff[entry]->len,
1242 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1243 np->rx_skbuff[entry]->len,
1245 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1246 np->rx_skbuff[entry] = NULL;
1258 np->stats.rx_packets++;
1259 np->stats.rx_bytes += pkt_len;
1261 entry = (++np->cur_rx) % RX_RING_SIZE;
1262 np->rx_head_desc = &np->rx_ring[entry];
1266 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1268 entry = np->dirty_rx % RX_RING_SIZE;
1269 if (np->rx_skbuff[entry] == NULL) {
1270 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1271 np->rx_skbuff[entry] = skb;
1274 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1276 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1277 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1280 np->rx_ring[entry].status = DescOwned;
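
Lines 1233-1246 are the two halves of a classic copybreak receive path: small packets are copied out of the still-mapped DMA buffer (sync_for_cpu, copy, sync_for_device) and the buffer stays in the ring, while larger packets are unmapped and the original skb goes up the stack, with the slot refilled by the loop at lines 1266-1280. A hedged sketch of that decision; rx_copybreak is the conventional threshold name and is assumed here, since the listing does not show it:

    #include <linux/pci.h>
    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    /* Sketch of the copybreak branch behind lines 1233-1246. */
    static struct sk_buff *rx_get_skb(struct net_device *dev,
                                      struct netdev_private *np,
                                      int entry, int pkt_len)
    {
        struct sk_buff *skb;

        if (pkt_len < rx_copybreak &&
            (skb = netdev_alloc_skb_ip_align(dev, pkt_len)) != NULL) {
            pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
                                        np->rx_skbuff[entry]->len,
                                        PCI_DMA_FROMDEVICE);
            skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
            skb_put(skb, pkt_len);
            pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
                                           np->rx_skbuff[entry]->len,
                                           PCI_DMA_FROMDEVICE);
            return skb;                     /* mapped buffer stays in the ring */
        }

        pci_unmap_single(np->pci_dev, np->rx_addr[entry],
                         np->rx_skbuff[entry]->len, PCI_DMA_FROMDEVICE);
        skb = np->rx_skbuff[entry];
        skb_put(skb, pkt_len);
        np->rx_skbuff[entry] = NULL;        /* refilled by the loop at line 1266 */
        return skb;
    }
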
1288 struct netdev_private *np = netdev_priv(dev);
1289 void __iomem *ioaddr = np->base_addr;
1295 spin_lock(&np->lock);
1303 new = np->csr6 + 0x4000;
1305 new = (np->csr6 >> 14)&0x7f;
1310 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1316 np->stats.rx_errors++;
1323 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1325 spin_unlock(&np->lock);
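
The error handler above adjusts a 7-bit field inside np->csr6 by shifting it out (line 1305), changing it, and masking it back in (line 1310). A generic illustration of that read-modify-write pattern, not the chip-specific threshold logic:

    /* Sketch: update a 'width'-bit field at 'shift' inside a register image,
     * the pattern lines 1303-1310 apply to the CSR6 copy in np->csr6. */
    static u32 set_field(u32 reg, unsigned int shift, unsigned int width, u32 val)
    {
        u32 mask = ((1u << width) - 1) << shift;

        return (reg & ~mask) | ((val << shift) & mask);
    }
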
1330 struct netdev_private *np = netdev_priv(dev);
1331 void __iomem *ioaddr = np->base_addr;
1334 spin_lock_irq(&np->lock);
1336 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1337 spin_unlock_irq(&np->lock);
1339 return &np->stats;
1345 struct netdev_private *np = netdev_priv(dev);
1346 void __iomem *ioaddr = np->base_addr;
1379 struct netdev_private *np = netdev_priv(dev);
1381 spin_lock_irq(&np->lock);
1382 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1383 spin_unlock_irq(&np->lock);
1388 struct netdev_private *np = netdev_priv(dev);
1392 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1397 struct netdev_private *np = netdev_priv(dev);
1400 spin_lock_irq(&np->lock);
1401 rc = mii_ethtool_gset(&np->mii_if, cmd);
1402 spin_unlock_irq(&np->lock);
1409 struct netdev_private *np = netdev_priv(dev);
1412 spin_lock_irq(&np->lock);
1413 rc = mii_ethtool_sset(&np->mii_if, cmd);
1414 spin_unlock_irq(&np->lock);
1421 struct netdev_private *np = netdev_priv(dev);
1422 return mii_nway_restart(&np->mii_if);
1427 struct netdev_private *np = netdev_priv(dev);
1428 return mii_link_ok(&np->mii_if);
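
Lines 1388-1428 delegate the ethtool and link callbacks to the generic MII library (mii_ethtool_gset(), mii_ethtool_sset(), mii_nway_restart(), mii_link_ok()), all driven by the mii_if_info initialized at lines 406-409. A hedged sketch of how the two simplest handlers are conventionally collected into an ethtool_ops table; the handler and table names are placeholders, as the table itself does not appear in the listing:

    #include <linux/ethtool.h>
    #include <linux/mii.h>
    #include <linux/netdevice.h>

    static int sketch_nway_reset(struct net_device *dev)
    {
        struct netdev_private *np = netdev_priv(dev);

        return mii_nway_restart(&np->mii_if);           /* as at line 1422 */
    }

    static u32 sketch_get_link(struct net_device *dev)
    {
        struct netdev_private *np = netdev_priv(dev);

        return mii_link_ok(&np->mii_if);                /* as at line 1428 */
    }

    /* The drvinfo/settings handlers (lines 1388-1414) would be added alongside;
     * the table is installed with dev->ethtool_ops = &sketch_ethtool_ops;. */
    static const struct ethtool_ops sketch_ethtool_ops = {
        .nway_reset = sketch_nway_reset,
        .get_link   = sketch_get_link,
    };
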
1454 struct netdev_private *np = netdev_priv(dev);
1462 spin_lock_irq(&np->lock);
1464 spin_unlock_irq(&np->lock);
1468 spin_lock_irq(&np->lock);
1470 spin_unlock_irq(&np->lock);
1479 struct netdev_private *np = netdev_priv(dev);
1480 void __iomem *ioaddr = np->base_addr;
1489 np->cur_tx, np->dirty_tx,
1490 np->cur_rx, np->dirty_rx);
1494 spin_lock_irq(&np->lock);
1498 spin_unlock_irq(&np->lock);
1500 free_irq(np->pci_dev->irq, dev);
1505 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1511 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1514 i, np->tx_ring[i].length,
1515 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1516 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1519 i, np->rx_ring[i].length,
1520 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1525 del_timer_sync(&np->timer);
1527 free_rxtx_rings(np);
1528 free_ringdesc(np);
1538 struct netdev_private *np = netdev_priv(dev);
1541 pci_iounmap(pdev, np->base_addr);
1553 * spin_lock_irq(np->lock), doesn't touch hw if not present
1574 struct netdev_private *np = netdev_priv(dev);
1575 void __iomem *ioaddr = np->base_addr;
1579 del_timer_sync(&np->timer);
1581 spin_lock_irq(&np->lock);
1585 spin_unlock_irq(&np->lock);
1587 synchronize_irq(np->pci_dev->irq);
1590 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1594 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1598 free_rxtx_rings(np);
1609 struct netdev_private *np = netdev_priv(dev);
1621 spin_lock_irq(&np->lock);
1622 iowrite32(1, np->base_addr+PCIBusCfg);
1623 ioread32(np->base_addr+PCIBusCfg);
1628 spin_unlock_irq(&np->lock);
1632 mod_timer(&np->timer, jiffies + 1*HZ);
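
Lines 1622-1623 in the resume path write PCIBusCfg and immediately read it back: the standard idiom for flushing a posted MMIO write so the device has acted on it before the rings are reinitialized (tx_timeout performs the same write at line 959). A minimal sketch of the idiom in isolation:

    #include <linux/io.h>

    /* Sketch of the posted-write flush at lines 1622-1623: the read-back
     * through the same BAR forces the iowrite32() out to the device. */
    static void strobe_pcibuscfg(void __iomem *ioaddr)
    {
        iowrite32(1, ioaddr + PCIBusCfg);
        ioread32(ioaddr + PCIBusCfg);   /* flush the posted write */
    }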