Lines matching refs: np (identifier cross-reference from the dl2k gigabit Ethernet driver; each entry is prefixed with its line number in the source file)

60 static void dl2k_enable_int(struct netdev_private *np)
62 void __iomem *ioaddr = np->ioaddr;
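The listing only captures the ioaddr lookup; the whole helper is tiny. A minimal sketch, assuming the driver's dw16() register-write macro and DEFAULT_INTR interrupt mask as defined in dl2k.h:

static void dl2k_enable_int(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;

	/* Unmask the default interrupt sources with one 16-bit write. */
	dw16(IntEnable, DEFAULT_INTR);
}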
117 struct netdev_private *np;
142 dev = alloc_etherdev (sizeof (*np));
147 np = netdev_priv(dev);
153 np->eeprom_addr = ioaddr;
161 np->ioaddr = ioaddr;
162 np->chip_id = chip_idx;
163 np->pdev = pdev;
164 spin_lock_init (&np->tx_lock);
165 spin_lock_init (&np->rx_lock);
168 np->an_enable = 1;
169 np->tx_coalesce = 1;
172 np->an_enable = 0;
176 np->an_enable = 2;
179 np->speed = 100;
180 np->full_duplex = 1;
183 np->speed = 100;
184 np->full_duplex = 0;
187 np->speed = 10;
188 np->full_duplex = 1;
191 np->speed = 10;
192 np->full_duplex = 0;
195 np->speed = 1000;
196 np->full_duplex = 1;
199 np->speed = 1000;
200 np->full_duplex = 0;
202 np->an_enable = 1;
206 np->jumbo = 1;
209 np->jumbo = 0;
213 np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
216 np->rx_coalesce = rx_coalesce;
217 np->rx_timeout = rx_timeout;
218 np->coalesce = 1;
220 np->tx_flow = (tx_flow == 0) ? 0 : 1;
221 np->rx_flow = (rx_flow == 0) ? 0 : 1;
239 np->tx_ring = ring_space;
240 np->tx_ring_dma = ring_dma;
245 np->rx_ring = ring_space;
246 np->rx_ring_dma = ring_dma;
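Lines 239-246 cache the descriptor rings and their bus addresses; the allocations that feed them are not in this listing. A sketch using the legacy pci_alloc_consistent() API that pairs with the pci_free_consistent() calls at lines 295/297 (the goto labels are assumptions):

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;           /* label assumed */
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;          /* label assumed */
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;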
257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
258 np->link_status = 0;
260 if (np->phy_media) {
262 if (np->an_enable == 2) {
263 np->an_enable = 1;
269 if (np->speed == 1000)
270 np->an_enable = 1;
281 dev->name, np->name, dev->dev_addr, irq);
285 if (np->coalesce)
289 np->rx_coalesce, np->rx_timeout*640);
290 if (np->vlan)
291 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
295 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
297 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
300 pci_iounmap(pdev, np->ioaddr);
302 pci_iounmap(pdev, np->eeprom_addr);
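Lines 295-302 are the tail of the probe function's unwind path. Reconstructed as the usual goto ladder, each label undoing one allocation in reverse order (label names are assumptions):

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, np->ioaddr);
	pci_iounmap(pdev, np->eeprom_addr);
	free_netdev(dev);
	return err;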
315 struct netdev_private *np = netdev_priv(dev);
318 np->phy_addr = 1;
323 np->phy_addr = i;
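find_miiphy() (lines 315-323) defaults to PHY address 1 and then probes the bus. A sketch of the probe loop, assuming the driver's mii_read() helper returns the BMSR and that 0x0000/0xffff mean "no PHY at this address":

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read(dev, i, 1);   /* register 1 = BMSR */
		if (mii_status != 0xffff && mii_status != 0x0000) {
			phy_found++;
			np->phy_addr = i;       /* remember the last hit */
		}
	}
	if (!phy_found) {
		printk(KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}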
337 struct netdev_private *np = netdev_priv(dev);
338 void __iomem *ioaddr = np->ioaddr;
348 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
350 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
364 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
384 np->duplex_polarity = psib[i];
388 np->wake_polarity = psib[i];
392 memcpy (np->name, &(psib[i]), j);
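parse_eeprom() walks a code/length-tagged Product Specific Information Block; lines 384, 388 and 392 are three of the cases. A condensed sketch of that walk, assuming psib[] holds the block, a 0x18 start offset, and the cell ids used by the mainline driver:

	i = 0x18;       /* PSIB start offset, assumption */
	do {
		int cid = psib[i++];    /* code id of this cell */
		int next = psib[i++];   /* offset of the next cell */

		switch (cid) {
		case 0:                 /* format version: nothing to do */
			break;
		case 2:                 /* duplex polarity */
			np->duplex_polarity = psib[i];
			break;
		case 3:                 /* wake polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:                 /* adapter description string */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		default:                /* end of block / unknown cell */
			return 0;
		}
		i = next;
	} while (i < 256);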
412 struct netdev_private *np = netdev_priv(dev);
413 void __iomem *ioaddr = np->ioaddr;
414 const int irq = np->pdev->irq;
431 if (np->jumbo != 0)
441 if (np->coalesce) {
442 dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
454 if (np->vlan) {
458 dw16(VLANId, np->vlan);
460 dw32(VLANTag, 0x8100 << 16 | np->vlan);
466 init_timer (&np->timer);
467 np->timer.expires = jiffies + 1*HZ;
468 np->timer.data = (unsigned long) dev;
469 np->timer.function = rio_timer;
470 add_timer (&np->timer);
476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
477 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
484 dl2k_enable_int(np);
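Lines 476-479 accumulate the MAC control bits; the value presumably lands in the MACCtrl register in a single write before the queue is started and interrupts are unmasked. A sketch, assuming the driver's dw16() accessor and MACCtrl register offset:

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);         /* single 16-bit commit */

	netif_start_queue(dev);
	dl2k_enable_int(np);            /* finally open the interrupt gate */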
492 struct netdev_private *np = netdev_priv(dev);
497 spin_lock_irqsave(&np->rx_lock, flags);
499 if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
502 for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
504 entry = np->old_rx % RX_RING_SIZE;
506 if (np->rx_skbuff[entry] == NULL) {
508 np->rx_buf_sz);
510 np->rx_ring[entry].fraginfo = 0;
516 np->rx_skbuff[entry] = skb;
517 np->rx_ring[entry].fraginfo =
519 (np->pdev, skb->data, np->rx_buf_sz,
522 np->rx_ring[entry].fraginfo |=
523 cpu_to_le64((u64)np->rx_buf_sz << 48);
524 np->rx_ring[entry].status = 0;
527 spin_unlock_irqrestore (&np->rx_lock, flags);
528 np->timer.expires = jiffies + next_tick;
529 add_timer(&np->timer);
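rio_timer() (lines 492-529) exists to retry RX buffer allocation: if an earlier refill failed, the ring entry's skb pointer is still NULL. A sketch of the retry branch that lines 506-510 belong to (the message text is an assumption):

		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				/* Still out of memory: leave the slot empty
				 * and let the next timer tick retry it. */
				np->rx_ring[entry].fraginfo = 0;
				printk(KERN_INFO
				       "%s: Still unable to re-allocate Rx skbuff.#%d\n",
				       dev->name, entry);
				break;
			}
		}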
535 struct netdev_private *np = netdev_priv(dev);
536 void __iomem *ioaddr = np->ioaddr;
549 struct netdev_private *np = netdev_priv(dev);
550 void __iomem *ioaddr = np->ioaddr;
553 np->cur_rx = np->cur_tx = 0;
554 np->old_rx = np->old_tx = 0;
555 np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
559 np->tx_skbuff[i] = NULL;
560 np->tx_ring[i].status = cpu_to_le64 (TFDDone);
561 np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
568 np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
571 np->rx_ring[i].status = 0;
572 np->rx_ring[i].fraginfo = 0;
573 np->rx_skbuff[i] = NULL;
581 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
582 np->rx_skbuff[i] = skb;
587 np->rx_ring[i].fraginfo =
589 np->pdev, skb->data, np->rx_buf_sz,
591 np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
595 dw32(RFDListPtr0, np->rx_ring_dma);
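fraginfo is a single little-endian u64 that packs the 48-bit bus address in its low bits and the fragment length in the top 16 bits (hence the `<< 48` at lines 523, 591 and 913). The driver's inverse helper, as in mainline dl2k.c:

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	/* Strip the length field; keep the 48-bit DMA address. */
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}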
602 struct netdev_private *np = netdev_priv(dev);
603 void __iomem *ioaddr = np->ioaddr;
608 if (np->link_status == 0) { /* Link Down */
612 entry = np->cur_tx % TX_RING_SIZE;
613 np->tx_skbuff[entry] = skb;
614 txdesc = &np->tx_ring[entry];
623 if (np->vlan) {
625 ((u64)np->vlan << 32) |
628 txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
635 if (entry % np->tx_coalesce == 0 || np->speed == 10)
649 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
650 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
651 < TX_QUEUE_LEN - 1 && np->speed != 10) {
659 dw32(TFDListPtr0, np->tx_ring_dma +
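Line 659 writes the list head only for the very first frame: once the chip holds a non-NULL TFDListPtr it follows the next_desc chain on its own. A sketch of that guard, assuming the dr32()/dw32() accessors:

	/* First frame after open/reset: hand the chip the list head.
	 * Afterwards it walks next_desc links by itself. */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof(struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}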
671 struct netdev_private *np = netdev_priv(dev);
672 void __iomem *ioaddr = np->ioaddr;
702 if (np->cur_tx != np->old_tx)
715 struct netdev_private *np = netdev_priv(dev);
716 int entry = np->old_tx % TX_RING_SIZE;
721 spin_lock(&np->tx_lock);
723 spin_lock_irqsave(&np->tx_lock, flag);
726 while (entry != np->cur_tx) {
729 if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
731 skb = np->tx_skbuff[entry];
732 pci_unmap_single (np->pdev,
733 desc_to_dma(&np->tx_ring[entry]),
740 np->tx_skbuff[entry] = NULL;
745 spin_unlock(&np->tx_lock);
747 spin_unlock_irqrestore(&np->tx_lock, flag);
748 np->old_tx = entry;
754 ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
755 < TX_QUEUE_LEN - 1 || np->speed == 10)) {
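Lines 754-755 gate the queue wake-up; the full condition also checks that the queue was actually stopped. A sketch (the 10 Mbit special case mirrors the one-descriptor workaround at line 635):

	/* Wake the queue once there is room again, or unconditionally in
	 * 10 Mbit mode where frames go out one descriptor at a time. */
	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	     < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue(dev);
	}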
763 struct netdev_private *np = netdev_priv(dev);
764 void __iomem *ioaddr = np->ioaddr;
771 np->stats.tx_errors++;
774 np->stats.tx_fifo_errors++;
787 dw32(TFDListPtr0, np->tx_ring_dma +
788 np->old_tx * sizeof (struct netdev_desc));
795 np->stats.tx_fifo_errors++;
809 np->stats.collisions16++;
812 np->stats.collisions++;
821 struct netdev_private *np = netdev_priv(dev);
822 int entry = np->cur_rx % RX_RING_SIZE;
827 struct netdev_desc *desc = &np->rx_ring[entry];
843 np->stats.rx_errors++;
845 np->stats.rx_length_errors++;
847 np->stats.rx_crc_errors++;
848 if (frame_status & RxAlignmentError && np->speed != 1000)
849 np->stats.rx_frame_errors++;
851 np->stats.rx_fifo_errors++;
857 pci_unmap_single (np->pdev,
859 np->rx_buf_sz,
861 skb_put (skb = np->rx_skbuff[entry], pkt_len);
862 np->rx_skbuff[entry] = NULL;
864 pci_dma_sync_single_for_cpu(np->pdev,
866 np->rx_buf_sz,
869 np->rx_skbuff[entry]->data,
872 pci_dma_sync_single_for_device(np->pdev,
874 np->rx_buf_sz,
880 if (np->pdev->revision >= 0x0c &&
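Lines 857-874 implement a copybreak: large frames give up their mapped ring buffer, short ones are copied into a fresh small skb so the large buffer can be recycled in place. A reconstructed sketch (copy_thresh and the exact call shapes follow the mainline driver of this era):

	if (pkt_len > copy_thresh) {
		/* Large frame: unmap and pass the ring buffer upstream. */
		pci_unmap_single(np->pdev, desc_to_dma(desc),
				 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;
	} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
		/* Short frame: sync, copy out, sync back, keep the buffer. */
		pci_dma_sync_single_for_cpu(np->pdev, desc_to_dma(desc),
					    np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data,
					pkt_len);
		skb_put(skb, pkt_len);
		pci_dma_sync_single_for_device(np->pdev, desc_to_dma(desc),
					       np->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
	}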
889 spin_lock(&np->rx_lock);
890 np->cur_rx = entry;
892 entry = np->old_rx;
893 while (entry != np->cur_rx) {
896 if (np->rx_skbuff[entry] == NULL) {
897 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
899 np->rx_ring[entry].fraginfo = 0;
906 np->rx_skbuff[entry] = skb;
907 np->rx_ring[entry].fraginfo =
909 (np->pdev, skb->data, np->rx_buf_sz,
912 np->rx_ring[entry].fraginfo |=
913 cpu_to_le64((u64)np->rx_buf_sz << 48);
914 np->rx_ring[entry].status = 0;
917 np->old_rx = entry;
918 spin_unlock(&np->rx_lock);
925 struct netdev_private *np = netdev_priv(dev);
926 void __iomem *ioaddr = np->ioaddr;
933 if (np->phy_media)
937 if (np->speed == 1000)
938 np->tx_coalesce = tx_coalesce;
940 np->tx_coalesce = 1;
942 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
943 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
944 macctrl |= (np->tx_flow) ?
946 macctrl |= (np->rx_flow) ?
949 np->link_status = 1;
953 np->link_status = 0;
976 struct netdev_private *np = netdev_priv(dev);
977 void __iomem *ioaddr = np->ioaddr;
986 np->stats.rx_packets += dr32(FramesRcvOk);
987 np->stats.tx_packets += dr32(FramesXmtOk);
988 np->stats.rx_bytes += dr32(OctetRcvOk);
989 np->stats.tx_bytes += dr32(OctetXmtOk);
991 np->stats.multicast = dr32(McstFramesRcvdOk);
992 np->stats.collisions += dr32(SingleColFrames)
997 np->stats.tx_aborted_errors += stat_reg;
998 np->stats.tx_errors += stat_reg;
1001 np->stats.tx_carrier_errors += stat_reg;
1002 np->stats.tx_errors += stat_reg;
1032 return &np->stats;
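The counters read at lines 986-1002 are hardware read-to-clear registers, which is why each read is added into the running struct totals rather than assigned. The error-counter pattern behind lines 997-1002, reconstructed (register names as in mainline dl2k.h):

	stat_reg = dr16(FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;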
1038 struct netdev_private *np = netdev_priv(dev);
1039 void __iomem *ioaddr = np->ioaddr;
1094 struct netdev_private *np = netdev_priv(dev);
1095 int max = (np->jumbo) ? MAX_JUMBO : 1536;
1109 struct netdev_private *np = netdev_priv(dev);
1110 void __iomem *ioaddr = np->ioaddr;
1143 if (np->vlan) {
1155 struct netdev_private *np = netdev_priv(dev);
1159 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1164 struct netdev_private *np = netdev_priv(dev);
1165 if (np->phy_media) {
1184 if ( np->link_status ) {
1185 ethtool_cmd_speed_set(cmd, np->speed);
1186 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1191 if ( np->an_enable)
1196 cmd->phy_address = np->phy_addr;
1202 struct netdev_private *np = netdev_priv(dev);
1205 if (np->an_enable)
1208 np->an_enable = 1;
1213 np->an_enable = 0;
1214 if (np->speed == 1000) {
1221 np->speed = 10;
1222 np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1225 np->speed = 100;
1226 np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1239 struct netdev_private *np = netdev_priv(dev);
1240 return np->link_status;
1254 struct netdev_private *np = netdev_priv(dev);
1257 phy_addr = np->phy_addr;
1280 static int read_eeprom(struct netdev_private *np, int eep_addr)
1282 void __iomem *ioaddr = np->eeprom_addr;
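read_eeprom() is a busy-wait on the EEPROM controller. A sketch of the whole function, assuming the EEP_READ/EEP_BUSY bit names and EepromCtrl/EepromData registers as in mainline dl2k.h:

static int read_eeprom(struct netdev_private *np, int eep_addr)
{
	void __iomem *ioaddr = np->eeprom_addr;
	int i = 1000;

	/* Kick off a read of one 16-bit word, then poll until not busy. */
	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
	while (i-- > 0) {
		if (!(dr16(EepromCtrl) & EEP_BUSY))
			return dr16(EepromData);
	}
	return 0;       /* timed out; caller sees all-zero data */
}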
1302 struct netdev_private *np = netdev_priv(dev);
1303 void __iomem *ioaddr = np->ioaddr;
1315 struct netdev_private *np = netdev_priv(dev);
1316 void __iomem *ioaddr = np->ioaddr;
1385 struct netdev_private *np;
1387 np = netdev_priv(dev);
1388 phy_addr = np->phy_addr;
1406 struct netdev_private *np;
1408 np = netdev_priv(dev);
1409 phy_addr = np->phy_addr;
1412 if (np->an_enable) {
1422 np->speed = 1000;
1423 np->full_duplex = 1;
1426 np->speed = 1000;
1427 np->full_duplex = 0;
1430 np->speed = 100;
1431 np->full_duplex = 1;
1434 np->speed = 100;
1435 np->full_duplex = 0;
1438 np->speed = 10;
1439 np->full_duplex = 1;
1442 np->speed = 10;
1443 np->full_duplex = 0;
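The speed/duplex ladder at lines 1422-1443 follows from ANDing the local and link-partner ability words. A sketch of the decode that presumably precedes it, using standard <linux/mii.h> names (the mainline driver uses its own register structs; the 1000BASE-T bits come from MII_CTRL1000/MII_STAT1000 and are omitted here):

	/* Highest common denominator of what we advertise and what the
	 * link partner advertises. */
	negotiate = mii_read(dev, phy_addr, MII_ADVERTISE) &
		    mii_read(dev, phy_addr, MII_LPA);

	if (negotiate & ADVERTISE_100FULL) {
		np->speed = 100;
		np->full_duplex = 1;
	} else if (negotiate & ADVERTISE_100HALF) {
		np->speed = 100;
		np->full_duplex = 0;
	} else if (negotiate & ADVERTISE_10FULL) {
		np->speed = 10;
		np->full_duplex = 1;
	} else if (negotiate & ADVERTISE_10HALF) {
		np->speed = 10;
		np->full_duplex = 0;
	}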
1447 np->tx_flow &= 1;
1448 np->rx_flow &= 1;
1450 np->tx_flow = 0;
1451 np->rx_flow &= 1;
1472 if (np->tx_flow)
1476 if (np->rx_flow)
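Lines 1447-1451 resolve flow control from the negotiated pause bits: symmetric pause keeps both directions enabled (subject to the module parameters already folded into tx_flow/rx_flow), while asymmetric pause disables local transmit pause only. Sketched with the standard <linux/mii.h> bit names as an assumption:

	if (negotiate & LPA_PAUSE_CAP) {            /* symmetric pause */
		np->tx_flow &= 1;
		np->rx_flow &= 1;
	} else if (negotiate & LPA_PAUSE_ASYM) {    /* asymmetric pause */
		np->tx_flow = 0;
		np->rx_flow &= 1;
	} else {
		np->tx_flow = 0;
		np->rx_flow = 0;
	}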
1492 struct netdev_private *np;
1493 np = netdev_priv(dev);
1494 phy_addr = np->phy_addr;
1497 if (np->an_enable) {
1549 if (np->speed == 100) {
1552 } else if (np->speed == 10) {
1555 if (np->full_duplex) {
1579 struct netdev_private *np;
1581 np = netdev_priv(dev);
1582 phy_addr = np->phy_addr;
1585 if (np->an_enable) {
1592 np->speed = 1000;
1595 np->full_duplex = 1;
1598 np->full_duplex = 0;
1601 np->tx_flow &= 1;
1602 np->rx_flow &= 1;
1604 np->tx_flow = 0;
1605 np->rx_flow &= 1;
1617 if (np->tx_flow)
1621 if (np->rx_flow)
1636 struct netdev_private *np;
1637 np = netdev_priv(dev);
1638 phy_addr = np->phy_addr;
1641 if (np->an_enable) {
1665 if (np->full_duplex) {
1685 struct netdev_private *np = netdev_priv(dev);
1686 void __iomem *ioaddr = np->ioaddr;
1688 struct pci_dev *pdev = np->pdev;
1701 del_timer_sync (&np->timer);
1705 skb = np->rx_skbuff[i];
1707 pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1710 np->rx_skbuff[i] = NULL;
1712 np->rx_ring[i].status = 0;
1713 np->rx_ring[i].fraginfo = 0;
1716 skb = np->tx_skbuff[i];
1718 pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1721 np->tx_skbuff[i] = NULL;
1734 struct netdev_private *np = netdev_priv(dev);
1737 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
1738 np->rx_ring_dma);
1739 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1740 np->tx_ring_dma);
1742 pci_iounmap(pdev, np->ioaddr);
1744 pci_iounmap(pdev, np->eeprom_addr);