Lines matching references to sp (the driver-private data pointer) in the ipg Ethernet driver source; each entry shows the source line number followed by the matching line.

110 	struct ipg_nic_private *sp = netdev_priv(dev);
111 return sp->ioaddr;
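
Nearly every entry below starts from the same pattern: recover the driver-private area embedded in the struct net_device via netdev_priv(). A minimal sketch of lines 110-111, assuming a hypothetical reduced private struct (the real struct ipg_nic_private also carries rings, locks, MII state, and statistics):

#include <linux/netdevice.h>

/* Hypothetical reduced layout; stands in for struct ipg_nic_private. */
struct ipg_priv_sketch {
	void __iomem *ioaddr;	/* mapped device registers */
};

static void __iomem *ipg_ioaddr_sketch(struct net_device *dev)
{
	/* netdev_priv() returns the private area allocated along with
	 * the net_device itself, so no extra lookup is needed. */
	struct ipg_priv_sketch *sp = netdev_priv(dev);

	return sp->ioaddr;
}
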
117 struct ipg_nic_private *sp = netdev_priv(dev);
118 void __iomem *ioaddr = sp->ioaddr;
124 netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125 netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
127 (unsigned long)sp->rxd_map);
132 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
134 i, offset, (unsigned long)sp->rxd[i].next_desc);
135 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
137 i, offset, (unsigned long)sp->rxd[i].rfs);
138 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
140 i, offset, (unsigned long)sp->rxd[i].frag_info);
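
The rx descriptor dump above computes each field's offset by casting pointers to u32 (lines 132, 135, 138), which truncates pointers on 64-bit; the tx dump below (lines 161-168) does the same. A sketch of the equivalent computation with offsetof(), using a hypothetical reduced descriptor layout:

#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical reduced rx descriptor; field names follow the listing. */
struct ipg_rx_sketch {
	__le64 next_desc;
	__le64 rfs;
	__le64 frag_info;
};

/* Byte offset of entry i's rfs word from the ring base: the same value
 * as "(u32)&sp->rxd[i].rfs - (u32)sp->rxd", without truncating pointers. */
static unsigned long rfs_offset(unsigned int i)
{
	return i * sizeof(struct ipg_rx_sketch) +
	       offsetof(struct ipg_rx_sketch, rfs);
}
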
146 struct ipg_nic_private *sp = netdev_priv(dev);
147 void __iomem *ioaddr = sp->ioaddr;
153 netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154 netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
156 (unsigned long) sp->txd_map);
161 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
163 i, offset, (unsigned long)sp->txd[i].next_desc);
165 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
167 i, offset, (unsigned long) sp->txd[i].tfc);
168 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
170 i, offset, (unsigned long) sp->txd[i].frag_info);
381 struct ipg_nic_private *sp = netdev_priv(dev);
382 void __iomem *ioaddr = sp->ioaddr;
388 if ((sp->led_mode & 0x03) > 1)
391 if ((sp->led_mode & 0x01) == 1)
394 if ((sp->led_mode & 0x08) == 8)
402 struct ipg_nic_private *sp = netdev_priv(dev);
403 void __iomem *ioaddr = sp->ioaddr;
408 physet |= ((sp->led_mode & 0x70) >> 4);
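
The LED/PHY configuration tests individual bits of sp->led_mode, which the probe path reads from EEPROM word 6 (line 2046 below). The following sketch only restates the tests visible in this listing; what each bit actually controls is an assumption, not a datasheet fact:

#include <linux/types.h>

static void decode_led_mode_sketch(u16 led_mode)
{
	bool mode_bits_gt_1 = (led_mode & 0x03) > 1;	/* line 388 */
	bool bit0_set       = (led_mode & 0x01) == 1;	/* line 391 */
	bool bit3_set       = (led_mode & 0x08) == 8;	/* line 394 */
	u8   physet_bits    = (led_mode & 0x70) >> 4;	/* ORed into PHYSET, line 408 */

	(void)mode_bits_gt_1;
	(void)bit0_set;
	(void)bit3_set;
	(void)physet_bits;
}
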
471 struct ipg_nic_private *sp = netdev_priv(dev);
472 void __iomem *ioaddr = sp->ioaddr;
500 sp->tenmbpsmode = 0;
506 sp->tenmbpsmode = 1;
520 if (sp->tenmbpsmode == 1)
641 struct ipg_nic_private *sp = netdev_priv(dev);
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
743 sp->rx_buff[entry] = NULL;
748 sp->rx_buff[entry] = skb;
750 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
754 rxfragsize = sp->rxfrag_size;
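
ipg_get_rxbuff() (lines 734-754) shows the rx refill pattern: allocate an IP-aligned skb sized for the largest supported frame, record it in the per-entry shadow array, and DMA-map its data into the descriptor's frag_info word. A minimal sketch, using the legacy pci_map_single() API that the listing itself uses; as in the original, the mapping result is not error-checked:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int refill_one_sketch(struct net_device *dev, struct pci_dev *pdev,
			     struct sk_buff **slot, __le64 *frag_info,
			     unsigned int support_size, unsigned int buf_sz)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, support_size);

	if (!skb) {
		*slot = NULL;		/* mirrors line 743 */
		return -ENOMEM;
	}

	*slot = skb;			/* shadow array entry, line 748 */
	*frag_info = cpu_to_le64(pci_map_single(pdev, skb->data, buf_sz,
						PCI_DMA_FROMDEVICE));
	return 0;
}
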
762 struct ipg_nic_private *sp = netdev_priv(dev);
763 void __iomem *ioaddr = sp->ioaddr;
769 struct ipg_rx *rxfd = sp->rxd + i;
771 if (sp->rx_buff[i]) {
772 pci_unmap_single(sp->pdev,
774 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775 dev_kfree_skb_irq(sp->rx_buff[i]);
776 sp->rx_buff[i] = NULL;
798 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
801 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
803 sp->rx_current = 0;
804 sp->rx_dirty = 0;
807 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
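
ipg_nic_rx_init() chains the descriptors into a ring the hardware can walk: each next_desc holds the bus address of the following entry (line 798), the last entry wraps back to the head (line 801), and the head's bus address is programmed into RFD_LIST_PTR_0 (line 807). A sketch of the chaining arithmetic; the ring length is a stand-in for IPG_RFDLIST_LENGTH:

#include <asm/byteorder.h>
#include <linux/types.h>

#define RING_LEN_SKETCH 256	/* assumption; stands in for IPG_RFDLIST_LENGTH */

struct rx_desc_sketch {
	__le64 next_desc;
	__le64 rfs;
	__le64 frag_info;
};

static void chain_rx_ring_sketch(struct rx_desc_sketch *rxd, dma_addr_t rxd_map)
{
	unsigned int i;

	/* Entry i points at the bus address of entry (i + 1) mod LEN,
	 * which folds the listing's separate wrap fix-up (line 801)
	 * into the loop. */
	for (i = 0; i < RING_LEN_SKETCH; i++)
		rxd[i].next_desc = cpu_to_le64(rxd_map +
			sizeof(*rxd) * ((i + 1) % RING_LEN_SKETCH));
}
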
815 struct ipg_nic_private *sp = netdev_priv(dev);
816 void __iomem *ioaddr = sp->ioaddr;
822 struct ipg_tx *txfd = sp->txd + i;
826 if (sp->tx_buff[i]) {
827 dev_kfree_skb_irq(sp->tx_buff[i]);
828 sp->tx_buff[i] = NULL;
831 txfd->next_desc = cpu_to_le64(sp->txd_map +
834 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
836 sp->tx_current = 0;
837 sp->tx_dirty = 0;
841 (u32) sp->txd_map);
842 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
845 sp->reset_current_tfd = 1;
854 struct ipg_nic_private *sp = netdev_priv(dev);
859 pending = sp->tx_current - sp->tx_dirty;
860 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
863 struct sk_buff *skb = sp->tx_buff[dirty];
864 struct ipg_tx *txfd = sp->txd + dirty;
878 pci_unmap_single(sp->pdev,
884 sp->tx_buff[dirty] = NULL;
889 sp->tx_dirty += released;
892 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
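
ipg_nic_txfree() relies on the driver's index convention: tx_current and tx_dirty (and the rx pair) are free-running counters, never wrapped. The difference is the in-flight count (line 859), "% IPG_TFDLIST_LENGTH" selects the slot (line 860), and "current == dirty + length" means the ring is completely full (lines 892 and 1941). A sketch of that arithmetic:

#include <linux/types.h>

static u32 ring_pending(u32 current_cnt, u32 dirty_cnt)
{
	/* unsigned wraparound keeps the difference exact even after
	 * the counters overflow */
	return current_cnt - dirty_cnt;
}

static unsigned int ring_slot(u32 counter, unsigned int ring_len)
{
	return counter % ring_len;	/* pattern of lines 860 and 1843 */
}

static bool ring_full(u32 current_cnt, u32 dirty_cnt, unsigned int ring_len)
{
	return current_cnt == dirty_cnt + ring_len;	/* lines 892, 1941 */
}
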
899 struct ipg_nic_private *sp = netdev_priv(dev);
900 void __iomem *ioaddr = sp->ioaddr;
905 spin_lock_irq(&sp->lock);
913 spin_unlock_irq(&sp->lock);
926 struct ipg_nic_private *sp = netdev_priv(dev);
927 void __iomem *ioaddr = sp->ioaddr;
947 if (sp->tenmbpsmode) {
954 sp->stats.tx_errors++;
976 sp->stats.tx_fifo_errors++;
997 struct ipg_nic_private *sp = netdev_priv(dev);
998 void __iomem *ioaddr = sp->ioaddr;
1008 return &sp->stats;
1010 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1015 sp->stats.rx_errors += temp1;
1016 sp->stats.rx_missed_errors += temp1;
1020 sp->stats.collisions += temp1;
1021 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1024 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1027 sp->stats.tx_carrier_errors += temp2;
1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1037 return &sp->stats;
1043 struct ipg_nic_private *sp = netdev_priv(dev);
1044 const unsigned int curr = sp->rx_current;
1045 unsigned int dirty = sp->rx_dirty;
1049 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1053 if (sp->rx_buff[entry])
1067 sp->rxd[entry].rfs = 0x0000000000000000;
1069 sp->rx_dirty = dirty;
1095 struct ipg_nic_private *sp = netdev_priv(dev);
1096 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1098 if (sp->rx_buff[entry]) {
1099 struct ipg_rx *rxfd = sp->rxd + entry;
1101 pci_unmap_single(sp->pdev,
1103 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_irq(sp->rx_buff[entry]);
1105 sp->rx_buff[entry] = NULL;
1111 struct ipg_nic_private *sp = netdev_priv(dev);
1112 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1124 struct ipg_nic_private *sp = netdev_priv(dev);
1125 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126 struct ipg_rx *rxfd = sp->rxd + entry;
1136 sp->stats.rx_errors++;
1142 sp->stats.rx_fifo_errors++;
1147 sp->stats.rx_length_errors++;
1156 sp->stats.rx_frame_errors++;
1167 if (sp->rx_buff[entry]) {
1168 pci_unmap_single(sp->pdev,
1170 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1172 dev_kfree_skb_irq(sp->rx_buff[entry]);
1173 sp->rx_buff[entry] = NULL;
1181 struct ipg_nic_private *sp,
1184 struct ipg_jumbo *jumbo = &sp->jumbo;
1199 skb = sp->rx_buff[entry];
1205 if (framelen > sp->rxfrag_size)
1206 framelen = sp->rxfrag_size;
1212 sp->rx_buff[entry] = NULL;
1216 struct ipg_nic_private *sp,
1219 struct ipg_jumbo *jumbo = &sp->jumbo;
1220 struct pci_dev *pdev = sp->pdev;
1228 skb = sp->rx_buff[entry];
1236 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1238 skb_put(skb, sp->rxfrag_size);
1241 jumbo->current_size = sp->rxfrag_size;
1244 sp->rx_buff[entry] = NULL;
1248 struct ipg_nic_private *sp,
1251 struct ipg_jumbo *jumbo = &sp->jumbo;
1255 struct sk_buff *skb = sp->rx_buff[entry];
1266 if (framelen > sp->rxsupport_size)
1294 struct ipg_nic_private *sp,
1297 struct ipg_jumbo *jumbo = &sp->jumbo;
1301 struct sk_buff *skb = sp->rx_buff[entry];
1305 jumbo->current_size += sp->rxfrag_size;
1306 if (jumbo->current_size <= sp->rxsupport_size) {
1308 sp->rxfrag_size),
1309 skb->data, sp->rxfrag_size);
1324 struct ipg_nic_private *sp = netdev_priv(dev);
1325 unsigned int curr = sp->rx_current;
1326 void __iomem *ioaddr = sp->ioaddr;
1333 struct ipg_rx *rxfd = sp->rxd + entry;
1340 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1343 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1346 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1349 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1354 sp->rx_current = curr;
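
ipg_nic_rx_jumbo() dispatches each received fragment by the frame-start/frame-end flags in its RFS word (lines 1340-1349), so a jumbo frame arrives as first/middle/last fragments that the four handlers reassemble through sp->jumbo. A hedged sketch of the classification; the two bit masks are stand-ins, since the listing does not show the driver's actual mask names or values:

#include <linux/types.h>

#define FRAMESTART_SKETCH 0x1ULL	/* assumption: real RFS frame-start bit */
#define FRAMEEND_SKETCH   0x2ULL	/* assumption: real RFS frame-end bit */

enum rx_frag_kind { RX_WHOLE, RX_FIRST, RX_LAST, RX_MIDDLE };

static enum rx_frag_kind classify_fragment_sketch(u64 rfs)
{
	bool start = rfs & FRAMESTART_SKETCH;
	bool end   = rfs & FRAMEEND_SKETCH;

	if (start && end)
		return RX_WHOLE;	/* ipg_nic_rx_with_start_and_end() */
	if (start)
		return RX_FIRST;	/* ipg_nic_rx_with_start() */
	if (end)
		return RX_LAST;		/* ipg_nic_rx_with_end() */
	return RX_MIDDLE;		/* ipg_nic_rx_no_start_no_end() */
}
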
1373 struct ipg_nic_private *sp = netdev_priv(dev);
1374 unsigned int curr = sp->rx_current;
1375 void __iomem *ioaddr = sp->ioaddr;
1386 struct sk_buff *skb = sp->rx_buff[entry];
1389 rxfd = sp->rxd + entry;
1400 if (framelen > sp->rxfrag_size) {
1404 framelen = sp->rxfrag_size;
1416 sp->stats.rx_errors++;
1421 sp->stats.rx_fifo_errors++;
1426 sp->stats.rx_length_errors++;
1436 sp->stats.rx_frame_errors++;
1451 pci_unmap_single(sp->pdev,
1453 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1483 sp->rx_buff[entry] = NULL;
1497 sp->EmptyRFDListCount++;
1504 rxfd = sp->rxd + entry;
1517 if (sp->rx_buff[entry]) {
1518 pci_unmap_single(sp->pdev,
1520 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521 dev_kfree_skb_irq(sp->rx_buff[entry]);
1525 sp->rx_buff[entry] = NULL;
1528 sp->rx_current = curr;
1533 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1541 struct ipg_nic_private *sp =
1543 struct net_device *dev = sp->dev;
1556 schedule_delayed_work(&sp->task, HZ);
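
ipg_reset_after_host_error() (lines 1541-1556) is a delayed-work handler: the ISR schedules it immediately on a host error (line 1652), and on a failed recovery it re-arms itself one second out. A sketch of the self-rescheduling pattern, with a hypothetical reduced context and a stub recovery function:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct recov_sketch {			/* hypothetical reduced context */
	struct delayed_work task;
	struct net_device *dev;
};

static int try_recover_sketch(struct net_device *dev)
{
	return -EIO;			/* stub for the driver's reset path */
}

static void reset_after_error_sketch(struct work_struct *work)
{
	struct recov_sketch *sp =
		container_of(work, struct recov_sketch, task.work);

	if (try_recover_sketch(sp->dev))
		schedule_delayed_work(&sp->task, HZ);	/* retry in 1s */
}
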
1563 struct ipg_nic_private *sp = netdev_priv(dev);
1564 void __iomem *ioaddr = sp->ioaddr;
1570 if (sp->is_jumbo)
1573 spin_lock(&sp->lock);
1607 sp->RFDlistendCount++;
1625 sp->RFDListCheckedCount++;
1628 if (sp->is_jumbo)
1652 schedule_delayed_work(&sp->task, 0);
1679 spin_unlock(&sp->lock);
1684 static void ipg_rx_clear(struct ipg_nic_private *sp)
1689 if (sp->rx_buff[i]) {
1690 struct ipg_rx *rxfd = sp->rxd + i;
1692 dev_kfree_skb_irq(sp->rx_buff[i]);
1693 sp->rx_buff[i] = NULL;
1694 pci_unmap_single(sp->pdev,
1696 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1701 static void ipg_tx_clear(struct ipg_nic_private *sp)
1706 if (sp->tx_buff[i]) {
1707 struct ipg_tx *txfd = sp->txd + i;
1709 pci_unmap_single(sp->pdev,
1711 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1713 dev_kfree_skb_irq(sp->tx_buff[i]);
1715 sp->tx_buff[i] = NULL;
1722 struct ipg_nic_private *sp = netdev_priv(dev);
1723 void __iomem *ioaddr = sp->ioaddr;
1724 struct pci_dev *pdev = sp->pdev;
1729 sp->rx_buf_sz = sp->rxsupport_size;
1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754 &sp->rxd_map, GFP_KERNEL);
1755 if (!sp->rxd)
1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759 &sp->txd_map, GFP_KERNEL);
1760 if (!sp->txd)
1782 sp->jumbo.found_start = 0;
1783 sp->jumbo.current_size = 0;
1784 sp->jumbo.skb = NULL;
1795 ipg_tx_clear(sp);
1796 ipg_rx_clear(sp);
1798 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1800 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
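
The open path allocates both descriptor rings coherently (lines 1753-1760) and, on a later failure, unwinds in reverse order (lines 1795-1800). A sketch of the paired allocation with error unwinding; the stop path at lines 1830-1831 releases the same memory through pci_free_consistent(), the older wrapper around the same DMA API:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int alloc_rings_sketch(struct pci_dev *pdev,
			      void **rxd, dma_addr_t *rxd_map, size_t rx_bytes,
			      void **txd, dma_addr_t *txd_map, size_t tx_bytes)
{
	*rxd = dma_alloc_coherent(&pdev->dev, rx_bytes, rxd_map, GFP_KERNEL);
	if (!*rxd)
		return -ENOMEM;

	*txd = dma_alloc_coherent(&pdev->dev, tx_bytes, txd_map, GFP_KERNEL);
	if (!*txd) {
		/* unwind the rx ring, mirroring lines 1798-1800 */
		dma_free_coherent(&pdev->dev, rx_bytes, *rxd, *rxd_map);
		return -ENOMEM;
	}
	return 0;
}
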
1808 struct ipg_nic_private *sp = netdev_priv(dev);
1809 void __iomem *ioaddr = sp->ioaddr;
1810 struct pci_dev *pdev = sp->pdev;
1826 ipg_rx_clear(sp);
1828 ipg_tx_clear(sp);
1830 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1841 struct ipg_nic_private *sp = netdev_priv(dev);
1842 void __iomem *ioaddr = sp->ioaddr;
1843 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1852 if (sp->tenmbpsmode)
1855 if (sp->reset_current_tfd) {
1856 sp->reset_current_tfd = 0;
1860 txfd = sp->txd + entry;
1862 sp->tx_buff[entry] = skb;
1869 (IPG_TFC_FRAMEID & sp->tx_current) |
1873 * 0--15 (FrameId) <- sp->tx_current,
1883 if (sp->tenmbpsmode)
1919 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1933 spin_lock_irqsave(&sp->lock, flags);
1935 sp->tx_current++;
1941 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1944 spin_unlock_irqrestore(&sp->lock, flags);
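
The transmit path builds and maps the descriptor first (line 1919), then takes sp->lock only to advance tx_current and, if the ring just filled, stop the queue (lines 1933-1944); ipg_nic_txfree() restarts it once the test at line 892 sees room again. A sketch of the locked section, assuming netif_stop_queue() is the primitive behind the branch at line 1941:

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void advance_tx_sketch(struct net_device *dev, spinlock_t *lock,
			      u32 *tx_current, u32 tx_dirty,
			      unsigned int ring_len)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	(*tx_current)++;
	if (*tx_current == tx_dirty + ring_len)
		netif_stop_queue(dev);	/* ring full until txfree runs */

	spin_unlock_irqrestore(lock, flags);
}
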
2005 struct ipg_nic_private *sp = netdev_priv(dev);
2006 struct mii_if_info *mii_if = &sp->mii_if;
2028 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2039 struct ipg_nic_private *sp = netdev_priv(dev);
2040 void __iomem *ioaddr = sp->ioaddr;
2046 sp->led_mode = read_eeprom(dev, 6);
2059 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2062 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2077 struct ipg_nic_private *sp = netdev_priv(dev);
2080 mutex_lock(&sp->mii_mutex);
2081 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082 mutex_unlock(&sp->mii_mutex);
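
Every path into the generic MII layer, the ioctl here (lines 2080-2082) and the ethtool handlers below (lines 2133-2159), is serialized by the single sp->mii_mutex. A sketch of the shared pattern:

#include <linux/mii.h>
#include <linux/mutex.h>

static int mii_ioctl_locked_sketch(struct mii_if_info *mii_if,
				   struct mutex *mii_mutex,
				   struct mii_ioctl_data *data, int cmd)
{
	int rc;

	mutex_lock(mii_mutex);
	rc = generic_mii_ioctl(mii_if, data, cmd, NULL);
	mutex_unlock(mii_mutex);

	return rc;
}
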
2089 struct ipg_nic_private *sp = netdev_priv(dev);
2112 sp->max_rxframe_size = new_mtu;
2114 sp->rxfrag_size = new_mtu;
2115 if (sp->rxfrag_size > 4088)
2116 sp->rxfrag_size = 4088;
2118 sp->rxsupport_size = sp->max_rxframe_size;
2121 sp->is_jumbo = true;
2123 sp->is_jumbo = false;
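
ipg_nic_change_mtu() derives all rx sizing from the new MTU: the per-fragment buffer is capped at 4088 bytes (lines 2115-2116), and frames needing more than one fragment push the driver into jumbo mode. A sketch of the sizing; the condition that selects lines 2121 versus 2123 is not visible in this listing, so the threshold is left as a parameter:

#include <linux/types.h>

struct rx_sizing_sketch {
	unsigned int max_rxframe;
	unsigned int rxfrag;
	unsigned int rxsupport;
	bool is_jumbo;
};

static struct rx_sizing_sketch size_for_mtu_sketch(unsigned int new_mtu,
						   unsigned int jumbo_threshold)
{
	struct rx_sizing_sketch s;

	s.max_rxframe = new_mtu;		/* line 2112 */
	s.rxfrag = new_mtu;			/* line 2114 */
	if (s.rxfrag > 4088)			/* cap, lines 2115-2116 */
		s.rxfrag = 4088;
	s.rxsupport = s.max_rxframe;		/* line 2118 */
	/* assumption: jumbo mode is keyed off the MTU crossing some
	 * single-fragment threshold (lines 2121/2123) */
	s.is_jumbo = new_mtu > jumbo_threshold;

	return s;
}
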
2130 struct ipg_nic_private *sp = netdev_priv(dev);
2133 mutex_lock(&sp->mii_mutex);
2134 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135 mutex_unlock(&sp->mii_mutex);
2142 struct ipg_nic_private *sp = netdev_priv(dev);
2145 mutex_lock(&sp->mii_mutex);
2146 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147 mutex_unlock(&sp->mii_mutex);
2154 struct ipg_nic_private *sp = netdev_priv(dev);
2157 mutex_lock(&sp->mii_mutex);
2158 rc = mii_nway_restart(&sp->mii_if);
2159 mutex_unlock(&sp->mii_mutex);
2173 struct ipg_nic_private *sp = netdev_priv(dev);
2180 pci_iounmap(pdev, sp->ioaddr);
2204 struct ipg_nic_private *sp;
2235 sp = netdev_priv(dev);
2236 spin_lock_init(&sp->lock);
2237 mutex_init(&sp->mii_mutex);
2239 sp->is_jumbo = IPG_IS_JUMBO;
2240 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2241 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2242 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2262 sp->ioaddr = ioaddr;
2263 sp->pdev = pdev;
2264 sp->dev = dev;
2266 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
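
The probe path ties everything together: the private area is initialized with its spinlock and MII mutex (lines 2236-2237), the frame-size defaults come from the IPG_* build-time constants (lines 2239-2242), the mapped BAR and the PCI/netdev back-pointers are stored (lines 2262-2264), and the deferred-reset worker from line 1541 is attached (line 2266). A closing sketch with a hypothetical reduced private struct and a stub work handler:

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ipg_priv_init_sketch {		/* hypothetical reduced layout */
	spinlock_t lock;
	struct mutex mii_mutex;
	struct delayed_work task;
	struct net_device *dev;
	struct pci_dev *pdev;
	void __iomem *ioaddr;
};

static void reset_worker_stub(struct work_struct *work)
{
	/* stands in for ipg_reset_after_host_error(), line 1541 */
}

static void init_priv_sketch(struct net_device *dev, struct pci_dev *pdev,
			     void __iomem *ioaddr)
{
	struct ipg_priv_init_sketch *sp = netdev_priv(dev);

	spin_lock_init(&sp->lock);			/* line 2236 */
	mutex_init(&sp->mii_mutex);			/* line 2237 */

	sp->ioaddr = ioaddr;				/* lines 2262-2264 */
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, reset_worker_stub);	/* line 2266 */
}
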