Lines matching defs:c2_port (each match below is prefixed with its line number in the driver source)
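Taken together, the fields referenced in the matches below (netdev, c2dev, msg_enable, tx_lock, tx_avail, tx_ring, rx_ring, mem, mem_size, dma, rx_buf_size) suggest a per-port private structure along the following lines. This is a reconstruction inferred from the listing, not the definition from the driver's header; struct c2_dev and struct c2_ring are assumed to be supplied by that header.

struct c2_port {
	u32 msg_enable;            /* netif_msg_* verbosity bitmask */
	struct c2_dev *c2dev;      /* owning adapter */
	struct net_device *netdev; /* associated network interface */

	spinlock_t tx_lock;        /* protects tx_ring and tx_avail */
	u32 tx_avail;              /* free TX descriptors */
	struct c2_ring tx_ring;
	struct c2_ring rx_ring;

	void *mem;                 /* one coherent DMA block holding both rings */
	dma_addr_t dma;            /* bus address of mem */
	unsigned long mem_size;    /* rx_size + tx_size (see lines 603-607) */

	u32 rx_buf_size;           /* per-buffer RX allocation size */
};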

81 static void c2_reset(struct c2_port *c2_port);
95 static void c2_set_rxbufsize(struct c2_port *c2_port)
97 struct net_device *netdev = c2_port->netdev;
100 c2_port->rx_buf_size =
104 c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
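The two assignments at 100 and 104 show rx_buf_size being chosen between a computed value and a fixed default of sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE. A plausible reconstruction is sketched below; the MTU comparison and the exact terms of the computed branch (ETH_HLEN, NET_IP_ALIGN) are assumptions.

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		/* large MTU: room for the frame plus the adapter's RX header */
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		/* default buffer plus the RX header, as on line 104 */
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}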
209 static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
211 struct c2_dev *c2dev = c2_port->c2dev;
218 skb = dev_alloc_skb(c2_port->rx_buf_size);
221 c2_port->netdev->name);
228 skb->dev = c2_port->netdev;
230 maplen = c2_port->rx_buf_size;
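The matches at 218-230 outline the per-element receive allocation: an skb sized by rx_buf_size, tied to the port's netdev, then DMA-mapped for the adapter. A condensed sketch of that pattern follows; the pci_map_single call and the descriptor fields it feeds are assumptions.

	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			 c2_port->netdev->name);
		return -ENOMEM;
	}

	skb->dev = c2_port->netdev;
	maplen = c2_port->rx_buf_size;
	mapaddr = pci_map_single(c2dev->pcidev, skb->data, maplen,
				 PCI_DMA_FROMDEVICE);
	/* the ring element would then record skb, mapaddr and maplen so the
	 * buffer can be unmapped and handed up on completion */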
258 static int c2_rx_fill(struct c2_port *c2_port)
260 struct c2_ring *rx_ring = &c2_port->rx_ring;
266 if (c2_rx_alloc(c2_port, elem)) {
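The lone match at 266 implies c2_rx_fill walks the RX ring and allocates one buffer per element, stopping early on failure. A minimal sketch, assuming the ring elements are circularly linked through elem->next:

	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;	/* out of memory: ring only partially filled */
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;	/* assumed reset of the clean pointer */
	return ret;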
277 static void c2_rx_clean(struct c2_port *c2_port)
279 struct c2_dev *c2dev = c2_port->c2dev;
280 struct c2_ring *rx_ring = &c2_port->rx_ring;
324 static void c2_tx_clean(struct c2_port *c2_port)
326 struct c2_ring *tx_ring = &c2_port->tx_ring;
332 spin_lock_irqsave(&c2_port->tx_lock, flags);
350 c2_port->netdev->stats.tx_dropped++;
361 c2_tx_free(c2_port->c2dev, elem);
366 c2_port->tx_avail = c2_port->tx_ring.count - 1;
367 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
369 if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
370 netif_wake_queue(c2_port->netdev);
372 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
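Lines 332-372 show the shutdown-time TX cleanup: it runs under tx_lock, counts any still-queued frames as tx_dropped, returns their descriptors via c2_tx_free, then rewinds tx_avail and cur_tx and wakes the queue. A condensed sketch, with the per-element skb check assumed:

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;
	do {
		if (elem->skb) {	/* frame queued but never completed */
			c2_port->netdev->stats.tx_dropped++;
			c2_tx_free(c2_port->c2dev, elem);
		}
	} while ((elem = elem->next) != tx_ring->start);

	/* all descriptors free again, bar the one kept in reserve */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);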
381 struct c2_port *c2_port = netdev_priv(netdev);
382 struct c2_dev *c2dev = c2_port->c2dev;
383 struct c2_ring *tx_ring = &c2_port->tx_ring;
387 spin_lock(&c2_port->tx_lock);
397 if (netif_msg_tx_done(c2_port)) {
408 ++(c2_port->tx_avail);
414 && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
417 spin_unlock(&c2_port->tx_lock);
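Lines 381-417 belong to the TX completion path. It takes the plain spin_lock variant (it runs in interrupt context), reclaims one tx_avail slot per completed descriptor, and wakes a stopped queue once a worst-case frame would fit again. A skeleton of that flow, with the per-descriptor completion loop elided:

	spin_lock(&c2_port->tx_lock);

	/* for each descriptor the adapter has completed: unmap it, free the
	 * skb, advance the clean pointer, and reclaim the slot */
	++(c2_port->tx_avail);

	if (netif_queue_stopped(netdev) &&
	    c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);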
420 static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
430 elem - c2_port->rx_ring.start);
458 c2_port->netdev->stats.rx_dropped++;
463 struct c2_port *c2_port = netdev_priv(netdev);
464 struct c2_dev *c2dev = c2_port->c2dev;
465 struct c2_ring *rx_ring = &c2_port->rx_ring;
494 c2_rx_error(c2_port, elem);
502 if (c2_rx_alloc(c2_port, elem)) {
503 c2_rx_error(c2_port, elem);
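The back-to-back calls at 502-503 suggest the receive path replaces a ring buffer before handing the old one to the stack: if the replacement allocation fails, the just-received buffer is recycled via c2_rx_error rather than leaving a hole in the ring. A sketch of that replace-then-pass-up step; the skb/buflen locals and the netif_rx hand-off are assumptions.

	/* allocate a fresh buffer for this ring slot before giving the
	 * received one away; on failure, recycle the old buffer and count
	 * the frame as dropped */
	if (c2_rx_alloc(c2_port, elem)) {
		c2_rx_error(c2_port, elem);
		continue;	/* assumed: move on to the next completed element */
	}

	skb_put(skb, buflen);
	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);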
587 struct c2_port *c2_port = netdev_priv(netdev);
588 struct c2_dev *c2dev = c2_port->c2dev;
596 if (netif_msg_ifup(c2_port))
600 c2_set_rxbufsize(c2_port);
603 rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
604 tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
606 c2_port->mem_size = tx_size + rx_size;
607 c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
608 &c2_port->dma);
609 if (c2_port->mem == NULL) {
617 c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
624 if (c2_rx_fill(c2_port)) {
630 if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
631 c2_port->dma + rx_size,
638 c2_port->tx_avail = c2_port->tx_ring.count - 1;
639 c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
640 c2_port->tx_ring.start + c2dev->cur_tx;
644 BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
647 c2_reset(c2_port);
650 for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
679 c2_rx_clean(c2_port);
680 kfree(c2_port->rx_ring.start);
683 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
684 c2_port->dma);
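Lines 603-640 show both descriptor rings being carved out of a single coherent DMA allocation, the RX ring at the front and the TX ring immediately after it, with the error path unwinding in reverse (679-684). A condensed sketch of that sequence; the bail labels and the final mmio ring arguments are assumptions.

	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
					     &c2_port->dma);
	if (c2_port->mem == NULL)
		return -ENOMEM;

	/* RX ring at the start of the block... */
	ret = c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			       c2dev->mmio_rxp_ring);	/* last arg assumed */
	if (ret)
		goto bail_free;

	if (c2_rx_fill(c2_port)) {
		ret = -ENOMEM;
		goto bail_clean;
	}

	/* ...TX ring immediately after the RX descriptors */
	ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
			       c2_port->dma + rx_size,
			       c2dev->mmio_txp_ring);	/* last arg assumed */
	if (ret)
		goto bail_clean;

	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	return 0;

 bail_clean:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);
 bail_free:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);
	return ret;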
691 struct c2_port *c2_port = netdev_priv(netdev);
692 struct c2_dev *c2dev = c2_port->c2dev;
694 if (netif_msg_ifdown(c2_port))
713 c2_reset(c2_port);
718 c2_tx_clean(c2_port);
719 c2_rx_clean(c2_port);
722 kfree(c2_port->rx_ring.start);
723 kfree(c2_port->tx_ring.start);
724 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
725 c2_port->dma);
730 static void c2_reset(struct c2_port *c2_port)
732 struct c2_dev *c2dev = c2_port->c2dev;
758 struct c2_port *c2_port = netdev_priv(netdev);
759 struct c2_dev *c2dev = c2_port->c2dev;
760 struct c2_ring *tx_ring = &c2_port->tx_ring;
767 spin_lock_irqsave(&c2_port->tx_lock, flags);
769 if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
771 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
824 c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
826 if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
828 if (netif_msg_tx_queued(c2_port))
833 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
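Lines 767-828 show the fragment-aware flow control around the transmit hook: a frame needs one descriptor for its head plus one per page fragment, so the queue is stopped either when the current frame does not fit or when fewer than a worst-case frame's worth of descriptors remain afterwards. A sketch of those two checks; the return values and the elided ring posting follow standard netdev conventions rather than this driver's exact code.

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	/* one descriptor for the head plus one per page fragment */
	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* map and post the head and each fragment to the TX ring here */

	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		/* cannot promise room for a worst-case skb next time */
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n", netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
	return NETDEV_TX_OK;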
842 struct c2_port *c2_port = netdev_priv(netdev);
844 if (netif_msg_timer(c2_port))
847 c2_tx_clean(c2_port);
882 struct c2_port *c2_port = NULL;
883 struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
886 pr_debug("c2_port etherdev alloc failed");
896 c2_port = netdev_priv(netdev);
897 c2_port->netdev = netdev;
898 c2_port->c2dev = c2dev;
899 c2_port->msg_enable = netif_msg_init(debug, default_msg);
900 c2_port->tx_ring.count = C2_NUM_TX_DESC;
901 c2_port->rx_ring.count = C2_NUM_RX_DESC;
903 spin_lock_init(&c2_port->tx_lock);
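Lines 882-903 are the standard alloc_etherdev idiom: the private area allocated alongside the net_device is the struct c2_port itself, which is then wired back to the netdev and the adapter and given its ring sizes. Stitched together (the NULL-check return is assumed):

	struct c2_port *c2_port = NULL;
	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

	if (!netdev) {
		pr_debug("c2_port etherdev alloc failed");
		return NULL;
	}

	c2_port = netdev_priv(netdev);
	c2_port->netdev = netdev;
	c2_port->c2dev = c2dev;
	c2_port->msg_enable = netif_msg_init(debug, default_msg);
	c2_port->tx_ring.count = C2_NUM_TX_DESC;
	c2_port->rx_ring.count = C2_NUM_RX_DESC;

	spin_lock_init(&c2_port->tx_lock);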