Lines matching refs: sge (cross-reference listing for sge.c)

3  * File: sge.c *
60 #include "sge.h"
256 struct sge {
270 u32 sge_control; /* shadow value of sge control reg */
284 static void tx_sched_stop(struct sge *sge)
286 struct sched *s = sge->tx_sched;
299 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
302 struct sched *s = sge->tx_sched;
322 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
345 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
347 struct sched *s = sge->tx_sched;
352 t1_sched_update_parms(sge, i, 0, 0);
359 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
362 struct sched *s = sge->tx_sched;
365 t1_sched_update_parms(sge, port, 0, 0);
385 static int tx_sched_init(struct sge *sge)
395 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
396 sge->tx_sched = s;
400 t1_sched_update_parms(sge, i, 1500, 1000);
411 static inline int sched_update_avail(struct sge *sge)
413 struct sched *s = sge->tx_sched;
445 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
448 struct sched *s = sge->tx_sched;
486 if (update-- && sched_update_avail(sge))
494 struct cmdQ *q = &sge->cmdQ[0];
498 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
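
The tx_sched_*/sched_* entries above (lines 284-498) form the per-port Tx pacing scheduler, engaged on CHBT_BOARD_CHT204 boards (line 322). The shape suggested by sched_update_avail() and sched_skb() is a per-port byte budget: elapsed time is converted into bytes a port may send, packets are dequeued while budget remains, and restart_sched() rings the CMDQ0 doorbell (line 498) to resume transmission. A minimal sketch of the refill step, assuming hypothetical field names (avail, max_avail, drain_bytes_per_tick) since only the function names appear here:

    /* Hedged sketch of a per-port byte-budget refill; the field names are
     * illustrative, not the driver's actual struct sched layout. */
    struct sched_port {
            unsigned int avail;                /* bytes the port may send now */
            unsigned int max_avail;            /* cap, cf. t1_sched_set_max_avail_bytes() */
            unsigned int drain_bytes_per_tick; /* rate, cf. t1_sched_set_drain_bits_per_us() */
    };

    static void sched_port_refill(struct sched_port *p, unsigned long ticks)
    {
            unsigned int credit = ticks * p->drain_bytes_per_tick;

            /* Credit the bytes "drained" since the last update, clamped so
             * an idle port cannot bank an unbounded burst. */
            if (p->avail + credit > p->max_avail)
                    p->avail = p->max_avail;
            else
                    p->avail += credit;
    }
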
539 static void free_rx_resources(struct sge *sge)
541 struct pci_dev *pdev = sge->adapter->pdev;
544 if (sge->respQ.entries) {
545 size = sizeof(struct respQ_e) * sge->respQ.size;
546 pci_free_consistent(pdev, size, sge->respQ.entries,
547 sge->respQ.dma_addr);
551 struct freelQ *q = &sge->freelQ[i];
569 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
571 struct pci_dev *pdev = sge->adapter->pdev;
575 struct freelQ *q = &sge->freelQ[i];
579 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
598 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
600 sge->freelQ[!sge->jumbo_fl].dma_offset;
605 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
611 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
612 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
614 sge->respQ.genbit = 1;
615 sge->respQ.size = SGE_RESPQ_E_N;
616 sge->respQ.credits = 0;
617 size = sizeof(struct respQ_e) * sge->respQ.size;
618 sge->respQ.entries =
619 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
620 if (!sge->respQ.entries)
625 free_rx_resources(sge);
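
alloc_rx_resources()/free_rx_resources() (lines 539-625) pair DMA-coherent ring allocations with an unwind path: the free lists are set up first, then the response queue is carved out with pci_alloc_consistent() (line 619), and a failure releases everything already allocated (line 625). A minimal sketch of the allocate/free pairing, with a hypothetical generic ring standing in for respQ/freelQ:

    #include <linux/pci.h>

    /* Hedged sketch of the DMA ring alloc/free pairing; the ring layout is
     * illustrative, not the driver's respQ/freelQ definitions. */
    struct ring {
            void *entries;
            dma_addr_t dma_addr;
            unsigned int size;      /* number of entries */
    };

    static int ring_alloc(struct pci_dev *pdev, struct ring *r, size_t esize)
    {
            r->entries = pci_alloc_consistent(pdev, esize * r->size,
                                              &r->dma_addr);
            return r->entries ? 0 : -ENOMEM;
    }

    static void ring_free(struct pci_dev *pdev, struct ring *r, size_t esize)
    {
            if (r->entries) {
                    pci_free_consistent(pdev, esize * r->size, r->entries,
                                        r->dma_addr);
                    r->entries = NULL;
            }
    }
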
632 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
635 struct pci_dev *pdev = sge->adapter->pdev;
666 static void free_tx_resources(struct sge *sge)
668 struct pci_dev *pdev = sge->adapter->pdev;
672 struct cmdQ *q = &sge->cmdQ[i];
676 free_cmdQ_buffers(sge, q, q->in_use);
690 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
692 struct pci_dev *pdev = sge->adapter->pdev;
696 struct cmdQ *q = &sge->cmdQ[i];
724 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
729 free_tx_resources(sge);
747 struct sge *sge = adapter->sge;
750 sge->sge_control |= F_VLAN_XTRACT;
752 sge->sge_control &= ~F_VLAN_XTRACT;
754 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
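
Lines 747-754 show what the sge_control shadow declared at line 270 is for: A_SG_CONTROL is updated by editing the cached copy and writing the whole word back, so no read-modify-write of the device register is needed. Condensed into a sketch (the function name is hypothetical; the body follows the listed lines):

    /* Hedged sketch of the shadow-register update at lines 747-754. */
    static void sge_set_vlan_xtract(struct sge *sge, bool on)
    {
            if (on)
                    sge->sge_control |= F_VLAN_XTRACT;
            else
                    sge->sge_control &= ~F_VLAN_XTRACT;

            /* Push the full shadow value back to the hardware. */
            writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
    }
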
761 * but sge->sge_control is setup and ready to go.
763 static void configure_sge(struct sge *sge, struct sge_params *p)
765 struct adapter *ap = sge->adapter;
768 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
770 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
772 setup_ring_params(ap, sge->freelQ[0].dma_addr,
773 sge->freelQ[0].size, A_SG_FL0BASELWR,
775 setup_ring_params(ap, sge->freelQ[1].dma_addr,
776 sge->freelQ[1].size, A_SG_FL1BASELWR,
782 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
784 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
786 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
789 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
792 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
796 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
798 t1_sge_set_coalesce_params(sge, p);
804 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
806 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
807 sge->freelQ[sge->jumbo_fl].dma_offset -
812 * Frees all SGE related resources and the sge structure itself
814 void t1_sge_destroy(struct sge *sge)
818 for_each_port(sge->adapter, i)
819 free_percpu(sge->port_stats[i]);
821 kfree(sge->tx_sched);
822 free_tx_resources(sge);
823 free_rx_resources(sge);
824 kfree(sge);
835 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
839 static void refill_free_list(struct sge *sge, struct freelQ *q)
841 struct pci_dev *pdev = sge->adapter->pdev;
857 skb_reserve(skb, sge->rx_pkt_pad);
885 static void freelQs_empty(struct sge *sge)
887 struct adapter *adapter = sge->adapter;
891 refill_free_list(sge, &sge->freelQ[0]);
892 refill_free_list(sge, &sge->freelQ[1]);
894 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
895 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
897 irqholdoff_reg = sge->fixed_intrtimer;
901 irqholdoff_reg = sge->intrtimer_nres;
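
freelQs_empty() (lines 885-901) is the interrupt-mitigation fallback for buffer exhaustion: both free lists are refilled, and only when each holds more than a quarter of its capacity (size >> 2, lines 894-895) is the configured holdoff timer (fixed_intrtimer) restored; otherwise the shorter no-resource timer (intrtimer_nres) keeps interrupts firing so the refill retries soon. The selection, condensed (the helper name is hypothetical; the chosen value is presumably written to A_SG_INTRTIMER as at line 1927):

    /* Hedged sketch of the holdoff selection in freelQs_empty(). */
    static u32 pick_holdoff(const struct sge *sge)
    {
            const struct freelQ *f0 = &sge->freelQ[0];
            const struct freelQ *f1 = &sge->freelQ[1];

            /* Both lists over a quarter full: back to normal coalescing;
             * otherwise poll aggressively until buffers recover. */
            if (f0->credits > (f0->size >> 2) &&
                f1->credits > (f1->size >> 2))
                    return sge->fixed_intrtimer;
            return sge->intrtimer_nres;
    }
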
918 void t1_sge_intr_disable(struct sge *sge)
920 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
922 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
923 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
929 void t1_sge_intr_enable(struct sge *sge)
932 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
934 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
936 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
937 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
943 void t1_sge_intr_clear(struct sge *sge)
945 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
946 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
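
t1_sge_intr_disable/enable/clear (lines 918-946) use two-level masking: SGE sources are gated both in the top-level A_PL_ENABLE word (via SGE_PL_INTR_MASK) and in the block-local A_SG_INT_ENABLE register, while clearing writes the cause registers directly. The enable path's initial value for the local mask is elided from the listing; a sketch assuming a constant enable mask and an F_PACKET_TOO_BIG bit (both names are assumptions):

    /* Hedged reconstruction of t1_sge_intr_enable() (lines 929-937). */
    void t1_sge_intr_enable(struct sge *sge)
    {
            u32 en = SGE_INT_ENABLE_MASK;               /* assumed name */
            u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

            /* With TSO the hardware emits oversized frames on purpose, so
             * the "packet too big" source is presumably left masked. */
            if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
                    en &= ~F_PACKET_TOO_BIG;            /* assumed name */

            writel(en, sge->adapter->regs + A_SG_INT_ENABLE);  /* block-local */
            writel(val | SGE_PL_INTR_MASK,
                   sge->adapter->regs + A_PL_ENABLE);          /* top level   */
    }
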
952 int t1_sge_intr_error_handler(struct sge *sge)
954 struct adapter *adapter = sge->adapter;
960 sge->stats.respQ_empty++;
962 sge->stats.respQ_overflow++;
967 sge->stats.freelistQ_empty++;
968 freelQs_empty(sge);
971 sge->stats.pkt_too_big++;
976 sge->stats.pkt_mismatch++;
986 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
988 return &sge->stats;
991 void t1_sge_get_port_stats(const struct sge *sge, int port,
998 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
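
Per-port statistics are kept per-CPU (alloc_percpu at line 2089, this_cpu_ptr updates at lines 1388 and 1790) so the hot paths never contend on a shared counter; t1_sge_get_port_stats() (lines 991-998) then sums the per-CPU copies with per_cpu_ptr(). A sketch of that aggregation, with illustrative field names:

    /* Hedged sketch of per-CPU stat aggregation as in t1_sge_get_port_stats();
     * the summed fields are illustrative. */
    static void sum_port_stats(const struct sge *sge, int port,
                               struct sge_port_stats *tot)
    {
            int cpu;

            memset(tot, 0, sizeof(*tot));
            for_each_possible_cpu(cpu) {
                    const struct sge_port_stats *st =
                            per_cpu_ptr(sge->port_stats[port], cpu);

                    tot->rx_packets += st->rx_packets;  /* fields assumed */
                    tot->tx_packets += st->tx_packets;
            }
    }
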
1303 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1310 free_cmdQ_buffers(sge, q, reclaim);
1321 struct sge *sge = (struct sge *) arg;
1322 struct adapter *adapter = sge->adapter;
1323 struct cmdQ *q = &sge->cmdQ[0];
1328 reclaim_completed_tx(sge, q);
1332 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1361 * @sge: the sge structure
1367 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1371 struct adapter *adapter = sge->adapter;
1375 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1377 sge->stats.rx_drops++;
1388 st = this_cpu_ptr(sge->port_stats[p->iff]);
1422 static void restart_tx_queues(struct sge *sge)
1424 struct adapter *adap = sge->adapter;
1427 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1433 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1435 sge->stats.cmdQ_restarted[2]++;
1449 struct sge *sge = adapter->sge;
1450 struct cmdQ *cmdq = &sge->cmdQ[0];
1454 freelQs_empty(sge);
1465 if (sge->tx_sched)
1466 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1471 if (unlikely(sge->stopped_tx_queues != 0))
1472 restart_tx_queues(sge);
1483 struct sge *sge = adapter->sge;
1484 struct respQ *q = &sge->respQ;
1506 sge->cmdQ[1].processed += cmdq_processed[1];
1511 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1517 sge_rx(sge, fl, e->BufferLength);
1531 refill_free_list(sge, fl);
1533 sge->stats.pure_rsps++;
1550 sge->cmdQ[1].processed += cmdq_processed[1];
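
The response queue initialized with genbit = 1 at line 614 is a generation-bit ring: the hardware writes each entry tagged with the current generation, and the driver treats the entry at cidx as valid only while its generation matches the expected genbit, flipping the expectation on every wrap. This lets process_responses() (lines 1483-1550) consume entries without reading a hardware producer index. A sketch of the consumption step (the entry's GenerationBit field name is an assumption; cidx/size/genbit appear in the listing):

    /* Hedged sketch of generation-bit ring consumption for the respQ. */
    static struct respQ_e *respq_next(struct respQ *q)
    {
            struct respQ_e *e = &q->entries[q->cidx];

            if (e->GenerationBit != q->genbit)      /* field name assumed */
                    return NULL;                    /* not written yet */

            if (++q->cidx == q->size) {             /* wrap: flip expectation */
                    q->cidx = 0;
                    q->genbit ^= 1;
            }
            return e;
    }
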
1557 const struct respQ *Q = &adapter->sge->respQ;
1573 struct sge *sge = adapter->sge;
1574 struct respQ *q = &sge->respQ;
1576 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1602 sge->stats.pure_rsps++;
1606 sge->cmdQ[1].processed += cmdq_processed[1];
1623 writel(adapter->sge->respQ.cidx,
1632 struct sge *sge = adapter->sge;
1643 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1656 sge->stats.unhandled_irqs++;
1677 struct sge *sge = adapter->sge;
1678 struct cmdQ *q = &sge->cmdQ[qid];
1684 reclaim_completed_tx(sge, q);
1695 set_bit(dev->if_port, &sge->stopped_tx_queues);
1696 sge->stats.cmdQ_full[2]++;
1706 set_bit(dev->if_port, &sge->stopped_tx_queues);
1707 sge->stats.cmdQ_full[2]++;
1713 if (sge->tx_sched && !qid && skb->dev) {
1719 skb = sched_skb(sge, skb, credits);
1789 struct sge *sge = adapter->sge;
1790 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1855 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
1858 adapter->sge->espibug_skb[dev->if_port] = skb;
1903 struct sge *sge = (struct sge *)data;
1906 struct cmdQ *q = &sge->cmdQ[i];
1911 reclaim_completed_tx(sge, q);
1913 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1917 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
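
Lines 1903-1917 are the periodic Tx reclaim callback: it walks the command queues, frees descriptors the hardware has completed (reclaim_completed_tx), rings the CMDQ0 doorbell, and re-arms itself. With the init_timer()-era API used at lines 2094-2096, the callback receives the sge pointer through the timer's unsigned long data field. A condensed sketch (queue locking elided; SGE_CMDQ_N is assumed):

    /* Hedged sketch of the self-rearming Tx reclaim timer (lines 1903-1917). */
    static void sge_tx_reclaim_cb(unsigned long data)
    {
            struct sge *sge = (struct sge *)data;
            int i;

            for (i = 0; i < SGE_CMDQ_N; i++)        /* assumed constant */
                    reclaim_completed_tx(sge, &sge->cmdQ[i]);

            writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
            mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
    }
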
1923 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1925 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1926 core_ticks_per_usec(sge->adapter);
1927 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
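
The conversion at lines 1925-1927 turns the user-visible microsecond setting into core clock ticks: for example, with a hypothetical 125 MHz core clock, core_ticks_per_usec() yields 125, so rx_coalesce_usecs = 50 writes 6250 to A_SG_INTRTIMER.
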
1935 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1937 if (alloc_rx_resources(sge, p))
1939 if (alloc_tx_resources(sge, p)) {
1940 free_rx_resources(sge);
1943 configure_sge(sge, p);
1951 p->large_buf_capacity = jumbo_payload_capacity(sge);
1958 void t1_sge_stop(struct sge *sge)
1961 writel(0, sge->adapter->regs + A_SG_CONTROL);
1962 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1964 if (is_T2(sge->adapter))
1965 del_timer_sync(&sge->espibug_timer);
1967 del_timer_sync(&sge->tx_reclaim_timer);
1968 if (sge->tx_sched)
1969 tx_sched_stop(sge);
1972 kfree_skb(sge->espibug_skb[i]);
1978 void t1_sge_start(struct sge *sge)
1980 refill_free_list(sge, &sge->freelQ[0]);
1981 refill_free_list(sge, &sge->freelQ[1]);
1983 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1984 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1985 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1987 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1989 if (is_T2(sge->adapter))
1990 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1999 struct sge *sge = adapter->sge;
2010 struct sk_buff *skb = sge->espibug_skb[i];
2036 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2042 struct sge *sge = adapter->sge;
2045 struct sk_buff *skb = sge->espibug_skb[0];
2068 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2074 struct sge * __devinit t1_sge_create(struct adapter *adapter,
2077 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2080 if (!sge)
2083 sge->adapter = adapter;
2084 sge->netdev = adapter->port[0].dev;
2085 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2086 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2089 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2090 if (!sge->port_stats[i])
2094 init_timer(&sge->tx_reclaim_timer);
2095 sge->tx_reclaim_timer.data = (unsigned long)sge;
2096 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2098 if (is_T2(sge->adapter)) {
2099 init_timer(&sge->espibug_timer);
2102 tx_sched_init(sge);
2103 sge->espibug_timer.function = espibug_workaround_t204;
2105 sge->espibug_timer.function = espibug_workaround;
2106 sge->espibug_timer.data = (unsigned long)sge->adapter;
2108 sge->espibug_timeout = 1;
2111 sge->espibug_timeout = HZ/100;
2117 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2118 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2119 if (sge->tx_sched) {
2120 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2130 return sge;
2133 free_percpu(sge->port_stats[i]);
2136 kfree(sge);
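
t1_sge_create() (lines 2074-2136) ends with the usual error-unwind ladder: if an alloc_percpu() fails partway through, every previously allocated per-port stats block is released (line 2133) before the sge structure itself is freed (line 2136). Condensed to its allocation/unwind skeleton (MAX_NPORTS is assumed; the timer and scheduler setup from lines 2094-2120 is omitted):

    /* Hedged sketch of the create/unwind shape of t1_sge_create(). */
    struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
    {
            struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
            int i;

            if (!sge)
                    return NULL;

            sge->adapter = adapter;
            for (i = 0; i < MAX_NPORTS; i++) {      /* assumed constant */
                    sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
                    if (!sge->port_stats[i])
                            goto nomem_port;
            }
            return sge;

    nomem_port:
            while (i >= 0)                  /* free_percpu(NULL) is a no-op */
                    free_percpu(sge->port_stats[i--]);
            kfree(sge);
            return NULL;
    }
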