/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

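/* Fold a {hi, lo} pair of 32-bit counters (hi word first) into a long;
 * on 32-bit hosts only the low 32 bits are returned.
 */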
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

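/* Length, in 32-bit words, of the port statistics block to DMA to the MFW */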
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict ordering between statistics ramrods
 * (each ramrod carries a sequence number in
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
			bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

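/* Kick off the prepared DMAE transfers: either hand the command chain to
 * the DMAE loader or, if there is no chain, post the single function-stats
 * command directly. Skipped on emulation/FPGA chips.
 */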
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

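/* Poll the completion word written back by the DMAE engine (up to ~10ms) */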
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

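/* A new PMF reads back the port statistics accumulated so far from the
 * area pointed to by port_stx, split into two DMAE reads because the
 * block is larger than a single transfer (DMAE_LEN32_RD_MAX).
 */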
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

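/* Build the PMF DMAE command chain: write host port/function stats out to
 * the MFW areas and read the MAC (EMAC/BMAC/MSTAT) and NIG hardware
 * counters back into the slowpath buffers.
 */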
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				    true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
						 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

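/* Prepare a single DMAE command that copies the host function statistics
 * to the per-function area (func_stx) provided by the MFW.
 */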
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

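/* Fold the BMAC1/BMAC2 hardware counters into the port and ethernet stats;
 * BMAC2 additionally provides PFC frame counters.
 */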
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

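/* Fold the MSTAT block counters (UMAC/XMAC on E3) into the port and
 * ethernet stats; values are accumulated via ADD_STAT64() rather than
 * delta-updated as for the BMAC.
 */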
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

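/* Fold the EMAC (1G MAC) counters into the port and ethernet stats and
 * derive the pause frame totals from the XON/XOFF counters.
 */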
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

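/* Update MAC, BRB and NIG statistics from the buffers filled by the last
 * DMAE pass and mirror the port statistics into the ethernet stats.
 */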
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

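/* Consume the per-queue/per-port statistics written by the storm firmware.
 * Returns -EAGAIN if any storm has not yet completed the last stats ramrod.
 */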
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
				&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
				&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

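/* Derive the generic netdev counters (struct net_device_stats) from the
 * driver's ethernet statistics.
 */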
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

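/* Sum the driver-maintained per-queue counters (driver xoff, rx discards,
 * skb allocation failures and checksum errors) into the ethernet stats.
 */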
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
						&bp->fp[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

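/* Periodic statistics update: refresh HW and storm statistics (if the
 * previous DMAE pass completed) and post the next round of requests.
 */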
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
		       estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

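/* Prepare the final DMAE writes that flush the host port/function
 * statistics back to the MFW areas before statistics are stopped.
 */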
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

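/* Stop statistics: do a last update and, for the PMF, flush the port
 * statistics to the MFW.
 */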
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

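/* Statistics state machine: indexed by current state and event, giving the
 * action to run and the next state to move to.
 */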
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

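/* Drive the statistics state machine: the state transition is made under
 * stats_lock, the action runs outside it. Typically invoked from outside
 * this file, e.g. as bnx2x_stats_handle(bp, STATS_EVENT_UPDATE) from the
 * driver's periodic timer.
 */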
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

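/* Push the current host port statistics block to the MFW area as the
 * baseline (done once from bnx2x_stats_init() for the PMF).
 */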
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * Prepare the statistics ramrod data once, so that afterwards we only
 * have to increment the statistics counter and send the ramrod each
 * time statistics are requested.
 *
 * @param bp
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * stale (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (it will be completed with
	 * the counters equal to zero) - init the counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends on whether the FCoE offloaded
	 * request will be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

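/* (Re)initialize the statistics machinery: resolve the MFW stats addresses,
 * snapshot the NIG baseline counters, prepare the FW stats request and, on
 * a cold start, clear all accumulated statistics.
 */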
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		if (bp->stats_init) {
			memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
			memset(&fp->eth_q_stats_old, 0,
			       sizeof(fp->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

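/* Save statistics that must survive an unload/load cycle (per-queue byte
 * counters, netdev rx_dropped and, for a PMF in MF mode, port FW stats).
 */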
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}