bnx2x_stats.c revision f1deab502206ab7e4470334b7738383c76e4ddd9
/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

/* Fold a firmware hi/lo counter pair (hi word stored first, lo word
 * immediately after it) into a host long.  On 64-bit builds both words
 * are combined via HILO_U64; on 32-bit builds only the low 32 bits fit
 * in a long, so the high word is deliberately dropped.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	/* lockless fast-path test; re-checked under the lock below */
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		/* another context may have posted a ramrod between the
		 * lockless test above and taking the lock */
		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		/* NOTE(review): drv_stats_counter is __le16 (assigned via
		 * cpu_to_le16 above) but is printed raw here, so the value
		 * logged on big-endian hosts is byte-swapped — cosmetic only.
		 */
		DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
			bp->fw_stats_req->hdr.drv_stats_counter);



		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		/* only mark pending on success; on failure the next caller
		 * will retry with a fresh sequence number */
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

/* Kick off the DMAE transfers that were queued in bp->slowpath->dmae[]
 * (via bp->executer_idx) by posting a "loader" DMAE command that chains
 * them, or post the single pre-built bp->stats_dmae command when nothing
 * was queued but function statistics exist.  Completion is signalled by
 * the last command writing DMAE_COMP_VAL into the stats_comp word.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-set completion so emulation/FPGA (slow) chips, which skip
	 * the DMAE entirely, still look "complete" to bnx2x_stats_comp() */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		/* the loader copies the first queued command into the DMAE
		 * command memory at slot loader_idx+1 and triggers it via
		 * the GO register; each command then chains to the next */
		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* E1 DMAE commands are one dword shorter */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

/* Poll (sleeping) for the DMAE completion word to reach DMAE_COMP_VAL.
 * NOTE(review): always returns 1 — callers ignore the value, and a
 * timeout is only reported via BNX2X_ERR.  Also usleep_range(1000, 1000)
 * uses a degenerate range; usleep_range(1000, 2000) would give the
 * scheduler slack — confirm against current upstream before changing.
 */
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

/* On becoming PMF, read the current port statistics block back from the
 * MCP scratchpad (GRC) into host memory in two DMAE chunks (the block is
 * larger than one maximal DMAE read), so the new PMF continues the
 * counters rather than restarting them.  Synchronous: waits for
 * completion before returning.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: only meaningful in multi-function mode on the PMF with a
	 * valid port stats address */
	if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* chunk 1: first DMAE_LEN32_RD_MAX dwords, GRC-chained completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* chunk 2: remainder, PCI completion writes stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Build (but do not post) the full chain of DMAE commands that collects
 * port statistics: host->MCP copies of port/func stats, MAC counters
 * (EMAC, BMAC1/2 or MSTAT depending on the active MAC), and NIG
 * counters.  The final command completes to PCI (stats_comp).
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: only the PMF collects port stats, and only with link up */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	/* push the host copy of the port stats up to the MCP scratchpad */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* likewise for the per-function stats block */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special: its counters live in three discontiguous
	 * register ranges, so three separate DMAE reads are queued */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				/* +8: the last register in each range is a
				 * 64-bit (two-dword) counter */
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats land directly after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* final command: BRB discard/truncate counters; completes to PCI so
	 * bnx2x_stats_comp() can observe the whole chain finishing */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Build the single DMAE command that pushes the host per-function stats
 * block up to the MCP (non-PMF path).  Not posted here; see
 * bnx2x_hw_stats_post().
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* executer_idx stays 0: bnx2x_hw_stats_post() will use the single
	 * pre-built bp->stats_dmae command instead of the loader chain */
	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* (Re)build the appropriate DMAE chain (port stats on the PMF, function
 * stats otherwise), then post both the hardware DMAE transfers and the
 * firmware statistics ramrod.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* State-machine action: this function has just become PMF — wait for any
 * in-flight DMAE, pull the port stats from the MCP, then start normal
 * collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

/* State-machine action: restart collection (e.g. after link change)
 * once any in-flight DMAE has completed.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

/* Accumulate BMAC1/BMAC2 hardware counters (just DMAE'd into mac_stats)
 * into the host_port_stats and eth_stats blocks.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats =
		bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used (by name) inside the UPDATE_STAT64 macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		/* grxpf feeds two accumulators: xoff entries and raw xpf */
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		/* gtxpf likewise feeds both xoff-sent and flow-control-done */
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
	}

	/* export the pause-frame totals accumulated in mac_stx[1] */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

/* Accumulate MSTAT (E3 UMAC/XMAC) hardware counters into the
 * host_port_stats and eth_stats blocks.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new =
		bnx2x_sp(bp, mac_stats.mstat_stats);

	/* the ADD_STAT64 macros below reference "new" and "pstats" by name */
	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	/* rx_grxpf feeds both the xoff-entered and raw xpf accumulators */
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	/* tx_gtxpf likewise feeds xoff-sent and flow-control-done */
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);


	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
	       new->stats_tx.tx_gt1518_hi,
	       estats->etherstatspkts1024octetsto1522octets_lo,
	       new->stats_tx.tx_gt1518_lo);

	/* MSTAT splits >1522-byte frames over several size buckets; sum
	 * them all into the single etherstatspktsover1522octets counter */
	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt2047_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt2047_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt9216_lo);


	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt16383_lo);

	/* export the pause-frame totals accumulated in mac_stx[1] */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

/* Accumulate EMAC hardware counters into the host_port_stats and
 * eth_stats blocks.  The UPDATE_EXTEND_STAT macros reference "new" and
 * "pstats" by name.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* EMAC counts xon and xoff pause frames separately; export their
	 * sum as the generic pause totals */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

/* Process the freshly DMAE'd hardware statistics: dispatch to the
 * MAC-specific accumulator, fold in the NIG BRB discard/truncate deltas,
 * and publish the result into eth_stats.  Returns 0 on success, -1 when
 * no MAC was active (stats cannot be attributed).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used (by name) inside the UPDATE_STAT64_NIG macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
		/* deliberate fall-through to the common NIG accumulation */
	}

	/* NIG counters are 32-bit in hardware; extend the delta since the
	 * previous snapshot into the 64-bit accumulators */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	/* E3 has no NIG egress packet counters (see bnx2x_port_stats_init) */
	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end marks the snapshot as consistent for the MCP */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

/* Copy the statistics reported by the firmware storms into the host
 * fstats/estats/per-queue blocks.  Returns -EAGAIN (caller retries
 * later) when any storm has not yet acknowledged the latest ramrod
 * sequence number.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
771 */ 772 spin_lock_bh(&bp->stats_lock); 773 cur_stats_counter = bp->stats_counter - 1; 774 spin_unlock_bh(&bp->stats_lock); 775 776 /* are storm stats valid? */ 777 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 778 DP(BNX2X_MSG_STATS, "stats not updated by xstorm" 779 " xstorm counter (0x%x) != stats_counter (0x%x)\n", 780 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 781 return -EAGAIN; 782 } 783 784 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 785 DP(BNX2X_MSG_STATS, "stats not updated by ustorm" 786 " ustorm counter (0x%x) != stats_counter (0x%x)\n", 787 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 788 return -EAGAIN; 789 } 790 791 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 792 DP(BNX2X_MSG_STATS, "stats not updated by cstorm" 793 " cstorm counter (0x%x) != stats_counter (0x%x)\n", 794 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 795 return -EAGAIN; 796 } 797 798 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 799 DP(BNX2X_MSG_STATS, "stats not updated by tstorm" 800 " tstorm counter (0x%x) != stats_counter (0x%x)\n", 801 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 802 return -EAGAIN; 803 } 804 805 memcpy(&(fstats->total_bytes_received_hi), 806 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), 807 sizeof(struct host_func_stats) - 2*sizeof(u32)); 808 estats->error_bytes_received_hi = 0; 809 estats->error_bytes_received_lo = 0; 810 estats->etherstatsoverrsizepkts_hi = 0; 811 estats->etherstatsoverrsizepkts_lo = 0; 812 estats->no_buff_discard_hi = 0; 813 estats->no_buff_discard_lo = 0; 814 estats->total_tpa_aggregations_hi = 0; 815 estats->total_tpa_aggregations_lo = 0; 816 estats->total_tpa_aggregated_frames_hi = 0; 817 estats->total_tpa_aggregated_frames_lo = 0; 818 estats->total_tpa_bytes_hi = 0; 819 estats->total_tpa_bytes_lo = 0; 820 821 for_each_eth_queue(bp, i) { 822 struct bnx2x_fastpath *fp = &bp->fp[i]; 823 
struct tstorm_per_queue_stats *tclient = 824 &bp->fw_stats_data->queue_stats[i]. 825 tstorm_queue_statistics; 826 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; 827 struct ustorm_per_queue_stats *uclient = 828 &bp->fw_stats_data->queue_stats[i]. 829 ustorm_queue_statistics; 830 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; 831 struct xstorm_per_queue_stats *xclient = 832 &bp->fw_stats_data->queue_stats[i]. 833 xstorm_queue_statistics; 834 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; 835 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 836 u32 diff; 837 838 DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " 839 "bcast_sent 0x%x mcast_sent 0x%x\n", 840 i, xclient->ucast_pkts_sent, 841 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); 842 843 DP(BNX2X_MSG_STATS, "---------------\n"); 844 845 qstats->total_broadcast_bytes_received_hi = 846 le32_to_cpu(tclient->rcv_bcast_bytes.hi); 847 qstats->total_broadcast_bytes_received_lo = 848 le32_to_cpu(tclient->rcv_bcast_bytes.lo); 849 850 qstats->total_multicast_bytes_received_hi = 851 le32_to_cpu(tclient->rcv_mcast_bytes.hi); 852 qstats->total_multicast_bytes_received_lo = 853 le32_to_cpu(tclient->rcv_mcast_bytes.lo); 854 855 qstats->total_unicast_bytes_received_hi = 856 le32_to_cpu(tclient->rcv_ucast_bytes.hi); 857 qstats->total_unicast_bytes_received_lo = 858 le32_to_cpu(tclient->rcv_ucast_bytes.lo); 859 860 /* 861 * sum to total_bytes_received all 862 * unicast/multicast/broadcast 863 */ 864 qstats->total_bytes_received_hi = 865 qstats->total_broadcast_bytes_received_hi; 866 qstats->total_bytes_received_lo = 867 qstats->total_broadcast_bytes_received_lo; 868 869 ADD_64(qstats->total_bytes_received_hi, 870 qstats->total_multicast_bytes_received_hi, 871 qstats->total_bytes_received_lo, 872 qstats->total_multicast_bytes_received_lo); 873 874 ADD_64(qstats->total_bytes_received_hi, 875 qstats->total_unicast_bytes_received_hi, 876 qstats->total_bytes_received_lo, 877 
qstats->total_unicast_bytes_received_lo); 878 879 qstats->valid_bytes_received_hi = 880 qstats->total_bytes_received_hi; 881 qstats->valid_bytes_received_lo = 882 qstats->total_bytes_received_lo; 883 884 885 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, 886 total_unicast_packets_received); 887 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, 888 total_multicast_packets_received); 889 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, 890 total_broadcast_packets_received); 891 UPDATE_EXTEND_TSTAT(pkts_too_big_discard, 892 etherstatsoverrsizepkts); 893 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); 894 895 SUB_EXTEND_USTAT(ucast_no_buff_pkts, 896 total_unicast_packets_received); 897 SUB_EXTEND_USTAT(mcast_no_buff_pkts, 898 total_multicast_packets_received); 899 SUB_EXTEND_USTAT(bcast_no_buff_pkts, 900 total_broadcast_packets_received); 901 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); 902 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); 903 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); 904 905 qstats->total_broadcast_bytes_transmitted_hi = 906 le32_to_cpu(xclient->bcast_bytes_sent.hi); 907 qstats->total_broadcast_bytes_transmitted_lo = 908 le32_to_cpu(xclient->bcast_bytes_sent.lo); 909 910 qstats->total_multicast_bytes_transmitted_hi = 911 le32_to_cpu(xclient->mcast_bytes_sent.hi); 912 qstats->total_multicast_bytes_transmitted_lo = 913 le32_to_cpu(xclient->mcast_bytes_sent.lo); 914 915 qstats->total_unicast_bytes_transmitted_hi = 916 le32_to_cpu(xclient->ucast_bytes_sent.hi); 917 qstats->total_unicast_bytes_transmitted_lo = 918 le32_to_cpu(xclient->ucast_bytes_sent.lo); 919 /* 920 * sum to total_bytes_transmitted all 921 * unicast/multicast/broadcast 922 */ 923 qstats->total_bytes_transmitted_hi = 924 qstats->total_unicast_bytes_transmitted_hi; 925 qstats->total_bytes_transmitted_lo = 926 qstats->total_unicast_bytes_transmitted_lo; 927 928 ADD_64(qstats->total_bytes_transmitted_hi, 929 qstats->total_broadcast_bytes_transmitted_hi, 930 
qstats->total_bytes_transmitted_lo, 931 qstats->total_broadcast_bytes_transmitted_lo); 932 933 ADD_64(qstats->total_bytes_transmitted_hi, 934 qstats->total_multicast_bytes_transmitted_hi, 935 qstats->total_bytes_transmitted_lo, 936 qstats->total_multicast_bytes_transmitted_lo); 937 938 UPDATE_EXTEND_XSTAT(ucast_pkts_sent, 939 total_unicast_packets_transmitted); 940 UPDATE_EXTEND_XSTAT(mcast_pkts_sent, 941 total_multicast_packets_transmitted); 942 UPDATE_EXTEND_XSTAT(bcast_pkts_sent, 943 total_broadcast_packets_transmitted); 944 945 UPDATE_EXTEND_TSTAT(checksum_discard, 946 total_packets_received_checksum_discarded); 947 UPDATE_EXTEND_TSTAT(ttl0_discard, 948 total_packets_received_ttl0_discarded); 949 950 UPDATE_EXTEND_XSTAT(error_drop_pkts, 951 total_transmitted_dropped_packets_error); 952 953 /* TPA aggregations completed */ 954 UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); 955 /* Number of network frames aggregated by TPA */ 956 UPDATE_EXTEND_USTAT(coalesced_pkts, 957 total_tpa_aggregated_frames); 958 /* Total number of bytes in completed TPA aggregations */ 959 qstats->total_tpa_bytes_lo = 960 le32_to_cpu(uclient->coalesced_bytes.lo); 961 qstats->total_tpa_bytes_hi = 962 le32_to_cpu(uclient->coalesced_bytes.hi); 963 964 /* TPA stats per-function */ 965 ADD_64(estats->total_tpa_aggregations_hi, 966 qstats->total_tpa_aggregations_hi, 967 estats->total_tpa_aggregations_lo, 968 qstats->total_tpa_aggregations_lo); 969 ADD_64(estats->total_tpa_aggregated_frames_hi, 970 qstats->total_tpa_aggregated_frames_hi, 971 estats->total_tpa_aggregated_frames_lo, 972 qstats->total_tpa_aggregated_frames_lo); 973 ADD_64(estats->total_tpa_bytes_hi, 974 qstats->total_tpa_bytes_hi, 975 estats->total_tpa_bytes_lo, 976 qstats->total_tpa_bytes_lo); 977 978 ADD_64(fstats->total_bytes_received_hi, 979 qstats->total_bytes_received_hi, 980 fstats->total_bytes_received_lo, 981 qstats->total_bytes_received_lo); 982 ADD_64(fstats->total_bytes_transmitted_hi, 983 
qstats->total_bytes_transmitted_hi, 984 fstats->total_bytes_transmitted_lo, 985 qstats->total_bytes_transmitted_lo); 986 ADD_64(fstats->total_unicast_packets_received_hi, 987 qstats->total_unicast_packets_received_hi, 988 fstats->total_unicast_packets_received_lo, 989 qstats->total_unicast_packets_received_lo); 990 ADD_64(fstats->total_multicast_packets_received_hi, 991 qstats->total_multicast_packets_received_hi, 992 fstats->total_multicast_packets_received_lo, 993 qstats->total_multicast_packets_received_lo); 994 ADD_64(fstats->total_broadcast_packets_received_hi, 995 qstats->total_broadcast_packets_received_hi, 996 fstats->total_broadcast_packets_received_lo, 997 qstats->total_broadcast_packets_received_lo); 998 ADD_64(fstats->total_unicast_packets_transmitted_hi, 999 qstats->total_unicast_packets_transmitted_hi, 1000 fstats->total_unicast_packets_transmitted_lo, 1001 qstats->total_unicast_packets_transmitted_lo); 1002 ADD_64(fstats->total_multicast_packets_transmitted_hi, 1003 qstats->total_multicast_packets_transmitted_hi, 1004 fstats->total_multicast_packets_transmitted_lo, 1005 qstats->total_multicast_packets_transmitted_lo); 1006 ADD_64(fstats->total_broadcast_packets_transmitted_hi, 1007 qstats->total_broadcast_packets_transmitted_hi, 1008 fstats->total_broadcast_packets_transmitted_lo, 1009 qstats->total_broadcast_packets_transmitted_lo); 1010 ADD_64(fstats->valid_bytes_received_hi, 1011 qstats->valid_bytes_received_hi, 1012 fstats->valid_bytes_received_lo, 1013 qstats->valid_bytes_received_lo); 1014 1015 ADD_64(estats->etherstatsoverrsizepkts_hi, 1016 qstats->etherstatsoverrsizepkts_hi, 1017 estats->etherstatsoverrsizepkts_lo, 1018 qstats->etherstatsoverrsizepkts_lo); 1019 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, 1020 estats->no_buff_discard_lo, qstats->no_buff_discard_lo); 1021 } 1022 1023 ADD_64(fstats->total_bytes_received_hi, 1024 estats->rx_stat_ifhcinbadoctets_hi, 1025 fstats->total_bytes_received_lo, 1026 
estats->rx_stat_ifhcinbadoctets_lo); 1027 1028 ADD_64(fstats->total_bytes_received_hi, 1029 tfunc->rcv_error_bytes.hi, 1030 fstats->total_bytes_received_lo, 1031 tfunc->rcv_error_bytes.lo); 1032 1033 memcpy(estats, &(fstats->total_bytes_received_hi), 1034 sizeof(struct host_func_stats) - 2*sizeof(u32)); 1035 1036 ADD_64(estats->error_bytes_received_hi, 1037 tfunc->rcv_error_bytes.hi, 1038 estats->error_bytes_received_lo, 1039 tfunc->rcv_error_bytes.lo); 1040 1041 ADD_64(estats->etherstatsoverrsizepkts_hi, 1042 estats->rx_stat_dot3statsframestoolong_hi, 1043 estats->etherstatsoverrsizepkts_lo, 1044 estats->rx_stat_dot3statsframestoolong_lo); 1045 ADD_64(estats->error_bytes_received_hi, 1046 estats->rx_stat_ifhcinbadoctets_hi, 1047 estats->error_bytes_received_lo, 1048 estats->rx_stat_ifhcinbadoctets_lo); 1049 1050 if (bp->port.pmf) { 1051 estats->mac_filter_discard = 1052 le32_to_cpu(tport->mac_filter_discard); 1053 estats->mf_tag_discard = 1054 le32_to_cpu(tport->mf_tag_discard); 1055 estats->brb_truncate_discard = 1056 le32_to_cpu(tport->brb_truncate_discard); 1057 estats->mac_discard = le32_to_cpu(tport->mac_discard); 1058 } 1059 1060 fstats->host_func_stats_start = ++fstats->host_func_stats_end; 1061 1062 bp->stats_pending = 0; 1063 1064 return 0; 1065} 1066 1067static void bnx2x_net_stats_update(struct bnx2x *bp) 1068{ 1069 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1070 struct net_device_stats *nstats = &bp->dev->stats; 1071 unsigned long tmp; 1072 int i; 1073 1074 nstats->rx_packets = 1075 bnx2x_hilo(&estats->total_unicast_packets_received_hi) + 1076 bnx2x_hilo(&estats->total_multicast_packets_received_hi) + 1077 bnx2x_hilo(&estats->total_broadcast_packets_received_hi); 1078 1079 nstats->tx_packets = 1080 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + 1081 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + 1082 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); 1083 1084 nstats->rx_bytes = 
bnx2x_hilo(&estats->total_bytes_received_hi); 1085 1086 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1087 1088 tmp = estats->mac_discard; 1089 for_each_rx_queue(bp, i) 1090 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 1091 nstats->rx_dropped = tmp; 1092 1093 nstats->tx_dropped = 0; 1094 1095 nstats->multicast = 1096 bnx2x_hilo(&estats->total_multicast_packets_received_hi); 1097 1098 nstats->collisions = 1099 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); 1100 1101 nstats->rx_length_errors = 1102 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + 1103 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); 1104 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + 1105 bnx2x_hilo(&estats->brb_truncate_hi); 1106 nstats->rx_crc_errors = 1107 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); 1108 nstats->rx_frame_errors = 1109 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); 1110 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); 1111 nstats->rx_missed_errors = 0; 1112 1113 nstats->rx_errors = nstats->rx_length_errors + 1114 nstats->rx_over_errors + 1115 nstats->rx_crc_errors + 1116 nstats->rx_frame_errors + 1117 nstats->rx_fifo_errors + 1118 nstats->rx_missed_errors; 1119 1120 nstats->tx_aborted_errors = 1121 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + 1122 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); 1123 nstats->tx_carrier_errors = 1124 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); 1125 nstats->tx_fifo_errors = 0; 1126 nstats->tx_heartbeat_errors = 0; 1127 nstats->tx_window_errors = 0; 1128 1129 nstats->tx_errors = nstats->tx_aborted_errors + 1130 nstats->tx_carrier_errors + 1131 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); 1132} 1133 1134static void bnx2x_drv_stats_update(struct bnx2x *bp) 1135{ 1136 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1137 int i; 1138 1139 estats->driver_xoff = 0; 1140 
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

/* Check whether management FW (via shmem2 edebug interface) has asked
 * the driver to stop collecting statistics.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic statistics refresh: harvest HW (DMAE) and FW (storm) stats,
 * publish them to the stack, then issue the next collection round.
 * Panics if the storms fail to answer several consecutive requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	/* previous DMAE round not finished yet — try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* stats_pending counts outstanding/unanswered storm requests;
	 * panic once it reaches the limit (tripped on the 4th miss
	 * although the message says 3 — kept as-is).
	 */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i, cos;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			/* NOTE(review): rx_pkt is printed in the second
			 * "rx calls" slot — looks transposed with rx_calls;
			 * confirm intended ordering.
			 */
			pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
				 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
					    fp->rx_comp_cons),
				 le16_to_cpu(*fp->rx_cons_sb),
				 bnx2x_hilo(&qstats->
					    total_unicast_packets_received_hi),
				 fp->rx_calls, fp->rx_pkt);
		}

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_fp_txdata *txdata;
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq;

			pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
				 fp->name,
				 bnx2x_hilo(
					&qstats->total_unicast_packets_transmitted_hi),
				 qstats->driver_xoff);

			for_each_cos_in_tx_queue(fp, cos) {
				txdata = &fp->txdata[cos];
				txq = netdev_get_tx_queue(bp->dev,
						FP_COS_TO_TXQ(fp, cos));

				pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
					 cos,
					 bnx2x_tx_avail(bp, txdata),
					 le16_to_cpu(*txdata->tx_cons_sb),
					 txdata->tx_pkt,
					 (netif_tx_queue_stopped(txq) ?
					  "Xoff" : "Xon")
					 );
			}
		}
	}

	/* kick off the next collection round */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Build and post the DMAE commands that flush the final port/function
 * statistics snapshots out to the shmem areas before stats collection
 * is stopped.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* chain to the function-stats DMAE when one follows,
		 * otherwise complete straight to host memory
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Stop statistics collection: take one final HW/storm snapshot and, on
 * the PMF, flush the last port statistics to shmem.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: action + next state per (state, event). */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine for the given event.
 *
 * NOTE(review): the action is dispatched on bp->stats_state read
 * OUTSIDE stats_lock, and only the state transition below is locked —
 * a concurrent event could dispatch against a stale state; confirm
 * whether callers serialize events externally.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;
	bnx2x_stats_stm[bp->stats_state][event].action(bp);
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* Write the initial (zeroed) port statistics snapshot out to the shmem
 * port_stx area via DMAE.  PMF-only.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Initialize the function statistics area of every vn on this port
 * (PMF duty), temporarily retargeting bp->func_stx at each vn's area.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* on E1x the function mailboxes are interleaved per port */
		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;

		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

/* Read this function's statistics base back from shmem into the
 * func_stats_base buffer via DMAE (non-PMF path at init).
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * bnx2x_prep_fw_stats_req - prepare the FW statistics ramrod data
 * @bp:		driver handle
 *
 * Pre-builds the statistics query request so that later only the
 * statistics counter needs to be incremented before (re)sending the
 * ramrod.
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod.
	 * We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* one query entry per ethernet queue, each pointing at its own
	 * per_queue_stats slot in the DMA-able stats buffer
	 */
	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}
}

/* One-time statistics initialization: reset bookkeeping, read the
 * shmem statistics addresses, snapshot the NIG baseline counters,
 * zero all per-queue/driver stats and prepare the FW stats request.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* NOTE(review): redundant — port was already set to BP_PORT(bp)
	 * at declaration and has not been modified since.
	 */
	port = BP_PORT(bp);
	/* port stats: capture the current NIG counters as the baseline
	 * for the deltas computed during updates
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}