bnx2x_stats.c revision 50f0a562f8cc9ed9d9f7f7380434c3c8646172d5
1/* bnx2x_stats.c: Broadcom Everest network driver. 2 * 3 * Copyright (c) 2007-2011 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation. 8 * 9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 10 * Written by: Eliezer Tamir 11 * Based on code from Michael Chan's bnx2 driver 12 * UDP CSUM errata workaround by Arik Gendelman 13 * Slowpath and fastpath rework by Vladislav Zolotarov 14 * Statistics and Link management by Yitchak Gertner 15 * 16 */ 17 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20#include "bnx2x_stats.h" 21#include "bnx2x_cmn.h" 22 23 24/* Statistics */ 25 26/* 27 * General service functions 28 */ 29 30static inline long bnx2x_hilo(u32 *hiref) 31{ 32 u32 lo = *(hiref + 1); 33#if (BITS_PER_LONG == 64) 34 u32 hi = *hiref; 35 36 return HILO_U64(hi, lo); 37#else 38 return lo; 39#endif 40} 41 42/* 43 * Init service functions 44 */ 45 46/* Post the next statistics ramrod. Protect it with the spin in 47 * order to ensure the strict order between statistics ramrods 48 * (each ramrod has a sequence number passed in a 49 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be 50 * sent in order). 
51 */ 52static void bnx2x_storm_stats_post(struct bnx2x *bp) 53{ 54 if (!bp->stats_pending) { 55 int rc; 56 57 spin_lock_bh(&bp->stats_lock); 58 59 if (bp->stats_pending) { 60 spin_unlock_bh(&bp->stats_lock); 61 return; 62 } 63 64 bp->fw_stats_req->hdr.drv_stats_counter = 65 cpu_to_le16(bp->stats_counter++); 66 67 DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", 68 bp->fw_stats_req->hdr.drv_stats_counter); 69 70 71 72 /* send FW stats ramrod */ 73 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 74 U64_HI(bp->fw_stats_req_mapping), 75 U64_LO(bp->fw_stats_req_mapping), 76 NONE_CONNECTION_TYPE); 77 if (rc == 0) 78 bp->stats_pending = 1; 79 80 spin_unlock_bh(&bp->stats_lock); 81 } 82} 83 84static void bnx2x_hw_stats_post(struct bnx2x *bp) 85{ 86 struct dmae_command *dmae = &bp->stats_dmae; 87 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 88 89 *stats_comp = DMAE_COMP_VAL; 90 if (CHIP_REV_IS_SLOW(bp)) 91 return; 92 93 /* loader */ 94 if (bp->executer_idx) { 95 int loader_idx = PMF_DMAE_C(bp); 96 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 97 true, DMAE_COMP_GRC); 98 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); 99 100 memset(dmae, 0, sizeof(struct dmae_command)); 101 dmae->opcode = opcode; 102 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 103 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 104 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 105 sizeof(struct dmae_command) * 106 (loader_idx + 1)) >> 2; 107 dmae->dst_addr_hi = 0; 108 dmae->len = sizeof(struct dmae_command) >> 2; 109 if (CHIP_IS_E1(bp)) 110 dmae->len--; 111 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; 112 dmae->comp_addr_hi = 0; 113 dmae->comp_val = 1; 114 115 *stats_comp = 0; 116 bnx2x_post_dmae(bp, dmae, loader_idx); 117 118 } else if (bp->func_stx) { 119 *stats_comp = 0; 120 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 121 } 122} 123 124static int bnx2x_stats_comp(struct bnx2x *bp) 125{ 126 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 127 
int cnt = 10; 128 129 might_sleep(); 130 while (*stats_comp != DMAE_COMP_VAL) { 131 if (!cnt) { 132 BNX2X_ERR("timeout waiting for stats finished\n"); 133 break; 134 } 135 cnt--; 136 usleep_range(1000, 1000); 137 } 138 return 1; 139} 140 141/* 142 * Statistics service functions 143 */ 144 145static void bnx2x_stats_pmf_update(struct bnx2x *bp) 146{ 147 struct dmae_command *dmae; 148 u32 opcode; 149 int loader_idx = PMF_DMAE_C(bp); 150 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 151 152 /* sanity */ 153 if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { 154 BNX2X_ERR("BUG!\n"); 155 return; 156 } 157 158 bp->executer_idx = 0; 159 160 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); 161 162 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 163 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); 164 dmae->src_addr_lo = bp->port.port_stx >> 2; 165 dmae->src_addr_hi = 0; 166 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 167 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 168 dmae->len = DMAE_LEN32_RD_MAX; 169 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 170 dmae->comp_addr_hi = 0; 171 dmae->comp_val = 1; 172 173 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 174 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); 175 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 176 dmae->src_addr_hi = 0; 177 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 178 DMAE_LEN32_RD_MAX * 4); 179 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + 180 DMAE_LEN32_RD_MAX * 4); 181 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; 182 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 183 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 184 dmae->comp_val = DMAE_COMP_VAL; 185 186 *stats_comp = 0; 187 bnx2x_hw_stats_post(bp); 188 bnx2x_stats_comp(bp); 189} 190 191static void bnx2x_port_stats_init(struct bnx2x *bp) 192{ 193 
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	/* write host port stats out to the MCP-visible port_stx area */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* likewise for the per-function stats area */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special: its RX counters live in two disjoint register
	 * ranges and need three separate DMAE reads
	 */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats: landed right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* last command in the chain: completes to stats_comp in host
	 * memory so bnx2x_stats_comp() can poll for the whole sequence
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Build the single DMAE command that writes the per-function stats to
 * the func_stx area (used when this function is not the PMF).
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* (Re)program the DMAE commands for this function's role and kick both
 * the HW DMAE collection and the FW statistics ramrod.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Called on PMF transition: wait for any in-flight DMAE, pull the
 * previous PMF's port stats baseline, then restart collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

/* Wait for any in-flight DMAE and restart statistics collection. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

/* Fold freshly DMAE'd BMAC counters into the host port/eth stats.
 * NOTE: the UPDATE_STAT64/DIFF_64/ADD_64 macros below implicitly use the
 * local names "new", "pstats" and "estats" - do not rename them.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats =
bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair for 64-bit counter deltas */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats: accumulate the delta between the new
		 * HW snapshot and the previously stored snapshot
		 */
		DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
			pstats->pfc_frames_tx_hi,
			diff.lo, new->tx_stat_gtpp_lo,
			pstats->pfc_frames_tx_lo);
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
		ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
		       pstats->pfc_frames_tx_lo, diff.lo);

		DIFF_64(diff.hi, new->rx_stat_grpp_hi,
			pstats->pfc_frames_rx_hi,
			diff.lo, new->rx_stat_grpp_lo,
			pstats->pfc_frames_rx_lo);
		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
		ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
		       pstats->pfc_frames_rx_lo, diff.lo);
	}

	/* export pause/PFC counters from mac_stx[1] into the eth stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

/* Fold freshly DMAE'd MSTAT (UMAC/XMAC) counters into the host stats.
 * NOTE: the ADD_STAT64/ADD_64 macros implicitly use the local names
 * "new", "pstats" and "estats" - do not rename them.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
	       new->stats_tx.tx_gt1518_hi,
	       estats->etherstatspkts1024octetsto1522octets_lo,
	       new->stats_tx.tx_gt1518_lo);

	/* MSTAT splits >1522-octet frames over several size buckets;
	 * sum them all into the single "over 1522" counter
	 */
	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt2047_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt2047_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt9216_lo);


	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt16383_lo);

	/* export pause/PFC counters from mac_stx[1] into the eth stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

/* Fold freshly DMAE'd EMAC counters into the host stats.
 * NOTE: UPDATE_EXTEND_STAT implicitly uses the local names "new" and
 * "pstats" - do not rename them.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received/sent = XON + XOFF */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

/* Update host statistics from the completed HW (DMAE) collection:
 * dispatch to the active MAC's update routine, then fold in the NIG
 * counters.  Returns 0 on success, -1 if no MAC is active.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used by the UPDATE_STAT64_NIG macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard -
old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	/* E3 has no NIG egress packet counters (MAC stats cover them) */
	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	/* keep the snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

/* Update host statistics from the FW (storm) statistics buffer. */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid?
*/ 820 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 821 DP(BNX2X_MSG_STATS, "stats not updated by xstorm" 822 " xstorm counter (0x%x) != stats_counter (0x%x)\n", 823 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 824 return -EAGAIN; 825 } 826 827 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 828 DP(BNX2X_MSG_STATS, "stats not updated by ustorm" 829 " ustorm counter (0x%x) != stats_counter (0x%x)\n", 830 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 831 return -EAGAIN; 832 } 833 834 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 835 DP(BNX2X_MSG_STATS, "stats not updated by cstorm" 836 " cstorm counter (0x%x) != stats_counter (0x%x)\n", 837 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 838 return -EAGAIN; 839 } 840 841 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 842 DP(BNX2X_MSG_STATS, "stats not updated by tstorm" 843 " tstorm counter (0x%x) != stats_counter (0x%x)\n", 844 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 845 return -EAGAIN; 846 } 847 848 memcpy(&(fstats->total_bytes_received_hi), 849 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), 850 sizeof(struct host_func_stats) - 2*sizeof(u32)); 851 estats->error_bytes_received_hi = 0; 852 estats->error_bytes_received_lo = 0; 853 estats->etherstatsoverrsizepkts_hi = 0; 854 estats->etherstatsoverrsizepkts_lo = 0; 855 estats->no_buff_discard_hi = 0; 856 estats->no_buff_discard_lo = 0; 857 estats->total_tpa_aggregations_hi = 0; 858 estats->total_tpa_aggregations_lo = 0; 859 estats->total_tpa_aggregated_frames_hi = 0; 860 estats->total_tpa_aggregated_frames_lo = 0; 861 estats->total_tpa_bytes_hi = 0; 862 estats->total_tpa_bytes_lo = 0; 863 864 for_each_eth_queue(bp, i) { 865 struct bnx2x_fastpath *fp = &bp->fp[i]; 866 struct tstorm_per_queue_stats *tclient = 867 &bp->fw_stats_data->queue_stats[i]. 
				tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
				    "bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		/* Per-queue RX byte counters, split by cast type; values
		 * come from the tstorm FW statistics (little-endian).
		 */
		qstats->total_broadcast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_bcast_bytes.hi);
		qstats->total_broadcast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_bcast_bytes.lo);

		qstats->total_multicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_mcast_bytes.hi);
		qstats->total_multicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_mcast_bytes.lo);

		qstats->total_unicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_ucast_bytes.hi);
		qstats->total_unicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_ucast_bytes.lo);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		/* Extend the 32-bit FW packet counters into the 64-bit
		 * qstats counters (delta vs. the old_* snapshots).
		 */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
				    etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* no-buffer drops were counted as received above; subtract
		 * them back out and account them as no_buff_discard instead.
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* Per-queue TX byte counters from the xstorm FW stats */
		qstats->total_broadcast_bytes_transmitted_hi =
			le32_to_cpu(xclient->bcast_bytes_sent.hi);
		qstats->total_broadcast_bytes_transmitted_lo =
			le32_to_cpu(xclient->bcast_bytes_sent.lo);

		qstats->total_multicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->mcast_bytes_sent.hi);
		qstats->total_multicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->mcast_bytes_sent.lo);

		qstats->total_unicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->ucast_bytes_sent.hi);
		qstats->total_unicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->ucast_bytes_sent.lo);
		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_USTAT(coalesced_pkts,
				    total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		qstats->total_tpa_bytes_lo =
			le32_to_cpu(uclient->coalesced_bytes.lo);
		qstats->total_tpa_bytes_hi =
			le32_to_cpu(uclient->coalesced_bytes.hi);

		/* TPA stats per-function */
		ADD_64(estats->total_tpa_aggregations_hi,
		       qstats->total_tpa_aggregations_hi,
		       estats->total_tpa_aggregations_lo,
		       qstats->total_tpa_aggregations_lo);
		ADD_64(estats->total_tpa_aggregated_frames_hi,
		       qstats->total_tpa_aggregated_frames_hi,
		       estats->total_tpa_aggregated_frames_lo,
		       qstats->total_tpa_aggregated_frames_lo);
		ADD_64(estats->total_tpa_bytes_hi,
		       qstats->total_tpa_bytes_hi,
		       estats->total_tpa_bytes_lo,
		       qstats->total_tpa_bytes_lo);

		/* fold this queue's counters into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* account MAC-reported bad octets as received bytes as well */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       tfunc->rcv_error_bytes.hi,
	       fstats->total_bytes_received_lo,
	       tfunc->rcv_error_bytes.lo);

	/* copy the accumulated function stats into estats; the first two
	 * u32s of host_func_stats (presumably the start/end markers —
	 * TODO confirm against bnx2x_hsi.h) are excluded from the copy.
	 */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->error_bytes_received_hi,
	       tfunc->rcv_error_bytes.hi,
	       estats->error_bytes_received_lo,
	       tfunc->rcv_error_bytes.lo);

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide discard counters are only collected by the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
			le32_to_cpu(tport->mac_filter_discard);
		estats->mf_tag_discard =
			le32_to_cpu(tport->mf_tag_discard);
		estats->brb_truncate_discard =
			le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

/* Fold the driver's 64-bit (hi/lo) statistics into the generic
 * struct net_device_stats counters exposed via the netdev.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC discards + per-RX-queue checksum discards */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Re-aggregate the driver-maintained per-queue counters (Xoff events,
 * discards, allocation/checksum failures) into the device totals.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

/* Return true if statistics collection was disabled via the shared
 * memory edebug driver interface.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic statistics update: harvest HW and storm (FW) statistics,
 * refresh the netdev and driver counters, then post the next HW DMAE
 * and FW statistics requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	/* previous DMAE transfer not completed yet - try next time */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* a non-zero return means the FW has not answered; give up after
	 * the counter reaches 3 missed updates in a row.
	 */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i, cos;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
				 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
				 fp->rx_comp_cons),
				 le16_to_cpu(*fp->rx_cons_sb),
				 bnx2x_hilo(&qstats->
					    total_unicast_packets_received_hi),
				 fp->rx_calls, fp->rx_pkt);
		}

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_fp_txdata *txdata;
			struct bnx2x_eth_q_stats
				*qstats = &fp->eth_q_stats;
			struct netdev_queue *txq;

			pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
				 fp->name,
				 bnx2x_hilo(
				 &qstats->total_unicast_packets_transmitted_hi),
				 qstats->driver_xoff);

			/* dump per-CoS TX ring state for this queue */
			for_each_cos_in_tx_queue(fp, cos) {
				txdata = &fp->txdata[cos];
				txq = netdev_get_tx_queue(bp->dev,
							  FP_COS_TO_TXQ(fp, cos));

				pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
					 cos,
					 bnx2x_tx_avail(bp, txdata),
					 le16_to_cpu(*txdata->tx_cons_sb),
					 txdata->tx_pkt,
					 (netif_tx_queue_stopped(txq) ?
					  "Xoff" : "Xon")
					 );
			}
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* DMA the final port and function statistics out to the management FW
 * areas (port_stx / func_stx) when statistics collection is stopping.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a func_stx transfer follows, complete this one to GRC
		 * so the loader chains it; otherwise complete to PCI.
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Stop statistics collection: do one final update and, on success,
 * flush the results to the netdev counters and the management FW.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

/* No-op action for state machine entries with nothing to do */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: action to run and next state, indexed by
 * [current state][event].
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine for the given event. */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	/* the state transition is done under the lock; the action itself
	 * runs outside of it.
	 */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* DMA the (zeroed) host port statistics buffer out to the management
 * FW area pointed to by port_stx; PMF only.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* As PMF, initialize the function statistics area of every vn on this
 * port; bp->func_stx is temporarily repointed at each vn's area and
 * restored afterwards.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);

		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

/* DMA the function statistics base back from the management FW area
 * (func_stx) into the driver's func_stats_base buffer.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 *
 * @param bp
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

/* One-time statistics initialization: read the management FW areas,
 * snapshot the NIG counters, clear all per-queue and device-wide
 * counters and prepare the FW statistics ramrod data.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}