/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 */
15 #include "bnx2x_stats.h"
18 #define BITS_PER_LONG 32
20 #define BITS_PER_LONG 64
23 static inline uint16_t
24 bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
29 /* 'newest' convention - shmem2 contains the size of the port stats */
30 if (SHMEM2_HAS(sc, sizeof_port_stats)) {
31 size = SHMEM2_RD(sc, sizeof_port_stats);
36 /* prevent newer BC from causing buffer overflow */
37 if (res > sizeof(struct host_port_stats)) {
38 res = sizeof(struct host_port_stats);
43 * Older convention - all BCs support the port stats fields up until
44 * the 'not_used' field
47 res = (offsetof(struct host_port_stats, not_used) + 4);
49 /* if PFC stats are supported by the MFW, DMA them as well */
50 if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
51 res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
52 offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
/*
 * Init service functions
 */

/*
 * Post the next statistics ramrod. Protect it with the lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
73 bnx2x_storm_stats_post(struct bnx2x_softc *sc)
77 if (!sc->stats_pending) {
78 if (sc->stats_pending) {
82 sc->fw_stats_req->hdr.drv_stats_counter =
83 htole16(sc->stats_counter++);
85 PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
86 "sending statistics ramrod %d",
87 le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
89 /* adjust the ramrod to include VF queues statistics */
91 /* send FW stats ramrod */
92 rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
93 U64_HI(sc->fw_stats_req_mapping),
94 U64_LO(sc->fw_stats_req_mapping),
95 NONE_CONNECTION_TYPE);
97 sc->stats_pending = 1;
103 bnx2x_hw_stats_post(struct bnx2x_softc *sc)
105 struct dmae_command *dmae = &sc->stats_dmae;
106 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
110 *stats_comp = DMAE_COMP_VAL;
111 if (CHIP_REV_IS_SLOW(sc)) {
115 /* Update MCP's statistics if possible */
117 rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
118 sizeof(sc->func_stats));
122 if (sc->executer_idx) {
123 loader_idx = PMF_DMAE_C(sc);
124 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
125 TRUE, DMAE_COMP_GRC);
126 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
128 memset(dmae, 0, sizeof(struct dmae_command));
129 dmae->opcode = opcode;
130 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
131 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
132 dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
133 sizeof(struct dmae_command) *
134 (loader_idx + 1)) >> 2);
135 dmae->dst_addr_hi = 0;
136 dmae->len = sizeof(struct dmae_command) >> 2;
137 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
138 dmae->comp_addr_hi = 0;
142 bnx2x_post_dmae(sc, dmae, loader_idx);
143 } else if (sc->func_stx) {
145 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
150 bnx2x_stats_comp(struct bnx2x_softc *sc)
152 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
155 while (*stats_comp != DMAE_COMP_VAL) {
157 PMD_DRV_LOG(ERR, sc, "Timeout waiting for stats finished");
/*
 * Statistics service functions
 */
173 bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
175 struct dmae_command *dmae;
177 int loader_idx = PMF_DMAE_C(sc);
178 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
180 if (sc->devinfo.bc_ver <= 0x06001400) {
182 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
183 * BRB registers while the BRB block is in reset. The DMA transfer
184 * below triggers this issue resulting in the DMAE to stop
185 * functioning. Skip this initial stats transfer for old bootcode
186 * versions <= 6.0.20.
191 if (!sc->port.pmf || !sc->port.port_stx) {
192 PMD_DRV_LOG(ERR, sc, "BUG!");
196 sc->executer_idx = 0;
198 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
200 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
201 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
202 dmae->src_addr_lo = (sc->port.port_stx >> 2);
203 dmae->src_addr_hi = 0;
204 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
205 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
206 dmae->len = DMAE_LEN32_RD_MAX;
207 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
208 dmae->comp_addr_hi = 0;
211 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
212 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
213 dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
214 dmae->src_addr_hi = 0;
215 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
216 DMAE_LEN32_RD_MAX * 4);
217 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
218 DMAE_LEN32_RD_MAX * 4);
219 dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
221 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
222 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
223 dmae->comp_val = DMAE_COMP_VAL;
226 bnx2x_hw_stats_post(sc);
227 bnx2x_stats_comp(sc);
231 bnx2x_port_stats_init(struct bnx2x_softc *sc)
233 struct dmae_command *dmae;
234 int port = SC_PORT(sc);
236 int loader_idx = PMF_DMAE_C(sc);
238 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
241 if (!sc->link_vars.link_up || !sc->port.pmf) {
242 PMD_DRV_LOG(ERR, sc, "BUG!");
246 sc->executer_idx = 0;
249 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
250 TRUE, DMAE_COMP_GRC);
252 if (sc->port.port_stx) {
253 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
254 dmae->opcode = opcode;
255 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
256 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
257 dmae->dst_addr_lo = sc->port.port_stx >> 2;
258 dmae->dst_addr_hi = 0;
259 dmae->len = bnx2x_get_port_stats_dma_len(sc);
260 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
261 dmae->comp_addr_hi = 0;
266 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
267 dmae->opcode = opcode;
268 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
269 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
270 dmae->dst_addr_lo = (sc->func_stx >> 2);
271 dmae->dst_addr_hi = 0;
272 dmae->len = (sizeof(struct host_func_stats) >> 2);
273 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
274 dmae->comp_addr_hi = 0;
279 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
280 TRUE, DMAE_COMP_GRC);
282 /* EMAC is special */
283 if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
284 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
286 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
287 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
288 dmae->opcode = opcode;
289 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
290 dmae->src_addr_hi = 0;
291 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
292 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
293 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
294 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
295 dmae->comp_addr_hi = 0;
298 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
299 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
300 dmae->opcode = opcode;
301 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
302 dmae->src_addr_hi = 0;
303 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
304 offsetof(struct emac_stats,
305 rx_stat_falsecarriererrors));
306 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
307 offsetof(struct emac_stats,
308 rx_stat_falsecarriererrors));
310 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
311 dmae->comp_addr_hi = 0;
314 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
315 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
316 dmae->opcode = opcode;
317 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
318 dmae->src_addr_hi = 0;
319 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
320 offsetof(struct emac_stats,
321 tx_stat_ifhcoutoctets));
322 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
323 offsetof(struct emac_stats,
324 tx_stat_ifhcoutoctets));
325 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
326 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
327 dmae->comp_addr_hi = 0;
330 uint32_t tx_src_addr_lo, rx_src_addr_lo;
331 uint16_t rx_len, tx_len;
333 /* configure the params according to MAC type */
334 switch (sc->link_vars.mac_type) {
335 case ELINK_MAC_TYPE_BMAC:
336 mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
337 NIG_REG_INGRESS_BMAC0_MEM;
339 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
340 BIGMAC_REGISTER_TX_STAT_GTBYT */
341 if (CHIP_IS_E1x(sc)) {
343 ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
344 tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
345 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
347 ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
348 rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
349 BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
352 ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
353 tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
354 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
356 ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
357 rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
358 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
363 case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
364 case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
366 mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
367 tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
368 rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
370 (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
372 (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
377 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
378 dmae->opcode = opcode;
379 dmae->src_addr_lo = tx_src_addr_lo;
380 dmae->src_addr_hi = 0;
382 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
383 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
384 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
385 dmae->comp_addr_hi = 0;
389 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
390 dmae->opcode = opcode;
391 dmae->src_addr_hi = 0;
392 dmae->src_addr_lo = rx_src_addr_lo;
394 U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
396 U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
399 dmae->comp_addr_hi = 0;
404 if (!CHIP_IS_E3(sc)) {
405 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
406 dmae->opcode = opcode;
408 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
409 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
410 dmae->src_addr_hi = 0;
411 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
412 offsetof(struct nig_stats,
413 egress_mac_pkt0_lo));
414 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
415 offsetof(struct nig_stats,
416 egress_mac_pkt0_lo));
417 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
418 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
419 dmae->comp_addr_hi = 0;
422 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
423 dmae->opcode = opcode;
425 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
426 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
427 dmae->src_addr_hi = 0;
428 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
429 offsetof(struct nig_stats,
430 egress_mac_pkt1_lo));
431 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
432 offsetof(struct nig_stats,
433 egress_mac_pkt1_lo));
434 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
435 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
436 dmae->comp_addr_hi = 0;
440 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
441 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
442 TRUE, DMAE_COMP_PCI);
444 (port ? NIG_REG_STAT1_BRB_DISCARD :
445 NIG_REG_STAT0_BRB_DISCARD) >> 2;
446 dmae->src_addr_hi = 0;
447 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
448 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
449 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
451 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
452 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
453 dmae->comp_val = DMAE_COMP_VAL;
459 bnx2x_func_stats_init(struct bnx2x_softc *sc)
461 struct dmae_command *dmae = &sc->stats_dmae;
462 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
466 PMD_DRV_LOG(ERR, sc, "BUG!");
470 sc->executer_idx = 0;
471 memset(dmae, 0, sizeof(struct dmae_command));
473 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
474 TRUE, DMAE_COMP_PCI);
475 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
476 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
477 dmae->dst_addr_lo = (sc->func_stx >> 2);
478 dmae->dst_addr_hi = 0;
479 dmae->len = (sizeof(struct host_func_stats) >> 2);
480 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
481 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
482 dmae->comp_val = DMAE_COMP_VAL;
488 bnx2x_stats_start(struct bnx2x_softc *sc)
491 * VFs travel through here as part of the statistics FSM, but no action
499 bnx2x_port_stats_init(sc);
502 else if (sc->func_stx) {
503 bnx2x_func_stats_init(sc);
506 bnx2x_hw_stats_post(sc);
507 bnx2x_storm_stats_post(sc);
/*
 * Transition into the PMF role: wait out any in-flight DMAE, import the
 * previous PMF's port stats from the MCP, then start normal collection.
 */
static void
bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
{
	bnx2x_stats_comp(sc);
	bnx2x_stats_pmf_update(sc);
	bnx2x_stats_start(sc);
}
/*
 * Restart statistics collection after a state change: flush the pending
 * hardware DMAE pass, wait for it, then rebuild and restart collection.
 */
static void
bnx2x_stats_restart(struct bnx2x_softc *sc)
{
	/*
	 * VFs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(sc)) {
		return;
	}

	bnx2x_hw_stats_post(sc);
	bnx2x_stats_comp(sc);
	bnx2x_stats_start(sc);
}
534 bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
536 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
537 struct bnx2x_eth_stats *estats = &sc->eth_stats;
543 if (CHIP_IS_E1x(sc)) {
544 struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);
546 /* the macros below will use "bmac1_stats" type */
547 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
548 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
549 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
550 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
551 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
552 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
553 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
554 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
555 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
557 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
558 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
559 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
560 UPDATE_STAT64(tx_stat_gt127,
561 tx_stat_etherstatspkts65octetsto127octets);
562 UPDATE_STAT64(tx_stat_gt255,
563 tx_stat_etherstatspkts128octetsto255octets);
564 UPDATE_STAT64(tx_stat_gt511,
565 tx_stat_etherstatspkts256octetsto511octets);
566 UPDATE_STAT64(tx_stat_gt1023,
567 tx_stat_etherstatspkts512octetsto1023octets);
568 UPDATE_STAT64(tx_stat_gt1518,
569 tx_stat_etherstatspkts1024octetsto1522octets);
570 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
571 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
572 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
573 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
574 UPDATE_STAT64(tx_stat_gterr,
575 tx_stat_dot3statsinternalmactransmiterrors);
576 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
578 struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
579 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
581 /* the macros below will use "bmac2_stats" type */
582 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
583 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
584 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
585 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
586 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
587 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
588 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
590 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
591 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
592 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
593 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
594 UPDATE_STAT64(tx_stat_gt127,
595 tx_stat_etherstatspkts65octetsto127octets);
596 UPDATE_STAT64(tx_stat_gt255,
597 tx_stat_etherstatspkts128octetsto255octets);
598 UPDATE_STAT64(tx_stat_gt511,
599 tx_stat_etherstatspkts256octetsto511octets);
600 UPDATE_STAT64(tx_stat_gt1023,
601 tx_stat_etherstatspkts512octetsto1023octets);
602 UPDATE_STAT64(tx_stat_gt1518,
603 tx_stat_etherstatspkts1024octetsto1522octets);
604 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
605 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
606 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
607 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
608 UPDATE_STAT64(tx_stat_gterr,
609 tx_stat_dot3statsinternalmactransmiterrors);
610 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
612 /* collect PFC stats */
613 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
614 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
615 ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
616 pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
618 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
619 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
620 ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
621 pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
624 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
625 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
627 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
628 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
630 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
631 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
632 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
633 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
637 bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
639 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
640 struct bnx2x_eth_stats *estats = &sc->eth_stats;
641 struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);
643 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
644 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
645 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
646 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
647 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
648 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
649 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
650 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
651 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
652 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
654 /* collect pfc stats */
655 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
656 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
657 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
658 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
660 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
661 ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
662 ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
663 ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
664 ADD_STAT64(stats_tx.tx_gt1023,
665 tx_stat_etherstatspkts512octetsto1023octets);
666 ADD_STAT64(stats_tx.tx_gt1518,
667 tx_stat_etherstatspkts1024octetsto1522octets);
668 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
670 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
671 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
672 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
674 ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
675 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
677 estats->etherstatspkts1024octetsto1522octets_hi =
678 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
679 estats->etherstatspkts1024octetsto1522octets_lo =
680 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
682 estats->etherstatspktsover1522octets_hi =
683 pstats->mac_stx[1].tx_stat_mac_2047_hi;
684 estats->etherstatspktsover1522octets_lo =
685 pstats->mac_stx[1].tx_stat_mac_2047_lo;
687 ADD_64(estats->etherstatspktsover1522octets_hi,
688 pstats->mac_stx[1].tx_stat_mac_4095_hi,
689 estats->etherstatspktsover1522octets_lo,
690 pstats->mac_stx[1].tx_stat_mac_4095_lo);
692 ADD_64(estats->etherstatspktsover1522octets_hi,
693 pstats->mac_stx[1].tx_stat_mac_9216_hi,
694 estats->etherstatspktsover1522octets_lo,
695 pstats->mac_stx[1].tx_stat_mac_9216_lo);
697 ADD_64(estats->etherstatspktsover1522octets_hi,
698 pstats->mac_stx[1].tx_stat_mac_16383_hi,
699 estats->etherstatspktsover1522octets_lo,
700 pstats->mac_stx[1].tx_stat_mac_16383_lo);
702 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
703 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
705 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
706 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
708 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
709 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
710 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
711 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
715 bnx2x_emac_stats_update(struct bnx2x_softc *sc)
717 struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
718 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
719 struct bnx2x_eth_stats *estats = &sc->eth_stats;
721 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
722 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
723 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
724 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
725 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
726 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
727 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
728 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
729 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
730 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
731 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
732 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
733 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
734 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
735 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
736 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
737 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
738 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
739 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
740 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
741 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
742 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
743 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
744 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
745 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
746 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
747 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
748 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
749 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
750 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
751 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
753 estats->pause_frames_received_hi =
754 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
755 estats->pause_frames_received_lo =
756 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
757 ADD_64(estats->pause_frames_received_hi,
758 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
759 estats->pause_frames_received_lo,
760 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
762 estats->pause_frames_sent_hi =
763 pstats->mac_stx[1].tx_stat_outxonsent_hi;
764 estats->pause_frames_sent_lo =
765 pstats->mac_stx[1].tx_stat_outxonsent_lo;
766 ADD_64(estats->pause_frames_sent_hi,
767 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
768 estats->pause_frames_sent_lo,
769 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
773 bnx2x_hw_stats_update(struct bnx2x_softc *sc)
775 struct nig_stats *new = BNX2X_SP(sc, nig_stats);
776 struct nig_stats *old = &(sc->port.old_nig_stats);
777 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
778 struct bnx2x_eth_stats *estats = &sc->eth_stats;
779 uint32_t lpi_reg, nig_timer_max;
785 switch (sc->link_vars.mac_type) {
786 case ELINK_MAC_TYPE_BMAC:
787 bnx2x_bmac_stats_update(sc);
790 case ELINK_MAC_TYPE_EMAC:
791 bnx2x_emac_stats_update(sc);
794 case ELINK_MAC_TYPE_UMAC:
795 case ELINK_MAC_TYPE_XMAC:
796 bnx2x_mstat_stats_update(sc);
799 case ELINK_MAC_TYPE_NONE: /* unreached */
800 PMD_DRV_LOG(DEBUG, sc,
801 "stats updated by DMAE but no MAC active");
804 default: /* unreached */
805 PMD_DRV_LOG(ERR, sc, "stats update failed, unknown MAC type");
808 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
809 new->brb_discard - old->brb_discard);
810 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
811 new->brb_truncate - old->brb_truncate);
813 if (!CHIP_IS_E3(sc)) {
814 UPDATE_STAT64_NIG(egress_mac_pkt0,
815 etherstatspkts1024octetsto1522octets);
816 UPDATE_STAT64_NIG(egress_mac_pkt1,
817 etherstatspktsover1522octets);
820 rte_memcpy(old, new, sizeof(struct nig_stats));
822 rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
823 sizeof(struct mac_stx));
824 estats->brb_drop_hi = pstats->brb_drop_hi;
825 estats->brb_drop_lo = pstats->brb_drop_lo;
827 pstats->host_port_stats_counter++;
829 if (CHIP_IS_E3(sc)) {
830 lpi_reg = (SC_PORT(sc)) ?
831 MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
832 MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
833 estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
836 if (!BNX2X_NOMCP(sc)) {
837 nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
838 if (nig_timer_max != estats->nig_timer_max) {
839 estats->nig_timer_max = nig_timer_max;
840 PMD_DRV_LOG(ERR, sc, "invalid NIG timer max (%u)",
841 estats->nig_timer_max);
849 bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
851 struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
852 uint16_t cur_stats_counter;
855 * Make sure we use the value of the counter
856 * used for sending the last stats ramrod.
858 cur_stats_counter = (sc->stats_counter - 1);
860 /* are storm stats valid? */
861 if (le16toh(counters->xstats_counter) != cur_stats_counter) {
862 PMD_DRV_LOG(DEBUG, sc,
863 "stats not updated by xstorm, "
864 "counter 0x%x != stats_counter 0x%x",
865 le16toh(counters->xstats_counter), sc->stats_counter);
869 if (le16toh(counters->ustats_counter) != cur_stats_counter) {
870 PMD_DRV_LOG(DEBUG, sc,
871 "stats not updated by ustorm, "
872 "counter 0x%x != stats_counter 0x%x",
873 le16toh(counters->ustats_counter), sc->stats_counter);
877 if (le16toh(counters->cstats_counter) != cur_stats_counter) {
878 PMD_DRV_LOG(DEBUG, sc,
879 "stats not updated by cstorm, "
880 "counter 0x%x != stats_counter 0x%x",
881 le16toh(counters->cstats_counter), sc->stats_counter);
885 if (le16toh(counters->tstats_counter) != cur_stats_counter) {
886 PMD_DRV_LOG(DEBUG, sc,
887 "stats not updated by tstorm, "
888 "counter 0x%x != stats_counter 0x%x",
889 le16toh(counters->tstats_counter), sc->stats_counter);
897 bnx2x_storm_stats_update(struct bnx2x_softc *sc)
899 struct tstorm_per_port_stats *tport =
900 &sc->fw_stats_data->port.tstorm_port_statistics;
901 struct tstorm_per_pf_stats *tfunc =
902 &sc->fw_stats_data->pf.tstorm_pf_statistics;
903 struct host_func_stats *fstats = &sc->func_stats;
904 struct bnx2x_eth_stats *estats = &sc->eth_stats;
905 struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
908 /* vfs stat counter is managed by pf */
909 if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) {
913 estats->error_bytes_received_hi = 0;
914 estats->error_bytes_received_lo = 0;
916 for (i = 0; i < sc->num_queues; i++) {
917 struct bnx2x_fastpath *fp = &sc->fp[i];
918 struct tstorm_per_queue_stats *tclient =
919 &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
920 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
921 struct ustorm_per_queue_stats *uclient =
922 &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
923 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
924 struct xstorm_per_queue_stats *xclient =
925 &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
926 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
927 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
928 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
932 /* PMD_DRV_LOG(DEBUG, sc,
933 "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
934 i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
935 xclient->mcast_pkts_sent);
937 PMD_DRV_LOG(DEBUG, sc, "---------------");
940 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
941 total_broadcast_bytes_received);
942 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
943 total_multicast_bytes_received);
944 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
945 total_unicast_bytes_received);
948 * sum to total_bytes_received all
949 * unicast/multicast/broadcast
951 qstats->total_bytes_received_hi =
952 qstats->total_broadcast_bytes_received_hi;
953 qstats->total_bytes_received_lo =
954 qstats->total_broadcast_bytes_received_lo;
956 ADD_64(qstats->total_bytes_received_hi,
957 qstats->total_multicast_bytes_received_hi,
958 qstats->total_bytes_received_lo,
959 qstats->total_multicast_bytes_received_lo);
961 ADD_64(qstats->total_bytes_received_hi,
962 qstats->total_unicast_bytes_received_hi,
963 qstats->total_bytes_received_lo,
964 qstats->total_unicast_bytes_received_lo);
966 qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
967 qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
969 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
970 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
971 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
972 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
973 etherstatsoverrsizepkts, 32);
974 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
976 SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
977 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
978 total_multicast_packets_received);
979 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
980 total_broadcast_packets_received);
981 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
982 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
983 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
985 UPDATE_QSTAT(xclient->bcast_bytes_sent,
986 total_broadcast_bytes_transmitted);
987 UPDATE_QSTAT(xclient->mcast_bytes_sent,
988 total_multicast_bytes_transmitted);
989 UPDATE_QSTAT(xclient->ucast_bytes_sent,
990 total_unicast_bytes_transmitted);
993 * sum to total_bytes_transmitted all
994 * unicast/multicast/broadcast
996 qstats->total_bytes_transmitted_hi =
997 qstats->total_unicast_bytes_transmitted_hi;
998 qstats->total_bytes_transmitted_lo =
999 qstats->total_unicast_bytes_transmitted_lo;
1001 ADD_64(qstats->total_bytes_transmitted_hi,
1002 qstats->total_broadcast_bytes_transmitted_hi,
1003 qstats->total_bytes_transmitted_lo,
1004 qstats->total_broadcast_bytes_transmitted_lo);
1006 ADD_64(qstats->total_bytes_transmitted_hi,
1007 qstats->total_multicast_bytes_transmitted_hi,
1008 qstats->total_bytes_transmitted_lo,
1009 qstats->total_multicast_bytes_transmitted_lo);
1011 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
1012 total_unicast_packets_transmitted);
1013 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1014 total_multicast_packets_transmitted);
1015 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1016 total_broadcast_packets_transmitted);
1018 UPDATE_EXTEND_TSTAT(checksum_discard,
1019 total_packets_received_checksum_discarded);
1020 UPDATE_EXTEND_TSTAT(ttl0_discard,
1021 total_packets_received_ttl0_discarded);
1023 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1024 total_transmitted_dropped_packets_error);
1026 UPDATE_FSTAT_QSTAT(total_bytes_received);
1027 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1028 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1029 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1030 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1031 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1032 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1033 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1034 UPDATE_FSTAT_QSTAT(valid_bytes_received);
1037 ADD_64(estats->total_bytes_received_hi,
1038 estats->rx_stat_ifhcinbadoctets_hi,
1039 estats->total_bytes_received_lo,
1040 estats->rx_stat_ifhcinbadoctets_lo);
1042 ADD_64_LE(estats->total_bytes_received_hi,
1043 tfunc->rcv_error_bytes.hi,
1044 estats->total_bytes_received_lo,
1045 tfunc->rcv_error_bytes.lo);
1047 ADD_64_LE(estats->error_bytes_received_hi,
1048 tfunc->rcv_error_bytes.hi,
1049 estats->error_bytes_received_lo,
1050 tfunc->rcv_error_bytes.lo);
1052 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1054 ADD_64(estats->error_bytes_received_hi,
1055 estats->rx_stat_ifhcinbadoctets_hi,
1056 estats->error_bytes_received_lo,
1057 estats->rx_stat_ifhcinbadoctets_lo);
1060 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1061 UPDATE_FW_STAT(mac_filter_discard);
1062 UPDATE_FW_STAT(mf_tag_discard);
1063 UPDATE_FW_STAT(brb_truncate_discard);
1064 UPDATE_FW_STAT(mac_discard);
1067 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1069 sc->stats_pending = 0;
/*
 * bnx2x_drv_stats_update() - Fold software (driver-maintained) per-queue
 * counters into the device-wide sc->eth_stats block.
 *
 * For each fast-path queue the UPDATE_ESTAT_QSTAT() macro (defined in
 * bnx2x_stats.h) accumulates the queue counter — presumably together with
 * its *_old shadow from eth_q_stats_old — into the same-named field of
 * estats; confirm exact semantics against the macro definition.
 *
 * Unlike the HW/storm paths, these counters are updated purely by the
 * driver (rx/tx call counts, checksum-offload counts, mbuf bookkeeping),
 * so no DMAE or ramrod traffic is involved here.
 *
 * NOTE(review): this listing is missing some physical lines (braces/locals)
 * of the original file; the visible code lines are preserved verbatim.
 */
1075 bnx2x_drv_stats_update(struct bnx2x_softc *sc)
1077 struct bnx2x_eth_stats *estats = &sc->eth_stats;
1080 for (i = 0; i < sc->num_queues; i++) {
1081 struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1082 struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
/* RX-path software counters */
1084 UPDATE_ESTAT_QSTAT(rx_calls);
1085 UPDATE_ESTAT_QSTAT(rx_pkts);
1086 UPDATE_ESTAT_QSTAT(rx_soft_errors);
1087 UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1088 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1089 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1090 UPDATE_ESTAT_QSTAT(rx_budget_reached);
/* TX-path software counters */
1091 UPDATE_ESTAT_QSTAT(tx_pkts);
1092 UPDATE_ESTAT_QSTAT(tx_soft_errors);
1093 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1094 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1095 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1096 UPDATE_ESTAT_QSTAT(tx_encap_failures);
1097 UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1098 UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1099 UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1100 UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1101 UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1102 UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1103 UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1104 UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1106 /* mbuf driver statistics */
1107 UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1108 UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1109 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1110 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1112 /* track the number of allocated mbufs */
1113 UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1114 UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
/*
 * bnx2x_edebug_stats_stopped() - Query the eDebug driver interface word in
 * shmem2 (edebug_driver_if[1]) for a request to disable statistics.
 *
 * Presumably returns TRUE when the DISABLE_STAT opcode is present and FALSE
 * otherwise — the return statements are not visible in this truncated
 * listing; confirm against the full source. Caller (bnx2x_stats_update)
 * uses a true result to skip the update cycle entirely.
 */
1119 bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
/* Only read the word if this (older?) shmem2 layout actually contains it */
1123 if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1124 val = SHMEM2_RD(sc, edebug_driver_if[1]);
1126 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
/*
 * bnx2x_stats_update() - Periodic statistics refresh, driven from the
 * STATS_EVENT_UPDATE edge of the state machine.
 *
 * Sequence (per visible lines; branch structure for PMF vs. non-PMF vs. VF
 * is partly hidden by this truncated listing — confirm against full source):
 *   - bail out early if eDebug disabled statistics collection;
 *   - fold storm (FW) stats, then kick the next HW DMAE batch and the next
 *     stats ramrod;
 *   - verify the previous DMAE batch completed (*stats_comp == DMAE_COMP_VAL)
 *     before consuming HW counters;
 *   - if storm stats failed to advance, count the miss in sc->stats_pending
 *     and panic after 3 consecutive stale cycles;
 *   - finally fold the driver-software counters.
 */
1135 bnx2x_stats_update(struct bnx2x_softc *sc)
1137 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1139 if (bnx2x_edebug_stats_stopped(sc)) {
1145 bnx2x_storm_stats_update(sc);
1146 bnx2x_hw_stats_post(sc);
1147 bnx2x_storm_stats_post(sc);
/* Previous DMAE completion value must be in place before reading HW stats */
1150 if (*stats_comp != DMAE_COMP_VAL) {
1155 bnx2x_hw_stats_update(sc);
/* Non-zero return here means the FW stats counter did not advance */
1158 if (bnx2x_storm_stats_update(sc)) {
1159 if (sc->stats_pending++ == 3) {
1160 rte_panic("storm stats not updated for 3 times");
1166 * VF doesn't collect HW statistics, and doesn't get completions,
1167 * performs only update.
1169 bnx2x_storm_stats_update(sc);
1172 bnx2x_drv_stats_update(sc);
/*
 * bnx2x_port_stats_stop() - Build the DMAE command list that flushes the
 * final statistics snapshot back to management FW before stats stop.
 *
 * Two PCI->GRC copies are queued in the BNX2X_SP dmae array:
 *   1. host_port_stats  -> sc->port.port_stx  (only if port_stx is set);
 *   2. host_func_stats  -> sc->func_stx.
 * GRC destination addresses are in 32-bit-word units, hence the ">> 2"
 * shifts on both dst_addr and the loader-register completion address.
 * Completion is signalled either by chaining to the next loader DMAE
 * (DMAE_COMP_GRC) or by writing DMAE_COMP_VAL into the stats_comp word
 * (DMAE_COMP_PCI) — the if/else lines selecting between these are partly
 * missing from this truncated listing; confirm against the full source.
 */
1176 bnx2x_port_stats_stop(struct bnx2x_softc *sc)
1178 struct dmae_command *dmae;
1180 int loader_idx = PMF_DMAE_C(sc);
1181 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Restart command-list construction from slot 0 */
1183 sc->executer_idx = 0;
1185 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
/* Copy #1: port statistics to the management FW scratchpad (if present) */
1187 if (sc->port.port_stx) {
1188 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1191 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1193 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1196 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1197 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
1198 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1199 dmae->dst_addr_hi = 0;
/* Length depends on the BC/shmem2 convention (see the dma-len helper) */
1200 dmae->len = bnx2x_get_port_stats_dma_len(sc);
1202 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1203 dmae->comp_addr_hi = 0;
1206 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1207 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1208 dmae->comp_val = DMAE_COMP_VAL;
/* Copy #2: function statistics; always completes via PCI write */
1215 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1216 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1217 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
1218 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
1219 dmae->dst_addr_lo = (sc->func_stx >> 2);
1220 dmae->dst_addr_hi = 0;
1221 dmae->len = (sizeof(struct host_func_stats) >> 2);
1222 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1223 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1224 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bnx2x_stats_stop() - STATS_EVENT_STOP action: drain the pending DMAE
 * completion, grab one final HW + storm stats snapshot, then (if anything
 * updated) flush port/func stats to management FW via bnx2x_port_stats_stop()
 * and wait for that last DMAE batch to finish.
 *
 * The conditional lines gating the final flush on `update` (and on PMF/
 * port_stx state) are not visible in this truncated listing — confirm
 * against the full source.
 */
1231 bnx2x_stats_stop(struct bnx2x_softc *sc)
1233 uint8_t update = FALSE;
/* Wait for any in-flight DMAE statistics transfer */
1235 bnx2x_stats_comp(sc);
/* A zero return means the snapshot advanced, i.e. there is data to save */
1238 update = bnx2x_hw_stats_update(sc) == 0;
1241 update |= bnx2x_storm_stats_update(sc) == 0;
1246 bnx2x_port_stats_stop(sc);
/* Execute the queued DMAE commands and wait for completion */
1249 bnx2x_hw_stats_post(sc);
1250 bnx2x_stats_comp(sc);
/*
 * bnx2x_stats_do_nothing() - No-op action used by the statistics state
 * machine for event/state combinations that require no work.
 */
1255 bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
/*
 * Statistics state machine: indexed by [current state][event], each entry
 * names the action callback and the state to transition to.  Rows are
 * STATS_STATE_DISABLED and STATS_STATE_ENABLED; columns follow the
 * bnx2x_stats_event order (PMF, LINK_UP, UPDATE, STOP) per the inline
 * comments.  Consumed by bnx2x_stats_handle().
 */
1260 static const struct {
1261 void (*action)(struct bnx2x_softc *sc);
1262 enum bnx2x_stats_state next_state;
1263 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1265 /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
1266 /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED },
1267 /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
1268 /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
1271 /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED },
1272 /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED },
1273 /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED },
1274 /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED }
/*
 * bnx2x_stats_handle() - Drive the statistics state machine.
 *
 * Looks up bnx2x_stats_stm[state][event], commits the next state BEFORE
 * invoking the action (so a re-entrant event observes the new state), then
 * runs the action callback.  Events are ignored entirely when the device
 * has paniced.  Transitions are logged at DEBUG level except for the
 * high-frequency UPDATE event, to avoid log spam.
 */
1278 void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
1280 enum bnx2x_stats_state state;
1282 if (unlikely(sc->panic)) {
1286 state = sc->stats_state;
/* Advance state first, then run the action for the *previous* state */
1287 sc->stats_state = bnx2x_stats_stm[state][event].next_state;
1289 bnx2x_stats_stm[state][event].action(sc);
1291 if (event != STATS_EVENT_UPDATE) {
1292 PMD_DRV_LOG(DEBUG, sc,
1293 "state %d -> event %d -> state %d",
1294 state, event, sc->stats_state);
/*
 * bnx2x_port_stats_base_init() - One-shot DMAE copy of the host port-stats
 * buffer to the management FW scratchpad (port_stx), used to seed the base
 * snapshot at init time.
 *
 * Must only be called on the PMF with a valid port_stx; both are sanity-
 * checked (the early-return line after the "BUG!" log is not visible in
 * this truncated listing).  A single PCI->GRC DMAE command is queued with
 * a PCI completion into the stats_comp word, then executed and waited on.
 */
1299 bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
1301 struct dmae_command *dmae;
1302 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Caller contract: PMF only, and management FW must have given a port_stx */
1305 if (!sc->port.pmf || !sc->port.port_stx) {
1306 PMD_DRV_LOG(ERR, sc, "BUG!");
1310 sc->executer_idx = 0;
1312 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1313 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1314 TRUE, DMAE_COMP_PCI);
1315 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1316 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
/* GRC addresses are expressed in 32-bit words, hence the >> 2 */
1317 dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1318 dmae->dst_addr_hi = 0;
1319 dmae->len = bnx2x_get_port_stats_dma_len(sc);
1320 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1321 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1322 dmae->comp_val = DMAE_COMP_VAL;
/* Kick the queued DMAE command and block until it completes */
1325 bnx2x_hw_stats_post(sc);
1326 bnx2x_stats_comp(sc);
1330 * This function will prepare the statistics ramrod data the way
1331 * we will only have to increment the statistics counter and
1332 * send the ramrod each time we have to.
1335 bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
1338 int first_queue_query_index;
1339 struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1340 rte_iova_t cur_data_offset;
1341 struct stats_query_entry *cur_query_entry;
/* Header: total number of query entries; counter starts at 0 */
1343 stats_hdr->cmd_num = sc->fw_stats_num;
1344 stats_hdr->drv_stats_counter = 0;
1347 * The storm_counters struct contains the counters of completed
1348 * statistics requests per storm which are incremented by FW
1349 * each time it completes handling a statistics ramrod. We will
1350 * check these counters in the timer handler and discard a
1351 * (statistics) ramrod completion.
1353 cur_data_offset = (sc->fw_stats_data_mapping +
1354 offsetof(struct bnx2x_fw_stats_data, storm_counters));
/* Addresses handed to FW are little-endian 64-bit split into hi/lo */
1356 stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1357 stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1360 * Prepare the first stats ramrod (will be completed with
1361 * the counters equal to zero) - init counters to something different.
1363 memset(&sc->fw_stats_data->storm_counters, 0xff,
1364 sizeof(struct stats_counter));
1366 /**** Port FW statistics data ****/
1367 cur_data_offset = (sc->fw_stats_data_mapping +
1368 offsetof(struct bnx2x_fw_stats_data, port));
1370 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1372 cur_query_entry->kind = STATS_TYPE_PORT;
1373 /* For port query index is a DON'T CARE */
1374 cur_query_entry->index = SC_PORT(sc);
1375 /* For port query funcID is a DON'T CARE */
1376 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1377 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1378 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1380 /**** PF FW statistics data ****/
1381 cur_data_offset = (sc->fw_stats_data_mapping +
1382 offsetof(struct bnx2x_fw_stats_data, pf));
1384 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1386 cur_query_entry->kind = STATS_TYPE_PF;
1387 /* For PF query index is a DON'T CARE */
1388 cur_query_entry->index = SC_PORT(sc);
1389 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1390 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1391 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1393 /**** Clients' queries ****/
1394 cur_data_offset = (sc->fw_stats_data_mapping +
1395 offsetof(struct bnx2x_fw_stats_data, queue_stats));
1398 * First queue query index depends whether FCoE offloaded request will
1399 * be included in the ramrod
1401 first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);
/* One STATS_TYPE_QUEUE entry per fast-path queue, data laid out back
 * to back as struct per_queue_stats in the fw_stats_data buffer. */
1403 for (i = 0; i < sc->num_queues; i++) {
1405 &sc->fw_stats_req->query[first_queue_query_index + i];
1407 cur_query_entry->kind = STATS_TYPE_QUEUE;
1408 cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
1409 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1410 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1411 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1413 cur_data_offset += sizeof(struct per_queue_stats);
/*
 * bnx2x_memset_stats() - Reset statistics bookkeeping to a clean state.
 *
 * Always clears the per-queue old_{t,u,x}client shadow snapshots; the
 * accumulated counters (per-queue eth_q_stats[_old] and the device-wide
 * blocks) are wiped only on a first-time init (sc->stats_init), so a
 * re-init preserves running totals.  Finishes by parking the state machine
 * in DISABLED, re-seeding the management-FW port stats base on the PMF,
 * and marking initialization complete.
 */
1417 void bnx2x_memset_stats(struct bnx2x_softc *sc)
1421 /* function stats */
1422 for (i = 0; i < sc->num_queues; i++) {
1423 struct bnx2x_fastpath *fp = &sc->fp[i];
/* Old snapshots are always invalidated */
1425 memset(&fp->old_tclient, 0,
1426 sizeof(fp->old_tclient));
1427 memset(&fp->old_uclient, 0,
1428 sizeof(fp->old_uclient));
1429 memset(&fp->old_xclient, 0,
1430 sizeof(fp->old_xclient));
/* Accumulated queue counters survive unless this is the first init */
1431 if (sc->stats_init) {
1432 memset(&fp->eth_q_stats, 0,
1433 sizeof(fp->eth_q_stats));
1434 memset(&fp->eth_q_stats_old, 0,
1435 sizeof(fp->eth_q_stats_old));
/* Device-wide accumulators, first init only */
1439 if (sc->stats_init) {
1440 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1441 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1442 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1443 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1444 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1447 sc->stats_state = STATS_STATE_DISABLED;
1449 if (sc->port.pmf && sc->port.port_stx)
1450 bnx2x_port_stats_base_init(sc);
1452 /* mark the end of statistics initialization */
1453 sc->stats_init = false;
/*
 * bnx2x_stats_init() - Full statistics subsystem initialization.
 *
 * Resets the ramrod/DMAE sequencing counters, discovers the management-FW
 * scratchpad addresses (port_stx / func_stx) from shmem, snapshots the NIG
 * baseline counters, clears per-queue shadows, builds the reusable FW stats
 * ramrod via bnx2x_prep_fw_stats_req(), zeroes the accumulators on a first
 * init, and leaves the state machine DISABLED with the PMF base snapshot
 * pushed.  Some branch/brace lines are missing from this truncated listing;
 * the visible code lines are preserved verbatim.
 */
1457 bnx2x_stats_init(struct bnx2x_softc *sc)
1459 int /*abs*/port = SC_PORT(sc);
1460 int mb_idx = SC_FW_MB_IDX(sc);
1463 sc->stats_pending = 0;
1464 sc->executer_idx = 0;
1465 sc->stats_counter = 0;
1467 sc->stats_init = TRUE;
1469 /* port and func stats for management */
1470 if (!BNX2X_NOMCP(sc)) {
1471 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1472 sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
/* No management FW: nothing to report port stats into */
1474 sc->port.port_stx = 0;
1478 PMD_DRV_LOG(DEBUG, sc, "port_stx 0x%x func_stx 0x%x",
1479 sc->port.port_stx, sc->func_stx);
1481 /* pmf should retrieve port statistics from SP on a non-init */
1482 if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1483 bnx2x_stats_handle(sc, STATS_EVENT_PMF);
/* Baseline NIG HW counters so later reads can be delta'd */
1488 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1489 sc->port.old_nig_stats.brb_discard =
1490 REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1491 sc->port.old_nig_stats.brb_truncate =
1492 REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
/* E3 has no per-port egress MAC packet counters at these offsets */
1493 if (!CHIP_IS_E3(sc)) {
1494 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1495 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1496 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1497 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1500 /* function stats */
1501 for (i = 0; i < sc->num_queues; i++) {
1502 memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1503 memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1504 memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
/* Accumulated counters are only wiped on the first-ever init */
1505 if (sc->stats_init) {
1506 memset(&sc->fp[i].eth_q_stats, 0,
1507 sizeof(sc->fp[i].eth_q_stats));
1508 memset(&sc->fp[i].eth_q_stats_old, 0,
1509 sizeof(sc->fp[i].eth_q_stats_old));
1513 /* prepare statistics ramrod data */
1514 bnx2x_prep_fw_stats_req(sc);
1516 if (sc->stats_init) {
1517 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1518 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1519 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1520 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1521 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1523 /* Clean SP from previous statistics */
1525 memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1526 bnx2x_func_stats_init(sc);
1527 bnx2x_hw_stats_post(sc);
1528 bnx2x_stats_comp(sc);
1532 sc->stats_state = STATS_STATE_DISABLED;
1534 if (sc->port.pmf && sc->port.port_stx) {
1535 bnx2x_port_stats_base_init(sc);
1538 /* mark the end of statistics initialization */
1539 sc->stats_init = FALSE;
1543 bnx2x_save_statistics(struct bnx2x_softc *sc)
1547 /* save queue statistics */
1548 for (i = 0; i < sc->num_queues; i++) {
1549 struct bnx2x_fastpath *fp = &sc->fp[i];
1550 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1551 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1553 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1554 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1555 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1556 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1557 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1558 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1559 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1560 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1561 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1562 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1563 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1564 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1567 /* store port firmware statistics */
1569 struct bnx2x_eth_stats *estats = &sc->eth_stats;
1570 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1571 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
1573 fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1574 fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1575 fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1576 fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
1579 UPDATE_FW_STAT_OLD(mac_filter_discard);
1580 UPDATE_FW_STAT_OLD(mf_tag_discard);
1581 UPDATE_FW_STAT_OLD(brb_truncate_discard);
1582 UPDATE_FW_STAT_OLD(mac_discard);