2 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9 * Copyright (c) 2015 QLogic Corporation.
10 * All rights reserved.
13 * See LICENSE.bnx2x_pmd for copyright and licensing details.
17 #include "bnx2x_stats.h"
20 #define BITS_PER_LONG 32
22 #define BITS_PER_LONG 64
/*
 * bnx2x_get_port_stats_dma_len() - compute how much of the host_port_stats
 * buffer should be DMAed to/from the management firmware (MFW).
 *
 * Newer bootcodes publish the exact size via shmem2; older ones imply a
 * fixed layout ending at 'not_used', optionally extended by the PFC
 * counters when the bootcode version supports them.
 *
 * NOTE(review): interior lines (local declarations, else branch, the
 * final return) are missing from this dump — see upstream bnx2x_stats.c.
 */
25 static inline uint16_t
26 bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
31 /* 'newest' convention - shmem2 contains the size of the port stats */
32 if (SHMEM2_HAS(sc, sizeof_port_stats)) {
33 size = SHMEM2_RD(sc, sizeof_port_stats)
/* Clamp to the driver's structure size so a newer BC advertising a
 * larger size cannot make us DMA past the end of host_port_stats. */
38 /* prevent newer BC from causing buffer overflow */
39 if (res > sizeof(struct host_port_stats)) {
40 res = sizeof(struct host_port_stats);
45 * Older convention - all BCs support the port stats fields up until
46 * the 'not_used' field
49 res = (offsetof(struct host_port_stats, not_used) + 4);
51 /* if PFC stats are supported by the MFW, DMA them as well */
52 if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
/* Add the span covering pfc_frames_tx_hi .. pfc_frames_rx_lo. */
53 res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
54 offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
64 * Init service functions
68 * Post the next statistics ramrod. Protect it with the lock in
69 * order to ensure the strict order between statistics ramrods
70 * (each ramrod has a sequence number passed in a
71 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
/*
 * bnx2x_storm_stats_post() - post the next statistics query ramrod to the
 * firmware, tagging it with an incrementing driver counter so completions
 * can be matched strictly in order (see the comment block above).
 *
 * NOTE(review): interior lines (locals, braces, early return) are missing
 * from this dump; the nested stats_pending re-check at line 80 exists in
 * the upstream source as well.
 */
75 bnx2x_storm_stats_post(struct bnx2x_softc *sc)
79 if (!sc->stats_pending) {
80 if (sc->stats_pending) {
/* Stamp the request with the per-driver sequence number (LE on wire). */
84 sc->fw_stats_req->hdr.drv_stats_counter =
85 htole16(sc->stats_counter++);
87 PMD_DEBUG_PERIODIC_LOG(DEBUG,
88 "sending statistics ramrod %d",
89 le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
91 /* adjust the ramrod to include VF queues statistics */
93 /* send FW stats ramrod */
94 rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
95 U64_HI(sc->fw_stats_req_mapping),
96 U64_LO(sc->fw_stats_req_mapping),
97 NONE_CONNECTION_TYPE);
/* Mark a query in flight; cleared when the storm stats are consumed. */
99 sc->stats_pending = 1;
/*
 * bnx2x_hw_stats_post() - kick off the hardware statistics DMAE transfer.
 * If a chain of executer DMAE commands was prepared (executer_idx != 0),
 * a "loader" command is built that DMAs the first chained command into
 * the DMAE command memory, which then triggers the rest via GRC
 * completions; otherwise a single function-stats command is posted.
 *
 * NOTE(review): interior lines (locals, braces, the slow-chip branch
 * body) are missing from this dump.
 */
105 bnx2x_hw_stats_post(struct bnx2x_softc *sc)
107 struct dmae_command *dmae = &sc->stats_dmae;
108 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Pre-set the completion word; the final DMAE writes it back. */
112 *stats_comp = DMAE_COMP_VAL;
113 if (CHIP_REV_IS_SLOW(sc)) {
117 /* Update MCP's statistics if possible */
119 rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
120 sizeof(sc->func_stats));
124 if (sc->executer_idx) {
125 loader_idx = PMF_DMAE_C(sc);
126 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
127 TRUE, DMAE_COMP_GRC);
/* The loader must not reset the source, or the chain would stall. */
128 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
130 memset(dmae, 0, sizeof(struct dmae_command));
131 dmae->opcode = opcode;
/* Source: the first prepared executer command in host SP memory. */
132 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
133 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
/* Destination: DMAE command memory slot loader_idx + 1 (dword addr). */
134 dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
135 sizeof(struct dmae_command) *
136 (loader_idx + 1)) >> 2);
137 dmae->dst_addr_hi = 0;
138 dmae->len = sizeof(struct dmae_command) >> 2;
/* Completion "go" register of the next slot fires the loaded command. */
139 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
140 dmae->comp_addr_hi = 0;
144 bnx2x_post_dmae(sc, dmae, loader_idx);
145 } else if (sc->func_stx) {
147 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
/*
 * bnx2x_stats_comp() - busy-wait for the statistics DMAE completion word
 * to be written back by hardware, logging an error on timeout.
 *
 * NOTE(review): the loop body (delay/decrement) and return are missing
 * from this dump.
 */
152 bnx2x_stats_comp(struct bnx2x_softc *sc)
154 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
157 while (*stats_comp != DMAE_COMP_VAL) {
159 PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
171 * Statistics service functions
/*
 * bnx2x_stats_pmf_update() - when this function becomes the Port
 * Management Function (PMF), pull the current port statistics from the
 * chip (port_stx) into host memory with two chained DMAE reads, so the
 * new PMF continues counting from the previous PMF's values.
 *
 * NOTE(review): interior lines (locals, braces, returns) are missing
 * from this dump.
 */
175 bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
177 struct dmae_command *dmae;
179 int loader_idx = PMF_DMAE_C(sc);
180 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Skip entirely on old bootcode to avoid a known GRC-timeout erratum. */
182 if (sc->devinfo.bc_ver <= 0x06001400) {
184 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
185 * BRB registers while the BRB block is in reset. The DMA transfer
186 * below triggers this issue resulting in the DMAE to stop
187 * functioning. Skip this initial stats transfer for old bootcode
188 * versions <= 6.0.20.
/* Only meaningful for a PMF with a valid port stats GRC address. */
193 if (!sc->port.pmf || !sc->port.port_stx) {
194 PMD_DRV_LOG(ERR, "BUG!");
198 sc->executer_idx = 0;
200 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
/* First DMAE: read the first DMAE_LEN32_RD_MAX dwords, GRC-completed
 * so it chains into the second command. */
202 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
203 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
204 dmae->src_addr_lo = (sc->port.port_stx >> 2);
205 dmae->src_addr_hi = 0;
206 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
207 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
208 dmae->len = DMAE_LEN32_RD_MAX;
209 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
210 dmae->comp_addr_hi = 0;
/* Second DMAE: read the remainder, PCI-completed into stats_comp so
 * bnx2x_stats_comp() can poll for the end of the whole transfer. */
213 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
214 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
215 dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
216 dmae->src_addr_hi = 0;
217 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
218 DMAE_LEN32_RD_MAX * 4);
219 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
220 DMAE_LEN32_RD_MAX * 4);
221 dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
223 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
224 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
225 dmae->comp_val = DMAE_COMP_VAL;
/* Fire the chain and wait for it to finish synchronously. */
228 bnx2x_hw_stats_post(sc);
229 bnx2x_stats_comp(sc);
/*
 * bnx2x_port_stats_init() - build the full chain of DMAE executer
 * commands that (1) writes host port/function stats up to the MFW, and
 * (2) reads MAC (EMAC, BMAC1/2, or MSTAT) and NIG counters down into
 * host shadow buffers. The final command completes to PCI (stats_comp).
 * Only the PMF with an up link may run this.
 *
 * NOTE(review): interior lines (locals such as opcode/mac_addr, braces,
 * returns, some assignment left-hand sides) are missing from this dump.
 */
233 bnx2x_port_stats_init(struct bnx2x_softc *sc)
235 struct dmae_command *dmae;
236 int port = SC_PORT(sc);
238 int loader_idx = PMF_DMAE_C(sc);
240 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
243 if (!sc->link_vars.link_up || !sc->port.pmf) {
244 PMD_DRV_LOG(ERR, "BUG!");
248 sc->executer_idx = 0;
/* Host -> chip (MFW) direction: PCI source, GRC destination. */
251 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
252 TRUE, DMAE_COMP_GRC);
/* Push host_port_stats up to the MFW-visible port_stx area. */
254 if (sc->port.port_stx) {
255 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
256 dmae->opcode = opcode;
257 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
258 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
259 dmae->dst_addr_lo = sc->port.port_stx >> 2;
260 dmae->dst_addr_hi = 0;
261 dmae->len = bnx2x_get_port_stats_dma_len(sc);
262 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
263 dmae->comp_addr_hi = 0;
/* Push host_func_stats up to func_stx (guard presumably elided). */
268 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
269 dmae->opcode = opcode;
270 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
271 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
272 dmae->dst_addr_lo = (sc->func_stx >> 2);
273 dmae->dst_addr_hi = 0;
274 dmae->len = (sizeof(struct host_func_stats) >> 2);
275 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
276 dmae->comp_addr_hi = 0;
/* Chip -> host direction for all MAC/NIG counter reads below. */
281 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
282 TRUE, DMAE_COMP_GRC);
284 /* EMAC is special */
285 if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
286 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
288 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
289 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
290 dmae->opcode = opcode;
291 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
292 dmae->src_addr_hi = 0;
293 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
294 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
295 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
296 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
297 dmae->comp_addr_hi = 0;
300 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
301 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
302 dmae->opcode = opcode;
303 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
304 dmae->src_addr_hi = 0;
/* Land this single counter at its field offset within emac_stats. */
305 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
306 offsetof(struct emac_stats,
307 rx_stat_falsecarriererrors));
308 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
309 offsetof(struct emac_stats,
310 rx_stat_falsecarriererrors));
312 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
313 dmae->comp_addr_hi = 0;
316 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
317 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
318 dmae->opcode = opcode;
319 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
320 dmae->src_addr_hi = 0;
321 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
322 offsetof(struct emac_stats,
323 tx_stat_ifhcoutoctets));
324 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
325 offsetof(struct emac_stats,
326 tx_stat_ifhcoutoctets));
327 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
328 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
329 dmae->comp_addr_hi = 0;
/* Non-EMAC MACs: compute per-MAC source addresses/lengths, then issue
 * one TX-block read and one RX-block read with shared code below. */
332 uint32_t tx_src_addr_lo, rx_src_addr_lo;
333 uint16_t rx_len, tx_len;
335 /* configure the params according to MAC type */
336 switch (sc->link_vars.mac_type) {
337 case ELINK_MAC_TYPE_BMAC:
338 mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
339 NIG_REG_INGRESS_BMAC0_MEM;
341 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
342 BIGMAC_REGISTER_TX_STAT_GTBYT */
343 if (CHIP_IS_E1x(sc)) {
345 ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
/* Lengths: inclusive register span, +8 bytes for the last 64-bit
 * counter, converted to dwords. */
346 tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
347 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
349 ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
350 rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
351 BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
354 ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
355 tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
356 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
358 ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
359 rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
360 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
365 case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
366 case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
368 mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
369 tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
370 rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
372 (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
374 (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
/* TX statistics block read. */
379 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
380 dmae->opcode = opcode;
381 dmae->src_addr_lo = tx_src_addr_lo;
382 dmae->src_addr_hi = 0;
384 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
385 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
386 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
387 dmae->comp_addr_hi = 0;
/* RX statistics block read, placed right after the TX block. */
391 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
392 dmae->opcode = opcode;
393 dmae->src_addr_hi = 0;
394 dmae->src_addr_lo = rx_src_addr_lo;
396 U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
398 U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
400 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
401 dmae->comp_addr_hi = 0;
/* NIG egress packet counters exist only on pre-E3 chips. */
406 if (!CHIP_IS_E3(sc)) {
407 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
408 dmae->opcode = opcode;
410 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
411 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
412 dmae->src_addr_hi = 0;
413 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
414 offsetof(struct nig_stats,
415 egress_mac_pkt0_lo));
416 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
417 offsetof(struct nig_stats,
418 egress_mac_pkt0_lo));
419 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
420 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
421 dmae->comp_addr_hi = 0;
424 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
425 dmae->opcode = opcode;
427 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
428 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
429 dmae->src_addr_hi = 0;
430 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
431 offsetof(struct nig_stats,
432 egress_mac_pkt1_lo));
433 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
434 offsetof(struct nig_stats,
435 egress_mac_pkt1_lo));
436 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
437 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
438 dmae->comp_addr_hi = 0;
/* Final command: BRB discard counters; PCI completion into stats_comp
 * marks the end of the whole chain. */
442 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
443 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
444 TRUE, DMAE_COMP_PCI);
446 (port ? NIG_REG_STAT1_BRB_DISCARD :
447 NIG_REG_STAT0_BRB_DISCARD) >> 2;
448 dmae->src_addr_hi = 0;
449 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
450 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
/* Skip the last 4 dwords of nig_stats (filled by the reads above). */
451 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
453 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
454 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
455 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bnx2x_func_stats_init() - prepare a single standalone DMAE command that
 * writes host_func_stats to the function's MFW area (func_stx), with PCI
 * completion into stats_comp. Used when this function is not the PMF.
 *
 * NOTE(review): the func_stx validity check guarding the "BUG!" log and
 * the closing brace/return are missing from this dump.
 */
461 bnx2x_func_stats_init(struct bnx2x_softc *sc)
463 struct dmae_command *dmae = &sc->stats_dmae;
464 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
468 PMD_DRV_LOG(ERR, "BUG!");
472 sc->executer_idx = 0;
473 memset(dmae, 0, sizeof(struct dmae_command));
475 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
476 TRUE, DMAE_COMP_PCI);
477 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
478 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
479 dmae->dst_addr_lo = (sc->func_stx >> 2);
480 dmae->dst_addr_hi = 0;
481 dmae->len = (sizeof(struct host_func_stats) >> 2);
482 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
483 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
484 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bnx2x_stats_start() - FSM entry: (re)program the DMA commands (port
 * flavor for the PMF, function flavor otherwise) and post both the
 * hardware DMAE transfer and the storm statistics ramrod.
 *
 * NOTE(review): the VF early-return, the PMF condition for the port
 * branch, and braces are missing from this dump.
 */
490 bnx2x_stats_start(struct bnx2x_softc *sc)
493 * VFs travel through here as part of the statistics FSM, but no action
501 bnx2x_port_stats_init(sc);
504 else if (sc->func_stx) {
505 bnx2x_func_stats_init(sc);
508 bnx2x_hw_stats_post(sc);
509 bnx2x_storm_stats_post(sc);
/*
 * bnx2x_stats_pmf_start() - FSM transition used when this function has
 * just become the PMF: drain any pending completion, import the previous
 * PMF's port statistics, then restart normal statistics collection.
 */
513 bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
515 bnx2x_stats_comp(sc);
516 bnx2x_stats_pmf_update(sc);
517 bnx2x_stats_start(sc);
/*
 * bnx2x_stats_restart() - FSM transition: wait for the outstanding DMAE
 * completion, then kick a fresh collection cycle. VFs pass through
 * without hardware action (per the comment retained below).
 *
 * NOTE(review): the VF early-return and braces are missing from this dump.
 */
521 bnx2x_stats_restart(struct bnx2x_softc *sc)
524 * VFs travel through here as part of the statistics FSM, but no action
531 bnx2x_stats_comp(sc);
532 bnx2x_stats_start(sc);
/*
 * bnx2x_bmac_stats_update() - fold freshly DMAed BigMAC counters into the
 * cumulative host_port_stats / eth_stats. The UPDATE_STAT64 macros pick
 * the counter type from the local "new" pointer (bmac1 on E1x, bmac2
 * otherwise). The bmac2 path additionally accumulates PFC frame counts,
 * adding the saved pre-reset firmware values (fw_stats_old).
 *
 * NOTE(review): interior lines (locals, braces) are missing from this
 * dump; mac_stx[1] holds the accumulated values read by estats below.
 */
536 bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
538 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
539 struct bnx2x_eth_stats *estats = &sc->eth_stats;
545 if (CHIP_IS_E1x(sc)) {
546 struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);
548 /* the macros below will use "bmac1_stats" type */
549 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
550 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
551 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
552 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
553 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
554 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
555 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
/* grxpf feeds two host counters: xoff entries and the generic xpf. */
556 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
557 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
559 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
560 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
561 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
562 UPDATE_STAT64(tx_stat_gt127,
563 tx_stat_etherstatspkts65octetsto127octets);
564 UPDATE_STAT64(tx_stat_gt255,
565 tx_stat_etherstatspkts128octetsto255octets);
566 UPDATE_STAT64(tx_stat_gt511,
567 tx_stat_etherstatspkts256octetsto511octets);
568 UPDATE_STAT64(tx_stat_gt1023,
569 tx_stat_etherstatspkts512octetsto1023octets);
570 UPDATE_STAT64(tx_stat_gt1518,
571 tx_stat_etherstatspkts1024octetsto1522octets);
572 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
573 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
574 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
575 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
576 UPDATE_STAT64(tx_stat_gterr,
577 tx_stat_dot3statsinternalmactransmiterrors);
578 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
/* E2+ path: same counter set, bmac2 layout, plus PFC accumulation. */
580 struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
581 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
583 /* the macros below will use "bmac2_stats" type */
584 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
585 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
586 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
587 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
588 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
589 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
590 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
591 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
592 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
593 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
594 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
595 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
596 UPDATE_STAT64(tx_stat_gt127,
597 tx_stat_etherstatspkts65octetsto127octets);
598 UPDATE_STAT64(tx_stat_gt255,
599 tx_stat_etherstatspkts128octetsto255octets);
600 UPDATE_STAT64(tx_stat_gt511,
601 tx_stat_etherstatspkts256octetsto511octets);
602 UPDATE_STAT64(tx_stat_gt1023,
603 tx_stat_etherstatspkts512octetsto1023octets);
604 UPDATE_STAT64(tx_stat_gt1518,
605 tx_stat_etherstatspkts1024octetsto1522octets);
606 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
607 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
608 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
609 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
610 UPDATE_STAT64(tx_stat_gterr,
611 tx_stat_dot3statsinternalmactransmiterrors);
612 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
614 /* collect PFC stats */
/* PFC totals = current hardware reading + pre-reset firmware values. */
615 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
616 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
617 ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
618 pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
620 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
621 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
622 ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
623 pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
/* Export the accumulated pause/PFC counters into eth_stats. */
626 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
627 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
629 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
630 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
632 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
633 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
634 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
635 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * bnx2x_mstat_stats_update() - fold freshly DMAed MSTAT (UMAC/XMAC on E3)
 * counters into the cumulative host_port_stats / eth_stats using the
 * ADD_STAT64 macros, accumulate PFC frame counts, and synthesize the
 * "over 1522 octets" bucket from the 2047/4095/9216/16383 buckets.
 *
 * NOTE(review): braces/return are missing from this dump; mac_stx[1]
 * holds the accumulated values read into estats below.
 */
639 bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
641 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
642 struct bnx2x_eth_stats *estats = &sc->eth_stats;
643 struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);
645 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
646 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
647 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
648 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
649 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
650 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
/* rx_grxpf feeds both the xoff-entered and generic xpf counters. */
651 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
652 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
653 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
654 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
656 /* collect pfc stats */
657 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
658 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
659 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
660 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
662 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
663 ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
664 ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
665 ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
666 ADD_STAT64(stats_tx.tx_gt1023,
667 tx_stat_etherstatspkts512octetsto1023octets);
668 ADD_STAT64(stats_tx.tx_gt1518,
669 tx_stat_etherstatspkts1024octetsto1522octets);
670 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
672 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
673 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
674 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
676 ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
677 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
679 estats->etherstatspkts1024octetsto1522octets_hi =
680 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
681 estats->etherstatspkts1024octetsto1522octets_lo =
682 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
/* over-1522 bucket = 2047 + 4095 + 9216 + 16383 size buckets. */
684 estats->etherstatspktsover1522octets_hi =
685 pstats->mac_stx[1].tx_stat_mac_2047_hi;
686 estats->etherstatspktsover1522octets_lo =
687 pstats->mac_stx[1].tx_stat_mac_2047_lo;
689 ADD_64(estats->etherstatspktsover1522octets_hi,
690 pstats->mac_stx[1].tx_stat_mac_4095_hi,
691 estats->etherstatspktsover1522octets_lo,
692 pstats->mac_stx[1].tx_stat_mac_4095_lo);
694 ADD_64(estats->etherstatspktsover1522octets_hi,
695 pstats->mac_stx[1].tx_stat_mac_9216_hi,
696 estats->etherstatspktsover1522octets_lo,
697 pstats->mac_stx[1].tx_stat_mac_9216_lo);
699 ADD_64(estats->etherstatspktsover1522octets_hi,
700 pstats->mac_stx[1].tx_stat_mac_16383_hi,
701 estats->etherstatspktsover1522octets_lo,
702 pstats->mac_stx[1].tx_stat_mac_16383_lo);
704 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
705 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
707 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
708 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
710 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
711 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
712 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
713 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * bnx2x_emac_stats_update() - fold freshly DMAed EMAC (1G MAC) counters
 * into the cumulative host_port_stats via UPDATE_EXTEND_STAT (which
 * extends the hardware's 32-bit counters to 64 bits), then derive the
 * combined pause RX/TX totals (XON + XOFF) for eth_stats.
 *
 * NOTE(review): braces/return are missing from this dump.
 */
717 bnx2x_emac_stats_update(struct bnx2x_softc *sc)
719 struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
720 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
721 struct bnx2x_eth_stats *estats = &sc->eth_stats;
723 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
724 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
725 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
726 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
727 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
728 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
729 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
730 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
731 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
732 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
733 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
734 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
735 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
736 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
737 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
738 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
739 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
740 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
741 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
742 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
743 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
744 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
745 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
746 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
747 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
748 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
749 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
750 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
751 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
752 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
753 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = XON received + XOFF received. */
755 estats->pause_frames_received_hi =
756 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
757 estats->pause_frames_received_lo =
758 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
759 ADD_64(estats->pause_frames_received_hi,
760 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
761 estats->pause_frames_received_lo,
762 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = XON sent + XOFF sent. */
764 estats->pause_frames_sent_hi =
765 pstats->mac_stx[1].tx_stat_outxonsent_hi;
766 estats->pause_frames_sent_lo =
767 pstats->mac_stx[1].tx_stat_outxonsent_lo;
768 ADD_64(estats->pause_frames_sent_hi,
769 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
770 estats->pause_frames_sent_lo,
771 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/*
 * bnx2x_hw_stats_update() - top-level hardware statistics refresh:
 * dispatch to the per-MAC update routine, then accumulate NIG BRB
 * drop/truncate deltas, snapshot the NIG counters as the new "old"
 * baseline, mirror the MAC block into eth_stats, and pick up EEE and
 * NIG-timer information where applicable.
 *
 * NOTE(review): interior lines (break statements, braces, returns) are
 * missing from this dump.
 */
775 bnx2x_hw_stats_update(struct bnx2x_softc *sc)
777 struct nig_stats *new = BNX2X_SP(sc, nig_stats);
778 struct nig_stats *old = &(sc->port.old_nig_stats);
779 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
780 struct bnx2x_eth_stats *estats = &sc->eth_stats;
781 uint32_t lpi_reg, nig_timer_max;
/* Per-MAC counter folding, chosen by the active link MAC type. */
787 switch (sc->link_vars.mac_type) {
788 case ELINK_MAC_TYPE_BMAC:
789 bnx2x_bmac_stats_update(sc);
792 case ELINK_MAC_TYPE_EMAC:
793 bnx2x_emac_stats_update(sc);
796 case ELINK_MAC_TYPE_UMAC:
797 case ELINK_MAC_TYPE_XMAC:
798 bnx2x_mstat_stats_update(sc);
801 case ELINK_MAC_TYPE_NONE: /* unreached */
803 "stats updated by DMAE but no MAC active");
806 default: /* unreached */
807 PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
/* Accumulate NIG deltas since the previous snapshot. */
810 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
811 new->brb_discard - old->brb_discard);
812 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
813 new->brb_truncate - old->brb_truncate);
/* NIG egress packet counters only exist on pre-E3 chips. */
815 if (!CHIP_IS_E3(sc)) {
816 UPDATE_STAT64_NIG(egress_mac_pkt0,
817 etherstatspkts1024octetsto1522octets);
818 UPDATE_STAT64_NIG(egress_mac_pkt1,
819 etherstatspktsover1522octets);
/* New snapshot becomes the baseline for the next delta. */
822 rte_memcpy(old, new, sizeof(struct nig_stats));
/* Mirror the whole accumulated mac_stx[1] block into eth_stats. */
824 rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
825 sizeof(struct mac_stx));
826 estats->brb_drop_hi = pstats->brb_drop_hi;
827 estats->brb_drop_lo = pstats->brb_drop_lo;
829 pstats->host_port_stats_counter++;
/* E3: read the EEE LPI entry counter for this port. */
831 if (CHIP_IS_E3(sc)) {
832 lpi_reg = (SC_PORT(sc)) ?
833 MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
834 MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
835 estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
838 if (!BNX2X_NOMCP(sc)) {
839 nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
840 if (nig_timer_max != estats->nig_timer_max) {
841 estats->nig_timer_max = nig_timer_max;
842 PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
843 estats->nig_timer_max);
/*
 * bnx2x_storm_stats_validate_counters() - verify that all four storm
 * processors (x/u/c/t) have stamped the statistics buffer with the
 * sequence number of the last posted ramrod (stats_counter - 1); any
 * mismatch means that storm's data is stale for this cycle.
 *
 * NOTE(review): the error-return statements and the final success
 * return are missing from this dump.
 */
851 bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
853 struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
854 uint16_t cur_stats_counter;
857 * Make sure we use the value of the counter
858 * used for sending the last stats ramrod.
860 cur_stats_counter = (sc->stats_counter - 1);
862 /* are storm stats valid? */
863 if (le16toh(counters->xstats_counter) != cur_stats_counter) {
865 "stats not updated by xstorm, "
866 "counter 0x%x != stats_counter 0x%x",
867 le16toh(counters->xstats_counter), sc->stats_counter);
871 if (le16toh(counters->ustats_counter) != cur_stats_counter) {
873 "stats not updated by ustorm, "
874 "counter 0x%x != stats_counter 0x%x",
875 le16toh(counters->ustats_counter), sc->stats_counter);
879 if (le16toh(counters->cstats_counter) != cur_stats_counter) {
881 "stats not updated by cstorm, "
882 "counter 0x%x != stats_counter 0x%x",
883 le16toh(counters->cstats_counter), sc->stats_counter);
887 if (le16toh(counters->tstats_counter) != cur_stats_counter) {
889 "stats not updated by tstorm, "
890 "counter 0x%x != stats_counter 0x%x",
891 le16toh(counters->tstats_counter), sc->stats_counter);
/*
 * bnx2x_storm_stats_update() - consume firmware (storm) statistics: after
 * validating the storm counters (PF only; VF counters are managed by the
 * PF), walk every RX/TX queue and fold per-queue t/u/x-storm counters
 * into the queue, function, and global eth statistics, then account
 * error bytes and aged firmware port counters, bump the function stats
 * epoch, and clear stats_pending.
 *
 * NOTE(review): interior lines (returns, braces, some locals) are
 * missing from this dump.
 */
899 bnx2x_storm_stats_update(struct bnx2x_softc *sc)
901 struct tstorm_per_port_stats *tport =
902 &sc->fw_stats_data->port.tstorm_port_statistics;
903 struct tstorm_per_pf_stats *tfunc =
904 &sc->fw_stats_data->pf.tstorm_pf_statistics;
905 struct host_func_stats *fstats = &sc->func_stats;
906 struct bnx2x_eth_stats *estats = &sc->eth_stats;
907 struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
910 /* vfs stat counter is managed by pf */
911 if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) {
/* Error bytes are recomputed from scratch each cycle. */
915 estats->error_bytes_received_hi = 0;
916 estats->error_bytes_received_lo = 0;
918 for (i = 0; i < sc->num_queues; i++) {
919 struct bnx2x_fastpath *fp = &sc->fp[i];
920 struct tstorm_per_queue_stats *tclient =
921 &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
922 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
923 struct ustorm_per_queue_stats *uclient =
924 &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
925 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
926 struct xstorm_per_queue_stats *xclient =
927 &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
928 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
929 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
930 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
934 /* PMD_DRV_LOG(DEBUG,
935 "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
936 i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
937 xclient->mcast_pkts_sent);
939 PMD_DRV_LOG(DEBUG, "---------------"); */
/* RX byte counters per traffic class (tstorm). */
941 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
942 total_broadcast_bytes_received);
943 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
944 total_multicast_bytes_received);
945 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
946 total_unicast_bytes_received);
949 * sum to total_bytes_received all
950 * unicast/multicast/broadcast
952 qstats->total_bytes_received_hi =
953 qstats->total_broadcast_bytes_received_hi;
954 qstats->total_bytes_received_lo =
955 qstats->total_broadcast_bytes_received_lo;
957 ADD_64(qstats->total_bytes_received_hi,
958 qstats->total_multicast_bytes_received_hi,
959 qstats->total_bytes_received_lo,
960 qstats->total_multicast_bytes_received_lo);
962 ADD_64(qstats->total_bytes_received_hi,
963 qstats->total_unicast_bytes_received_hi,
964 qstats->total_bytes_received_lo,
965 qstats->total_unicast_bytes_received_lo);
967 qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
968 qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
/* RX packet counters, then subtract no-buffer drops (ustorm) so the
 * "received" packet counts only include delivered packets. */
970 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
971 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
972 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
973 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
974 etherstatsoverrsizepkts, 32);
975 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
977 SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
978 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
979 total_multicast_packets_received);
980 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
981 total_broadcast_packets_received);
982 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
983 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
984 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
/* TX byte counters per traffic class (xstorm). */
986 UPDATE_QSTAT(xclient->bcast_bytes_sent,
987 total_broadcast_bytes_transmitted);
988 UPDATE_QSTAT(xclient->mcast_bytes_sent,
989 total_multicast_bytes_transmitted);
990 UPDATE_QSTAT(xclient->ucast_bytes_sent,
991 total_unicast_bytes_transmitted);
994 * sum to total_bytes_transmitted all
995 * unicast/multicast/broadcast
997 qstats->total_bytes_transmitted_hi =
998 qstats->total_unicast_bytes_transmitted_hi;
999 qstats->total_bytes_transmitted_lo =
1000 qstats->total_unicast_bytes_transmitted_lo;
1002 ADD_64(qstats->total_bytes_transmitted_hi,
1003 qstats->total_broadcast_bytes_transmitted_hi,
1004 qstats->total_bytes_transmitted_lo,
1005 qstats->total_broadcast_bytes_transmitted_lo);
1007 ADD_64(qstats->total_bytes_transmitted_hi,
1008 qstats->total_multicast_bytes_transmitted_hi,
1009 qstats->total_bytes_transmitted_lo,
1010 qstats->total_multicast_bytes_transmitted_lo);
1012 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
1013 total_unicast_packets_transmitted);
1014 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1015 total_multicast_packets_transmitted);
1016 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1017 total_broadcast_packets_transmitted);
1019 UPDATE_EXTEND_TSTAT(checksum_discard,
1020 total_packets_received_checksum_discarded);
1021 UPDATE_EXTEND_TSTAT(ttl0_discard,
1022 total_packets_received_ttl0_discarded);
1024 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1025 total_transmitted_dropped_packets_error);
/* Roll this queue's totals up into the function-level stats. */
1027 UPDATE_FSTAT_QSTAT(total_bytes_received);
1028 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1029 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1030 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1031 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1032 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1033 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1034 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1035 UPDATE_FSTAT_QSTAT(valid_bytes_received);
/* Global totals additionally include MAC-level bad octets and the
 * tstorm per-PF error bytes (LE in the firmware buffer). */
1038 ADD_64(estats->total_bytes_received_hi,
1039 estats->rx_stat_ifhcinbadoctets_hi,
1040 estats->total_bytes_received_lo,
1041 estats->rx_stat_ifhcinbadoctets_lo);
1043 ADD_64_LE(estats->total_bytes_received_hi,
1044 tfunc->rcv_error_bytes.hi,
1045 estats->total_bytes_received_lo,
1046 tfunc->rcv_error_bytes.lo);
1048 ADD_64_LE(estats->error_bytes_received_hi,
1049 tfunc->rcv_error_bytes.hi,
1050 estats->error_bytes_received_lo,
1051 tfunc->rcv_error_bytes.lo);
1053 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1055 ADD_64(estats->error_bytes_received_hi,
1056 estats->rx_stat_ifhcinbadoctets_hi,
1057 estats->error_bytes_received_lo,
1058 estats->rx_stat_ifhcinbadoctets_lo);
/* Carry forward the pre-reset firmware port-level discard counters. */
1061 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1062 UPDATE_FW_STAT(mac_filter_discard);
1063 UPDATE_FW_STAT(mf_tag_discard);
1064 UPDATE_FW_STAT(brb_truncate_discard);
1065 UPDATE_FW_STAT(mac_discard);
/* New epoch: start == end marks a consistent function-stats snapshot. */
1068 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
/* Allow the next statistics ramrod to be posted. */
1070 sc->stats_pending = 0;
/*
 * Fold the per-queue driver-maintained (software) statistics from each
 * fastpath ring (sc->fp[i].eth_q_stats, with sc->fp[i].eth_q_stats_old as
 * the saved baseline) into the device-wide totals in sc->eth_stats.
 *
 * NOTE(review): this is a partial extract -- brace-only/blank lines and the
 * return-type line are missing from the view; code lines are kept verbatim.
 */
1076 bnx2x_drv_stats_update(struct bnx2x_softc *sc)
1078 struct bnx2x_eth_stats *estats = &sc->eth_stats;
/* Accumulate every active fastpath queue into the aggregate counters. */
1081 for (i = 0; i < sc->num_queues; i++) {
1082 struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1083 struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
/* Rx-side software counters (calls, packets, checksum results). */
1085 UPDATE_ESTAT_QSTAT(rx_calls);
1086 UPDATE_ESTAT_QSTAT(rx_pkts);
1087 UPDATE_ESTAT_QSTAT(rx_soft_errors);
1088 UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1089 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1090 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1091 UPDATE_ESTAT_QSTAT(rx_budget_reached);
/* Tx-side software counters (packets, offload csum, queueing failures). */
1092 UPDATE_ESTAT_QSTAT(tx_pkts);
1093 UPDATE_ESTAT_QSTAT(tx_soft_errors);
1094 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1095 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1096 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1097 UPDATE_ESTAT_QSTAT(tx_encap_failures);
1098 UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1099 UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1100 UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1101 UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1102 UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1103 UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1104 UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1105 UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1107 /* mbuf driver statistics */
1108 UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1109 UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1110 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1111 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1113 /* track the number of allocated mbufs */
1114 UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1115 UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
/*
 * Query whether the management FW (via the shmem2 edebug driver interface)
 * has requested that statistics collection be suspended.
 *
 * NOTE(review): the return statements fall outside this partial view, so
 * the exact TRUE/FALSE values cannot be confirmed here.
 */
1120 bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
/* Only newer shmem2 layouts expose the edebug driver-interface words. */
1124 if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1125 val = SHMEM2_RD(sc, edebug_driver_if[1]);
/* MFW posted the "disable statistics" opcode. */
1127 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
/*
 * Periodic statistics refresh: pull storm (FW) statistics, re-post the HW
 * DMAE and storm ramrod requests, then merge driver software stats.
 *
 * NOTE(review): the surrounding if/else scaffolding (PMF vs. non-PMF vs. VF
 * paths and early returns) is missing from this partial view; code lines
 * are kept verbatim.
 */
1136 bnx2x_stats_update(struct bnx2x_softc *sc)
1138 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Bail out when the management FW asked us to stop collecting stats. */
1140 if (bnx2x_edebug_stats_stopped(sc)) {
1146 bnx2x_storm_stats_update(sc);
1147 bnx2x_hw_stats_post(sc);
1148 bnx2x_storm_stats_post(sc);
/* DMAE completion word not yet written back -- HW stats still in flight. */
1151 if (*stats_comp != DMAE_COMP_VAL) {
1156 bnx2x_hw_stats_update(sc);
/* Nonzero return means FW has not advanced the stats counter yet. */
1159 if (bnx2x_storm_stats_update(sc)) {
/* Three consecutive misses indicate a stuck FW -- fatal. */
1160 if (sc->stats_pending++ == 3) {
1161 rte_panic("storm stats not updated for 3 times");
1167 * VF doesn't collect HW statistics, and doesn't get completions,
1168 * performs only update.
1170 bnx2x_storm_stats_update(sc);
/* Finally fold in the driver-side software counters. */
1173 bnx2x_drv_stats_update(sc);
/*
 * On stats stop, DMA the final snapshots out to the management FW areas:
 * port stats to sc->port.port_stx and function stats to sc->func_stx,
 * building DMAE PCI->GRC commands in the slow-path buffer.
 *
 * NOTE(review): partial extract -- the if/else selecting between the two
 * dmae->opcode completion variants (GRC-chained vs. PCI completion) and the
 * func_stx guard are missing from this view; code lines kept verbatim.
 */
1177 bnx2x_port_stats_stop(struct bnx2x_softc *sc)
1179 struct dmae_command *dmae;
1181 int loader_idx = PMF_DMAE_C(sc);
1182 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1184 sc->executer_idx = 0;
/* Base opcode: copy from host (PCI) memory into device GRC space. */
1186 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
/* Port statistics area advertised by the MFW? */
1188 if (sc->port.port_stx) {
1189 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
/* Completion via GRC (chained to next loader command)... */
1192 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
/* ...or completion written back to host memory. */
1194 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1197 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1198 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
/* GRC destination addresses are expressed in 32-bit words. */
1199 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1200 dmae->dst_addr_hi = 0;
/* Length is BC-version dependent -- see bnx2x_get_port_stats_dma_len(). */
1201 dmae->len = bnx2x_get_port_stats_dma_len(sc);
/* GRC-completion variant: kick the next DMAE loader channel. */
1203 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1204 dmae->comp_addr_hi = 0;
/* PCI-completion variant: write DMAE_COMP_VAL into stats_comp. */
1207 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1208 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1209 dmae->comp_val = DMAE_COMP_VAL;
/* Second command: copy the host_func_stats block out to func_stx. */
1216 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1217 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1218 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
1219 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
1220 dmae->dst_addr_lo = (sc->func_stx >> 2);
1221 dmae->dst_addr_hi = 0;
1222 dmae->len = (sizeof(struct host_func_stats) >> 2);
1223 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1224 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1225 dmae->comp_val = DMAE_COMP_VAL;
/*
 * Stop statistics collection: wait for any outstanding request, take one
 * last HW/storm update, and (on the PMF) flush the final port snapshot to
 * the management FW via bnx2x_port_stats_stop().
 *
 * NOTE(review): the conditionals gating the update calls (e.g. PMF checks)
 * are missing from this partial view; code lines kept verbatim.
 */
1232 bnx2x_stats_stop(struct bnx2x_softc *sc)
1234 uint8_t update = FALSE;
/* Wait for a previously posted DMAE/ramrod to complete. */
1236 bnx2x_stats_comp(sc);
/* Record whether we obtained a final consistent snapshot. */
1239 update = bnx2x_hw_stats_update(sc) == 0;
1242 update |= bnx2x_storm_stats_update(sc) == 0;
1247 bnx2x_port_stats_stop(sc);
/* Post the stop commands and wait for them to finish. */
1250 bnx2x_hw_stats_post(sc);
1251 bnx2x_stats_comp(sc);
/* No-op action used by state-machine entries that ignore an event. */
1256 bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
/*
 * Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to move to. Driven exclusively by
 * bnx2x_stats_handle().
 */
1261 static const struct {
1262 void (*action)(struct bnx2x_softc *sc);
1263 enum bnx2x_stats_state next_state;
1264 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1266 /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
1267 /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED },
1268 /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
1269 /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
1272 /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED },
1273 /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED },
1274 /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED },
1275 /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED }
/*
 * Single entry point for driving the statistics state machine: advance the
 * state per bnx2x_stats_stm[state][event] and run the associated action.
 * Does nothing once the device has paniced.
 */
1279 void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
1281 enum bnx2x_stats_state state;
1283 if (unlikely(sc->panic)) {
/* Transition first, then run the action for the *previous* state. */
1287 state = sc->stats_state;
1288 sc->stats_state = bnx2x_stats_stm[state][event].next_state;
1290 bnx2x_stats_stm[state][event].action(sc);
/* UPDATE fires periodically -- avoid flooding the log with it. */
1292 if (event != STATS_EVENT_UPDATE) {
1294 "state %d -> event %d -> state %d",
1295 state, event, sc->stats_state);
/*
 * One-shot DMA of the (zeroed) host port-stats buffer out to the MFW port
 * statistics area, establishing a clean baseline. Only valid on the PMF
 * with a port_stx address advertised by the MFW.
 */
1300 bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
1302 struct dmae_command *dmae;
1303 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Sanity check: only PMF with a valid port stats address may run this. */
1306 if (!sc->port.pmf || !sc->port.port_stx) {
1307 PMD_DRV_LOG(ERR, "BUG!");
1311 sc->executer_idx = 0;
/* Build one PCI->GRC DMAE command with a host-memory completion. */
1313 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1314 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1315 TRUE, DMAE_COMP_PCI);
1316 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1317 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
/* GRC destination is in 32-bit words. */
1318 dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1319 dmae->dst_addr_hi = 0;
1320 dmae->len = bnx2x_get_port_stats_dma_len(sc);
1321 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1322 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1323 dmae->comp_val = DMAE_COMP_VAL;
/* Kick the command and wait synchronously for completion. */
1326 bnx2x_hw_stats_post(sc);
1327 bnx2x_stats_comp(sc);
1331 * This function will prepare the statistics ramrod data the way
1332 * we will only have to increment the statistics counter and
1333 * send the ramrod each time we have to.
1336 bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
1339 int first_queue_query_index;
1340 struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1341 rte_iova_t cur_data_offset;
1342 struct stats_query_entry *cur_query_entry;
/* Header: total number of query entries; counter starts at zero. */
1344 stats_hdr->cmd_num = sc->fw_stats_num;
1345 stats_hdr->drv_stats_counter = 0;
1348 * The storm_counters struct contains the counters of completed
1349 * statistics requests per storm which are incremented by FW
1350 * each time it completes handling a statistics ramrod. We will
1351 * check these counters in the timer handler and discard a
1352 * (statistics) ramrod completion.
1354 cur_data_offset = (sc->fw_stats_data_mapping +
1355 offsetof(struct bnx2x_fw_stats_data, storm_counters));
/* Addresses handed to FW are little-endian 64-bit hi/lo pairs. */
1357 stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1358 stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1361 * Prepare the first stats ramrod (will be completed with
1362 * the counters equal to zero) - init counters to something different.
1364 memset(&sc->fw_stats_data->storm_counters, 0xff,
1365 sizeof(struct stats_counter));
1367 /**** Port FW statistics data ****/
1368 cur_data_offset = (sc->fw_stats_data_mapping +
1369 offsetof(struct bnx2x_fw_stats_data, port));
1371 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1373 cur_query_entry->kind = STATS_TYPE_PORT;
1374 /* For port query index is a DON'T CARE */
1375 cur_query_entry->index = SC_PORT(sc);
1376 /* For port query funcID is a DON'T CARE */
1377 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1378 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1379 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1381 /**** PF FW statistics data ****/
1382 cur_data_offset = (sc->fw_stats_data_mapping +
1383 offsetof(struct bnx2x_fw_stats_data, pf));
1385 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1387 cur_query_entry->kind = STATS_TYPE_PF;
1388 /* For PF query index is a DON'T CARE */
1389 cur_query_entry->index = SC_PORT(sc);
1390 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1391 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1392 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1394 /**** Clients' queries ****/
1395 cur_data_offset = (sc->fw_stats_data_mapping +
1396 offsetof(struct bnx2x_fw_stats_data, queue_stats));
1399 * First queue query index depends whether FCoE offloaded request will
1400 * be included in the ramrod
1402 first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);
/* One STATS_TYPE_QUEUE entry per fastpath, data laid out contiguously. */
1404 for (i = 0; i < sc->num_queues; i++) {
1406 &sc->fw_stats_req->query[first_queue_query_index + i];
1408 cur_query_entry->kind = STATS_TYPE_QUEUE;
1409 cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
1410 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1411 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1412 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1414 cur_data_offset += sizeof(struct per_queue_stats);
/*
 * Reset statistics bookkeeping: always clear the per-queue "old" client
 * snapshots; on first-time init (sc->stats_init) also zero the accumulated
 * software stats. Ends with the state machine disabled and, on a PMF with
 * a port_stx area, a fresh MFW port-stats baseline.
 */
1418 void bnx2x_memset_stats(struct bnx2x_softc *sc)
1422 /* function stats */
1423 for (i = 0; i < sc->num_queues; i++) {
1424 struct bnx2x_fastpath *fp = &sc->fp[i];
/* Old client snapshots are deltas' baselines -- always reset. */
1426 memset(&fp->old_tclient, 0,
1427 sizeof(fp->old_tclient));
1428 memset(&fp->old_uclient, 0,
1429 sizeof(fp->old_uclient));
1430 memset(&fp->old_xclient, 0,
1431 sizeof(fp->old_xclient));
/* Accumulated queue stats survive a restart; wipe only on init. */
1432 if (sc->stats_init) {
1433 memset(&fp->eth_q_stats, 0,
1434 sizeof(fp->eth_q_stats));
1435 memset(&fp->eth_q_stats_old, 0,
1436 sizeof(fp->eth_q_stats_old));
/* Device-wide software stats, likewise only on first init. */
1440 if (sc->stats_init) {
1441 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1442 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1443 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1444 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1445 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1448 sc->stats_state = STATS_STATE_DISABLED;
1450 if (sc->port.pmf && sc->port.port_stx)
1451 bnx2x_port_stats_base_init(sc);
1453 /* mark the end of statistics initialization */
1454 sc->stats_init = false;
/*
 * Full statistics subsystem initialization: read the MFW-provided port and
 * function stats addresses from shmem, snapshot the NIG baseline counters,
 * clear all software stats, prepare the FW stats ramrod template, and push
 * a clean baseline to the device.
 */
1458 bnx2x_stats_init(struct bnx2x_softc *sc)
1460 int /*abs*/port = SC_PORT(sc);
1461 int mb_idx = SC_FW_MB_IDX(sc);
1464 sc->stats_pending = 0;
1465 sc->executer_idx = 0;
1466 sc->stats_counter = 0;
1468 sc->stats_init = TRUE;
1470 /* port and func stats for management */
1471 if (!BNX2X_NOMCP(sc)) {
1472 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1473 sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
/* No MCP: no MFW stats area to write to. */
1475 sc->port.port_stx = 0;
1479 PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
1480 sc->port.port_stx, sc->func_stx);
1482 /* pmf should retrieve port statistics from SP on a non-init */
1483 if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1484 bnx2x_stats_handle(sc, STATS_EVENT_PMF);
/* Snapshot NIG counters so later reads can be delta'd against them. */
1489 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1490 sc->port.old_nig_stats.brb_discard =
1491 REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1492 sc->port.old_nig_stats.brb_truncate =
1493 REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
/* Egress MAC packet counters exist only on pre-E3 chips. */
1494 if (!CHIP_IS_E3(sc)) {
1495 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1496 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1497 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1498 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1501 /* function stats */
1502 for (i = 0; i < sc->num_queues; i++) {
1503 memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1504 memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1505 memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
1506 if (sc->stats_init) {
1507 memset(&sc->fp[i].eth_q_stats, 0,
1508 sizeof(sc->fp[i].eth_q_stats));
1509 memset(&sc->fp[i].eth_q_stats_old, 0,
1510 sizeof(sc->fp[i].eth_q_stats_old));
1514 /* prepare statistics ramrod data */
1515 bnx2x_prep_fw_stats_req(sc);
1517 if (sc->stats_init) {
1518 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1519 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1520 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1521 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1522 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1524 /* Clean SP from previous statistics */
1526 memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1527 bnx2x_func_stats_init(sc);
1528 bnx2x_hw_stats_post(sc);
1529 bnx2x_stats_comp(sc);
1533 sc->stats_state = STATS_STATE_DISABLED;
1535 if (sc->port.pmf && sc->port.port_stx) {
1536 bnx2x_port_stats_base_init(sc);
1539 /* mark the end of statistics initialization */
1540 sc->stats_init = FALSE;
/*
 * Preserve counters across an unload/reload: copy selected per-queue byte
 * counters into the *_old shadow structures and save the FW port stats
 * (PFC frame counts, discard counters) into sc->fw_stats_old.
 */
1544 bnx2x_save_statistics(struct bnx2x_softc *sc)
1548 /* save queue statistics */
1549 for (i = 0; i < sc->num_queues; i++) {
1550 struct bnx2x_fastpath *fp = &sc->fp[i];
1551 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1552 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
/* 64-bit byte counters are stored as separate _hi/_lo halves. */
1554 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1555 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1556 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1557 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1558 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1559 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1560 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1561 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1562 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1563 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1564 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1565 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1568 /* store port firmware statistics */
1570 struct bnx2x_eth_stats *estats = &sc->eth_stats;
1571 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1572 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
/* PFC frame counters come straight from the DMAed port stats block. */
1574 fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1575 fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1576 fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1577 fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
/* Accumulate the FW discard counters into the saved copies. */
1580 UPDATE_FW_STAT_OLD(mac_filter_discard);
1581 UPDATE_FW_STAT_OLD(mf_tag_discard);
1582 UPDATE_FW_STAT_OLD(brb_truncate_discard);
1583 UPDATE_FW_STAT_OLD(mac_discard);