2 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written consent.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
38 #include "bnx2x_stats.h"
41 #define BITS_PER_LONG 32
43 #define BITS_PER_LONG 64
/*
 * Merge a {hi, lo} pair of 32-bit statistics words into one 64-bit value.
 * 'hiref' points at the high word; the matching low word immediately
 * follows it in memory (the bnx2x *_hi/*_lo stats layout).
 * NOTE(review): several lines appear to be missing here (the 'hi'
 * declaration, the function braces and the non-64-bit branch) -- confirm
 * against the complete source file.
 */
47 bnx2x_hilo(uint32_t *hiref)
49 uint32_t lo = *(hiref + 1);
/* With 64-bit longs, combine the pair directly. */
50 #if (BITS_PER_LONG == 64)
52 return HILO_U64(hi, lo);
/*
 * Compute the length (in 32-bit words, per the callers' usage) of the
 * host_port_stats area to DMA to/from the management firmware, based on
 * what the bootcode (BC) version supports.
 */
58 static inline uint16_t
59 bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
64 /* 'newest' convention - shmem2 contains the size of the port stats */
65 if (SHMEM2_HAS(sc, sizeof_port_stats)) {
66 size = SHMEM2_RD(sc, sizeof_port_stats);
71 /* prevent newer BC from causing buffer overflow */
72 if (res > sizeof(struct host_port_stats)) {
73 res = sizeof(struct host_port_stats);
78 * Older convention - all BCs support the port stats fields up until
79 * the 'not_used' field
82 res = (offsetof(struct host_port_stats, not_used) + 4);
84 /* if PFC stats are supported by the MFW, DMA them as well */
85 if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
/* Extend the length to cover the PFC frame counters (tx_hi .. rx_lo). */
86 res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
87 offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
97 * Init service functions
101 * Post the next statistics ramrod. Protect it with the lock in
102 * order to ensure the strict order between statistics ramrods
103 * (each ramrod has a sequence number passed in a
104 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be sent in order).
/*
 * Post a statistics-query ramrod to the firmware. Each ramrod carries an
 * incrementing sequence number (drv_stats_counter) so storm statistics can
 * later be validated against it; stats_pending guards against posting a
 * second ramrod before the first completes.
 */
108 bnx2x_storm_stats_post(struct bnx2x_softc *sc)
112 if (!sc->stats_pending) {
/*
 * NOTE(review): this inner check is trivially false inside the outer
 * '!sc->stats_pending' branch; it looks like a leftover from a locked
 * variant of this function -- confirm against upstream.
 */
113 if (sc->stats_pending) {
/* Stamp the request with the next sequence number (little-endian). */
117 sc->fw_stats_req->hdr.drv_stats_counter =
118 htole16(sc->stats_counter++);
121 "sending statistics ramrod %d",
122 le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
124 /* adjust the ramrod to include VF queues statistics */
126 /* send FW stats ramrod */
127 rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
128 U64_HI(sc->fw_stats_req_mapping),
129 U64_LO(sc->fw_stats_req_mapping),
130 NONE_CONNECTION_TYPE);
/* Mark a ramrod as in flight until the completion is processed. */
132 sc->stats_pending = 1;
/*
 * Kick off the hardware statistics DMA. If a chain of DMAE commands was
 * prepared (executer_idx != 0), post a 'loader' DMAE command that copies
 * the first prepared command into DMAE command memory and triggers the
 * chain; otherwise, if only function stats exist, post the single command.
 */
138 bnx2x_hw_stats_post(struct bnx2x_softc *sc)
140 struct dmae_command *dmae = &sc->stats_dmae;
141 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Arm the completion word; bnx2x_stats_comp() polls for this value. */
145 *stats_comp = DMAE_COMP_VAL;
146 if (CHIP_REV_IS_SLOW(sc)) {
150 /* Update MCP's statistics if possible */
152 rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
153 sizeof(sc->func_stats));
157 if (sc->executer_idx) {
158 loader_idx = PMF_DMAE_C(sc);
/* Loader command: PCI -> GRC copy completing via GRC. */
159 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
160 TRUE, DMAE_COMP_GRC);
161 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
163 memset(dmae, 0, sizeof(struct dmae_command));
164 dmae->opcode = opcode;
/* Source: the first prepared command in the slowpath dmae[] array. */
165 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
166 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
/* Destination: DMAE command memory slot (loader_idx + 1), in dwords. */
167 dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
168 sizeof(struct dmae_command) *
169 (loader_idx + 1)) >> 2);
170 dmae->dst_addr_hi = 0;
171 dmae->len = sizeof(struct dmae_command) >> 2;
/* Completion: the GO register of the next slot, chaining execution. */
172 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
173 dmae->comp_addr_hi = 0;
177 bnx2x_post_dmae(sc, dmae, loader_idx);
178 } else if (sc->func_stx) {
180 bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
/*
 * Wait for the statistics DMAE chain to complete by polling the shared
 * completion word until it reaches DMAE_COMP_VAL; logs an error on
 * timeout. NOTE(review): the loop counter/delay lines are missing from
 * this extract -- confirm against the complete source.
 */
185 bnx2x_stats_comp(struct bnx2x_softc *sc)
187 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
190 while (*stats_comp != DMAE_COMP_VAL) {
192 PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
204 * Statistics service functions
/*
 * When this function becomes the PMF (port management function), read the
 * current port statistics back from the MCP scratchpad (port_stx) into
 * host memory so accumulation continues from the previous PMF's values.
 * The read is split into two DMAE commands because a single transfer is
 * capped at DMAE_LEN32_RD_MAX dwords.
 */
208 bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
210 struct dmae_command *dmae;
212 int loader_idx = PMF_DMAE_C(sc);
213 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Skip the transfer entirely on old bootcode (see note below). */
215 if (sc->devinfo.bc_ver <= 0x06001400) {
217 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
218 * BRB registers while the BRB block is in reset. The DMA transfer
219 * below triggers this issue resulting in the DMAE to stop
220 * functioning. Skip this initial stats transfer for old bootcode
221 * versions <= 6.0.20.
/* Only meaningful for the PMF with a valid port stats address. */
226 if (!sc->port.pmf || !sc->port.port_stx) {
227 PMD_DRV_LOG(ERR, "BUG!");
231 sc->executer_idx = 0;
/* GRC -> PCI read opcode; completion type is added per command below. */
233 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
/* First chunk: DMAE_LEN32_RD_MAX dwords, chained via GRC completion. */
235 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
236 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
237 dmae->src_addr_lo = (sc->port.port_stx >> 2);
238 dmae->src_addr_hi = 0;
239 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
240 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
241 dmae->len = DMAE_LEN32_RD_MAX;
242 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
243 dmae->comp_addr_hi = 0;
/* Second chunk: the remainder, signalling completion to host (PCI). */
246 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
247 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
248 dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
249 dmae->src_addr_hi = 0;
250 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
251 DMAE_LEN32_RD_MAX * 4);
252 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
253 DMAE_LEN32_RD_MAX * 4);
254 dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
256 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
257 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
258 dmae->comp_val = DMAE_COMP_VAL;
/* Fire the chain and wait for the PCI completion word. */
261 bnx2x_hw_stats_post(sc);
262 bnx2x_stats_comp(sc);
/*
 * Build the PMF's DMAE command chain for periodic port statistics:
 *  - write host port/function stats out to MCP scratchpad (port_stx,
 *    func_stx),
 *  - read MAC statistics (EMAC, BMAC1/BMAC2 or MSTAT depending on the
 *    active MAC type) into host mac_stats,
 *  - read NIG counters into host nig_stats.
 * The last command in the chain completes to host memory (DMAE_COMP_PCI);
 * all earlier commands chain via GRC completions.
 */
266 bnx2x_port_stats_init(struct bnx2x_softc *sc)
268 struct dmae_command *dmae;
269 int port = SC_PORT(sc);
271 int loader_idx = PMF_DMAE_C(sc);
273 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Only valid while the link is up and we are the PMF. */
276 if (!sc->link_vars.link_up || !sc->port.pmf) {
277 PMD_DRV_LOG(ERR, "BUG!");
281 sc->executer_idx = 0;
/* MCP direction: host memory (PCI) -> scratchpad (GRC). */
284 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
285 TRUE, DMAE_COMP_GRC);
287 if (sc->port.port_stx) {
288 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
289 dmae->opcode = opcode;
290 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
291 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
292 dmae->dst_addr_lo = sc->port.port_stx >> 2;
293 dmae->dst_addr_hi = 0;
294 dmae->len = bnx2x_get_port_stats_dma_len(sc);
295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
296 dmae->comp_addr_hi = 0;
/* Function statistics out to func_stx (guard line missing in extract). */
301 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
302 dmae->opcode = opcode;
303 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
304 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
305 dmae->dst_addr_lo = (sc->func_stx >> 2);
306 dmae->dst_addr_hi = 0;
307 dmae->len = (sizeof(struct host_func_stats) >> 2);
308 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
309 dmae->comp_addr_hi = 0;
/* MAC statistics direction: registers (GRC) -> host memory (PCI). */
314 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
315 TRUE, DMAE_COMP_GRC);
317 /* EMAC is special */
318 if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
319 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
321 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
322 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
323 dmae->opcode = opcode;
324 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
325 dmae->src_addr_hi = 0;
326 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
327 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
328 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
329 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
330 dmae->comp_addr_hi = 0;
333 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
334 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
335 dmae->opcode = opcode;
336 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
337 dmae->src_addr_hi = 0;
/* Lands at the rx_stat_falsecarriererrors slot within emac_stats. */
338 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
339 offsetof(struct emac_stats,
340 rx_stat_falsecarriererrors));
341 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
342 offsetof(struct emac_stats,
343 rx_stat_falsecarriererrors));
345 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
346 dmae->comp_addr_hi = 0;
349 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
350 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
351 dmae->opcode = opcode;
352 dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
353 dmae->src_addr_hi = 0;
/* TX counters land at the tx_stat_ifhcoutoctets slot. */
354 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
355 offsetof(struct emac_stats,
356 tx_stat_ifhcoutoctets));
357 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
358 offsetof(struct emac_stats,
359 tx_stat_ifhcoutoctets));
360 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
361 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
362 dmae->comp_addr_hi = 0;
/* Non-EMAC MACs: pick source addresses/lengths per MAC type. */
365 uint32_t tx_src_addr_lo, rx_src_addr_lo;
366 uint16_t rx_len, tx_len;
368 /* configure the params according to MAC type */
369 switch (sc->link_vars.mac_type) {
370 case ELINK_MAC_TYPE_BMAC:
371 mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
372 NIG_REG_INGRESS_BMAC0_MEM;
374 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
375 BIGMAC_REGISTER_TX_STAT_GTBYT */
376 if (CHIP_IS_E1x(sc)) {
378 ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
379 tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
380 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
382 ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
383 rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
384 BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
/* E2+: BMAC2 register layout (assignment targets missing in extract). */
387 ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
388 tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
389 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
391 ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
392 rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
393 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
398 case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
399 case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
401 mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
402 tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
403 rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
405 (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
407 (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
/* TX stats command. */
412 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
413 dmae->opcode = opcode;
414 dmae->src_addr_lo = tx_src_addr_lo;
415 dmae->src_addr_hi = 0;
417 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
418 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
419 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
420 dmae->comp_addr_hi = 0;
/* RX stats command, placed right after the TX block in mac_stats. */
424 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
425 dmae->opcode = opcode;
426 dmae->src_addr_hi = 0;
427 dmae->src_addr_lo = rx_src_addr_lo;
429 U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
431 U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
434 dmae->comp_addr_hi = 0;
/* NIG egress packet counters (not present on E3). */
439 if (!CHIP_IS_E3(sc)) {
440 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
441 dmae->opcode = opcode;
443 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
444 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
445 dmae->src_addr_hi = 0;
446 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
447 offsetof(struct nig_stats,
448 egress_mac_pkt0_lo));
449 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
450 offsetof(struct nig_stats,
451 egress_mac_pkt0_lo));
452 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
453 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
454 dmae->comp_addr_hi = 0;
457 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
458 dmae->opcode = opcode;
460 (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
461 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
462 dmae->src_addr_hi = 0;
463 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
464 offsetof(struct nig_stats,
465 egress_mac_pkt1_lo));
466 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
467 offsetof(struct nig_stats,
468 egress_mac_pkt1_lo));
469 dmae->len = ((2 * sizeof(uint32_t)) >> 2);
470 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
471 dmae->comp_addr_hi = 0;
/* Final command: BRB discard counters, completing to host (PCI). */
475 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
476 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
477 TRUE, DMAE_COMP_PCI);
479 (port ? NIG_REG_STAT1_BRB_DISCARD :
480 NIG_REG_STAT0_BRB_DISCARD) >> 2;
481 dmae->src_addr_hi = 0;
482 dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
483 dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
/* Skip the last 4 dwords of nig_stats (filled by the commands above). */
484 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;
486 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
487 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
488 dmae->comp_val = DMAE_COMP_VAL;
/*
 * Prepare a single DMAE command that writes host function statistics out
 * to the MCP scratchpad (func_stx), completing to host memory. Used when
 * this function is not the PMF (no port-level chain is needed).
 */
494 bnx2x_func_stats_init(struct bnx2x_softc *sc)
496 struct dmae_command *dmae = &sc->stats_dmae;
497 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* NOTE(review): the guard condition line (likely '!sc->func_stx') is
 * missing from this extract -- confirm against the complete source. */
501 PMD_DRV_LOG(ERR, "BUG!");
505 sc->executer_idx = 0;
506 memset(dmae, 0, sizeof(struct dmae_command));
/* PCI -> GRC write, signalling completion directly to the host. */
508 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
509 TRUE, DMAE_COMP_PCI);
510 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
511 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
512 dmae->dst_addr_lo = (sc->func_stx >> 2);
513 dmae->dst_addr_hi = 0;
514 dmae->len = (sizeof(struct host_func_stats) >> 2);
515 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
516 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
517 dmae->comp_val = DMAE_COMP_VAL;
/*
 * Statistics FSM 'start' action: (re)build the appropriate DMAE chain --
 * port-level when PMF, otherwise function-level -- then post both the
 * hardware stats DMA and the firmware (storm) stats ramrod.
 */
523 bnx2x_stats_start(struct bnx2x_softc *sc)
526 * VFs travel through here as part of the statistics FSM, but no action
534 bnx2x_port_stats_init(sc);
537 else if (sc->func_stx) {
538 bnx2x_func_stats_init(sc);
541 bnx2x_hw_stats_post(sc);
542 bnx2x_storm_stats_post(sc);
/*
 * FSM action when becoming the PMF: drain any in-flight stats DMA, pull
 * the previous PMF's port stats from the MCP, then start normal stats.
 */
546 bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
548 bnx2x_stats_comp(sc);
549 bnx2x_stats_pmf_update(sc);
550 bnx2x_stats_start(sc);
/*
 * FSM 'restart' action (e.g. after a link change): wait for the current
 * stats DMA to finish, then restart collection with a fresh chain.
 */
554 bnx2x_stats_restart(struct bnx2x_softc *sc)
557 * VFs travel through here as part of the statistics FSM, but no action
564 bnx2x_stats_comp(sc);
565 bnx2x_stats_start(sc);
/*
 * Fold freshly-DMAed BigMAC counters into the accumulated host port
 * statistics. Uses the bmac1 layout on E1x chips and the bmac2 layout
 * (which adds PFC frame counters) otherwise; finally mirrors the pause
 * and PFC totals into the driver's eth_stats.
 */
569 bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
571 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
572 struct bnx2x_eth_stats *estats = &sc->eth_stats;
578 if (CHIP_IS_E1x(sc)) {
579 struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);
581 /* the macros below will use "bmac1_stats" type */
582 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
583 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
584 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
585 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
586 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
587 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
588 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
590 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
592 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
593 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
594 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
595 UPDATE_STAT64(tx_stat_gt127,
596 tx_stat_etherstatspkts65octetsto127octets);
597 UPDATE_STAT64(tx_stat_gt255,
598 tx_stat_etherstatspkts128octetsto255octets);
599 UPDATE_STAT64(tx_stat_gt511,
600 tx_stat_etherstatspkts256octetsto511octets);
601 UPDATE_STAT64(tx_stat_gt1023,
602 tx_stat_etherstatspkts512octetsto1023octets);
603 UPDATE_STAT64(tx_stat_gt1518,
604 tx_stat_etherstatspkts1024octetsto1522octets);
605 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
606 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
607 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
608 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
609 UPDATE_STAT64(tx_stat_gterr,
610 tx_stat_dot3statsinternalmactransmiterrors);
611 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
/* E2+ path: bmac2 layout, same counters plus PFC. */
613 struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
614 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
616 /* the macros below will use "bmac2_stats" type */
617 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
618 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
619 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
620 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
621 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
622 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
623 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
624 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
625 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
626 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
627 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
628 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
629 UPDATE_STAT64(tx_stat_gt127,
630 tx_stat_etherstatspkts65octetsto127octets);
631 UPDATE_STAT64(tx_stat_gt255,
632 tx_stat_etherstatspkts128octetsto255octets);
633 UPDATE_STAT64(tx_stat_gt511,
634 tx_stat_etherstatspkts256octetsto511octets);
635 UPDATE_STAT64(tx_stat_gt1023,
636 tx_stat_etherstatspkts512octetsto1023octets);
637 UPDATE_STAT64(tx_stat_gt1518,
638 tx_stat_etherstatspkts1024octetsto1522octets);
639 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
640 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
641 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
642 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
643 UPDATE_STAT64(tx_stat_gterr,
644 tx_stat_dot3statsinternalmactransmiterrors);
645 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
647 /* collect PFC stats */
/* PFC totals = current MAC counters + saved pre-reset firmware totals. */
648 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
649 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
650 ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
651 pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
653 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
654 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
655 ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
656 pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
/* Expose pause/PFC totals via eth_stats (mac_stx[1] = accumulated). */
659 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
660 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
662 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
663 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
665 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
666 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
667 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
668 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * Fold freshly-DMAed MSTAT counters (UMAC/XMAC on newer chips) into the
 * accumulated host port statistics, then derive the driver's eth_stats
 * aggregates (oversize packet total, pause and PFC totals).
 */
672 bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
674 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
675 struct bnx2x_eth_stats *estats = &sc->eth_stats;
676 struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);
678 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
679 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
680 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
681 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
682 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
683 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
684 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
685 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
686 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
687 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
689 /* collect pfc stats */
690 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
691 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
692 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
693 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
695 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
696 ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
697 ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
698 ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
699 ADD_STAT64(stats_tx.tx_gt1023,
700 tx_stat_etherstatspkts512octetsto1023octets);
701 ADD_STAT64(stats_tx.tx_gt1518,
702 tx_stat_etherstatspkts1024octetsto1522octets);
703 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
705 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
706 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
707 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
709 ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
710 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
/* Mirror accumulated (mac_stx[1]) counters into eth_stats. */
712 estats->etherstatspkts1024octetsto1522octets_hi =
713 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
714 estats->etherstatspkts1024octetsto1522octets_lo =
715 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
/* 'over 1522' is the sum of the 2047/4095/9216/16383 buckets. */
717 estats->etherstatspktsover1522octets_hi =
718 pstats->mac_stx[1].tx_stat_mac_2047_hi;
719 estats->etherstatspktsover1522octets_lo =
720 pstats->mac_stx[1].tx_stat_mac_2047_lo;
722 ADD_64(estats->etherstatspktsover1522octets_hi,
723 pstats->mac_stx[1].tx_stat_mac_4095_hi,
724 estats->etherstatspktsover1522octets_lo,
725 pstats->mac_stx[1].tx_stat_mac_4095_lo);
727 ADD_64(estats->etherstatspktsover1522octets_hi,
728 pstats->mac_stx[1].tx_stat_mac_9216_hi,
729 estats->etherstatspktsover1522octets_lo,
730 pstats->mac_stx[1].tx_stat_mac_9216_lo);
732 ADD_64(estats->etherstatspktsover1522octets_hi,
733 pstats->mac_stx[1].tx_stat_mac_16383_hi,
734 estats->etherstatspktsover1522octets_lo,
735 pstats->mac_stx[1].tx_stat_mac_16383_lo);
737 estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
738 estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
740 estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
741 estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
743 estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
744 estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
745 estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
746 estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
/*
 * Fold freshly-DMAed EMAC counters into the accumulated host port
 * statistics, then derive pause-frame totals for eth_stats (received =
 * XON + XOFF received; sent = XON + XOFF sent).
 */
750 bnx2x_emac_stats_update(struct bnx2x_softc *sc)
752 struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
753 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
754 struct bnx2x_eth_stats *estats = &sc->eth_stats;
756 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
757 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
758 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
759 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
760 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
761 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
762 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
763 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
764 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
765 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
766 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
767 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
768 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
769 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
770 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
771 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
772 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
773 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
774 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
775 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
776 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
777 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
778 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
779 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
780 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
781 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
782 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
783 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
784 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
785 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
786 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = XON received + XOFF received (accumulated values). */
788 estats->pause_frames_received_hi =
789 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
790 estats->pause_frames_received_lo =
791 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
792 ADD_64(estats->pause_frames_received_hi,
793 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
794 estats->pause_frames_received_lo,
795 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = XON sent + XOFF sent. */
797 estats->pause_frames_sent_hi =
798 pstats->mac_stx[1].tx_stat_outxonsent_hi;
799 estats->pause_frames_sent_lo =
800 pstats->mac_stx[1].tx_stat_outxonsent_lo;
801 ADD_64(estats->pause_frames_sent_hi,
802 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
803 estats->pause_frames_sent_lo,
804 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/*
 * Process the results of a completed hardware statistics DMA: dispatch to
 * the MAC-specific accumulator, fold in NIG deltas (BRB discard/truncate,
 * egress packet counters on pre-E3 chips), snapshot the new NIG values,
 * copy accumulated MAC stats into eth_stats, and pick up EEE/NIG-timer
 * values where applicable.
 */
808 bnx2x_hw_stats_update(struct bnx2x_softc *sc)
810 struct nig_stats *new = BNX2X_SP(sc, nig_stats);
811 struct nig_stats *old = &(sc->port.old_nig_stats);
812 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
813 struct bnx2x_eth_stats *estats = &sc->eth_stats;
814 uint32_t lpi_reg, nig_timer_max;
/* Dispatch on the active MAC block. */
820 switch (sc->link_vars.mac_type) {
821 case ELINK_MAC_TYPE_BMAC:
822 bnx2x_bmac_stats_update(sc);
825 case ELINK_MAC_TYPE_EMAC:
826 bnx2x_emac_stats_update(sc);
829 case ELINK_MAC_TYPE_UMAC:
830 case ELINK_MAC_TYPE_XMAC:
831 bnx2x_mstat_stats_update(sc);
834 case ELINK_MAC_TYPE_NONE: /* unreached */
836 "stats updated by DMAE but no MAC active");
839 default: /* unreached */
840 PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
/* NIG counters are deltas against the previous snapshot. */
843 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
844 new->brb_discard - old->brb_discard);
845 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
846 new->brb_truncate - old->brb_truncate);
848 if (!CHIP_IS_E3(sc)) {
849 UPDATE_STAT64_NIG(egress_mac_pkt0,
850 etherstatspkts1024octetsto1522octets);
851 UPDATE_STAT64_NIG(egress_mac_pkt1,
852 etherstatspktsover1522octets);
/* Save the new NIG snapshot for the next delta computation. */
855 rte_memcpy(old, new, sizeof(struct nig_stats));
/* Bulk-copy the accumulated MAC stats block into eth_stats. */
857 rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
858 sizeof(struct mac_stx));
859 estats->brb_drop_hi = pstats->brb_drop_hi;
860 estats->brb_drop_lo = pstats->brb_drop_lo;
862 pstats->host_port_stats_counter++;
/* E3: accumulate EEE TX LPI entry count from the per-port CPMU reg. */
864 if (CHIP_IS_E3(sc)) {
865 lpi_reg = (SC_PORT(sc)) ?
866 MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
867 MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
868 estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
871 if (!BNX2X_NOMCP(sc)) {
872 nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
873 if (nig_timer_max != estats->nig_timer_max) {
874 estats->nig_timer_max = nig_timer_max;
875 PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
876 estats->nig_timer_max);
/*
 * Verify that all four storms (x/u/c/t) have stamped their statistics
 * with the sequence number of the last stats ramrod we sent. A mismatch
 * means that storm's statistics are stale and must not be consumed yet.
 * NOTE(review): the non-zero early-return lines inside each branch are
 * missing from this extract -- confirm against the complete source.
 */
884 bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
886 struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
887 uint16_t cur_stats_counter;
890 * Make sure we use the value of the counter
891 * used for sending the last stats ramrod.
893 cur_stats_counter = (sc->stats_counter - 1);
895 /* are storm stats valid? */
896 if (le16toh(counters->xstats_counter) != cur_stats_counter) {
898 "stats not updated by xstorm, "
899 "counter 0x%x != stats_counter 0x%x",
900 le16toh(counters->xstats_counter), sc->stats_counter);
904 if (le16toh(counters->ustats_counter) != cur_stats_counter) {
906 "stats not updated by ustorm, "
907 "counter 0x%x != stats_counter 0x%x",
908 le16toh(counters->ustats_counter), sc->stats_counter);
912 if (le16toh(counters->cstats_counter) != cur_stats_counter) {
914 "stats not updated by cstorm, "
915 "counter 0x%x != stats_counter 0x%x",
916 le16toh(counters->cstats_counter), sc->stats_counter);
920 if (le16toh(counters->tstats_counter) != cur_stats_counter) {
922 "stats not updated by tstorm, "
923 "counter 0x%x != stats_counter 0x%x",
924 le16toh(counters->tstats_counter), sc->stats_counter);
932 bnx2x_storm_stats_update(struct bnx2x_softc *sc)
934 struct tstorm_per_port_stats *tport =
935 &sc->fw_stats_data->port.tstorm_port_statistics;
936 struct tstorm_per_pf_stats *tfunc =
937 &sc->fw_stats_data->pf.tstorm_pf_statistics;
938 struct host_func_stats *fstats = &sc->func_stats;
939 struct bnx2x_eth_stats *estats = &sc->eth_stats;
940 struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
943 /* vfs stat counter is managed by pf */
944 if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) {
948 estats->error_bytes_received_hi = 0;
949 estats->error_bytes_received_lo = 0;
951 for (i = 0; i < sc->num_queues; i++) {
952 struct bnx2x_fastpath *fp = &sc->fp[i];
953 struct tstorm_per_queue_stats *tclient =
954 &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
955 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
956 struct ustorm_per_queue_stats *uclient =
957 &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
958 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
959 struct xstorm_per_queue_stats *xclient =
960 &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
961 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
962 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
963 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
967 /* PMD_DRV_LOG(DEBUG,
968 "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
969 i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
970 xclient->mcast_pkts_sent);
972 PMD_DRV_LOG(DEBUG, "---------------"); */
974 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
975 total_broadcast_bytes_received);
976 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
977 total_multicast_bytes_received);
978 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
979 total_unicast_bytes_received);
982 * sum to total_bytes_received all
983 * unicast/multicast/broadcast
985 qstats->total_bytes_received_hi =
986 qstats->total_broadcast_bytes_received_hi;
987 qstats->total_bytes_received_lo =
988 qstats->total_broadcast_bytes_received_lo;
990 ADD_64(qstats->total_bytes_received_hi,
991 qstats->total_multicast_bytes_received_hi,
992 qstats->total_bytes_received_lo,
993 qstats->total_multicast_bytes_received_lo);
995 ADD_64(qstats->total_bytes_received_hi,
996 qstats->total_unicast_bytes_received_hi,
997 qstats->total_bytes_received_lo,
998 qstats->total_unicast_bytes_received_lo);
1000 qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
1001 qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
1003 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
1004 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
1005 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
1006 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
1007 etherstatsoverrsizepkts, 32);
1008 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
1010 SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
1011 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
1012 total_multicast_packets_received);
1013 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
1014 total_broadcast_packets_received);
1015 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
1016 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
1017 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
1019 UPDATE_QSTAT(xclient->bcast_bytes_sent,
1020 total_broadcast_bytes_transmitted);
1021 UPDATE_QSTAT(xclient->mcast_bytes_sent,
1022 total_multicast_bytes_transmitted);
1023 UPDATE_QSTAT(xclient->ucast_bytes_sent,
1024 total_unicast_bytes_transmitted);
1027 * sum to total_bytes_transmitted all
1028 * unicast/multicast/broadcast
1030 qstats->total_bytes_transmitted_hi =
1031 qstats->total_unicast_bytes_transmitted_hi;
1032 qstats->total_bytes_transmitted_lo =
1033 qstats->total_unicast_bytes_transmitted_lo;
1035 ADD_64(qstats->total_bytes_transmitted_hi,
1036 qstats->total_broadcast_bytes_transmitted_hi,
1037 qstats->total_bytes_transmitted_lo,
1038 qstats->total_broadcast_bytes_transmitted_lo);
1040 ADD_64(qstats->total_bytes_transmitted_hi,
1041 qstats->total_multicast_bytes_transmitted_hi,
1042 qstats->total_bytes_transmitted_lo,
1043 qstats->total_multicast_bytes_transmitted_lo);
1045 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
1046 total_unicast_packets_transmitted);
1047 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1048 total_multicast_packets_transmitted);
1049 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1050 total_broadcast_packets_transmitted);
1052 UPDATE_EXTEND_TSTAT(checksum_discard,
1053 total_packets_received_checksum_discarded);
1054 UPDATE_EXTEND_TSTAT(ttl0_discard,
1055 total_packets_received_ttl0_discarded);
1057 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1058 total_transmitted_dropped_packets_error);
1060 UPDATE_FSTAT_QSTAT(total_bytes_received);
1061 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1062 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1063 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1064 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1065 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1066 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1067 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1068 UPDATE_FSTAT_QSTAT(valid_bytes_received);
1071 ADD_64(estats->total_bytes_received_hi,
1072 estats->rx_stat_ifhcinbadoctets_hi,
1073 estats->total_bytes_received_lo,
1074 estats->rx_stat_ifhcinbadoctets_lo);
1076 ADD_64_LE(estats->total_bytes_received_hi,
1077 tfunc->rcv_error_bytes.hi,
1078 estats->total_bytes_received_lo,
1079 tfunc->rcv_error_bytes.lo);
1081 ADD_64_LE(estats->error_bytes_received_hi,
1082 tfunc->rcv_error_bytes.hi,
1083 estats->error_bytes_received_lo,
1084 tfunc->rcv_error_bytes.lo);
1086 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1088 ADD_64(estats->error_bytes_received_hi,
1089 estats->rx_stat_ifhcinbadoctets_hi,
1090 estats->error_bytes_received_lo,
1091 estats->rx_stat_ifhcinbadoctets_lo);
1094 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1095 UPDATE_FW_STAT(mac_filter_discard);
1096 UPDATE_FW_STAT(mf_tag_discard);
1097 UPDATE_FW_STAT(brb_truncate_discard);
1098 UPDATE_FW_STAT(mac_discard);
1101 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1103 sc->stats_pending = 0;
/*
 * bnx2x_drv_stats_update
 *
 * Fold the per-queue software (driver-maintained) counters of every
 * fast-path queue into the device-wide 'eth_stats' block.  The
 * UPDATE_ESTAT_QSTAT() macro takes only a field name, so it presumably
 * expands against the local 'estats'/'qstats'/'qstats_old' pointers
 * declared below -- do not rename them (macro is defined in
 * bnx2x_stats.h, outside this view; TODO confirm its exact semantics).
 */
1109 bnx2x_drv_stats_update(struct bnx2x_softc *sc)
1111 struct bnx2x_eth_stats *estats = &sc->eth_stats;
/* Accumulate each queue's counters into the device totals. */
1114 for (i = 0; i < sc->num_queues; i++) {
1115 struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1116 struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
/* RX path software counters */
1118 UPDATE_ESTAT_QSTAT(rx_calls);
1119 UPDATE_ESTAT_QSTAT(rx_pkts);
1120 UPDATE_ESTAT_QSTAT(rx_soft_errors);
1121 UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1122 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1123 UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1124 UPDATE_ESTAT_QSTAT(rx_budget_reached);
/* TX path software counters */
1125 UPDATE_ESTAT_QSTAT(tx_pkts);
1126 UPDATE_ESTAT_QSTAT(tx_soft_errors);
1127 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1128 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1129 UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1130 UPDATE_ESTAT_QSTAT(tx_encap_failures);
1131 UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1132 UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1133 UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1134 UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1135 UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1136 UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1137 UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1138 UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1140 /* mbuf driver statistics */
1141 UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1142 UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1143 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1144 UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1146 /* track the number of allocated mbufs */
1147 UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1148 UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
/*
 * bnx2x_edebug_stats_stopped
 *
 * Query the shmem2 "edebug" driver interface to see whether firmware
 * debug has requested that statistics collection be disabled.
 * Returns non-zero (TRUE) when the DISABLE_STAT opcode is present --
 * the return statements themselves are on lines elided from this view.
 */
1153 bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
/* Only meaningful if shmem2 actually exposes edebug_driver_if[1]. */
1157 if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1158 val = SHMEM2_RD(sc, edebug_driver_if[1]);
1160 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
/*
 * bnx2x_stats_update
 *
 * Periodic statistics refresh, driven by the stats state machine on
 * STATS_EVENT_UPDATE.  Aborts early when firmware debug has disabled
 * statistics collection.  NOTE(review): several control-flow lines
 * (braces, PF/VF branch headers, returns) are elided in this listing,
 * so the grouping of the calls below is inferred -- confirm against
 * the full file.
 */
1169 bnx2x_stats_update(struct bnx2x_softc *sc)
1171 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* The edebug interface can disable statistics entirely. */
1173 if (bnx2x_edebug_stats_stopped(sc)) {
1179 bnx2x_storm_stats_update(sc);
1180 bnx2x_hw_stats_post(sc);
1181 bnx2x_storm_stats_post(sc);
/* DMAE completion marker: previous HW stats transfer must have finished. */
1184 if (*stats_comp != DMAE_COMP_VAL) {
1189 bnx2x_hw_stats_update(sc);
/* Panic if the storm (FW) statistics fail to update repeatedly. */
1192 if (bnx2x_storm_stats_update(sc)) {
1193 if (sc->stats_pending++ == 3) {
1194 rte_panic("storm stats not updated for 3 times");
1200 * A VF doesn't collect HW statistics and doesn't get completions;
1201 * it performs the storm-stats update only.
1203 bnx2x_storm_stats_update(sc);
/* Finally refresh the driver-maintained (software) counters. */
1206 bnx2x_drv_stats_update(sc);
/*
 * bnx2x_port_stats_stop
 *
 * Build the DMAE command(s) that push the final statistics snapshots
 * from host memory back to the device before collection stops:
 * port_stats -> port_stx when a port stats address is configured, and
 * (below) func_stats -> func_stx.
 */
1210 bnx2x_port_stats_stop(struct bnx2x_softc *sc)
1212 struct dmae_command *dmae;
1214 int loader_idx = PMF_DMAE_C(sc);
1215 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Start a fresh DMAE command sequence. */
1217 sc->executer_idx = 0;
/* Base PCI -> GRC opcode; the completion type is patched in per command. */
1219 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
/* Port statistics: copy the host port_stats block to the device. */
1221 if (sc->port.port_stx) {
1222 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
/*
 * NOTE(review): lines 1225 and 1227 look like the two arms of an elided
 * conditional (GRC-signalled completion when another command follows,
 * PCI completion otherwise) -- confirm against the full file.
 */
1225 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1227 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1230 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1231 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
/* GRC addresses are in 32-bit words, hence the >> 2. */
1232 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1233 dmae->dst_addr_hi = 0;
1234 dmae->len = bnx2x_get_port_stats_dma_len(sc);
/* GRC completion: write to the DMAE "go" register of the loader channel. */
1236 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1237 dmae->comp_addr_hi = 0;
/* PCI completion: write DMAE_COMP_VAL into the host stats_comp word. */
1240 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1241 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1242 dmae->comp_val = DMAE_COMP_VAL;
/* Function statistics: copy the host func_stats block to the device. */
1249 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1250 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1251 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
1252 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
1253 dmae->dst_addr_lo = (sc->func_stx >> 2);
1254 dmae->dst_addr_hi = 0;
/* Length is in 32-bit words. */
1255 dmae->len = (sizeof(struct host_func_stats) >> 2);
1256 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1257 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1258 dmae->comp_val = DMAE_COMP_VAL;
/*
 * bnx2x_stats_stop
 *
 * Stop statistics collection: wait for outstanding completions, take a
 * last HW/storm snapshot, push the final port statistics back to the
 * device and wait for that DMAE to complete.  NOTE(review): the
 * conditions guarding the calls below (PMF checks etc.) are on lines
 * elided from this listing.
 */
1265 bnx2x_stats_stop(struct bnx2x_softc *sc)
1267 uint8_t update = FALSE;
/* Wait for any in-flight stats DMAE/ramrod to complete. */
1269 bnx2x_stats_comp(sc);
/* Record whether a final snapshot was successfully obtained. */
1272 update = bnx2x_hw_stats_update(sc) == 0;
1275 update |= bnx2x_storm_stats_update(sc) == 0;
/* Flush the final port stats to the device, then wait for completion. */
1280 bnx2x_port_stats_stop(sc);
1283 bnx2x_hw_stats_post(sc);
1284 bnx2x_stats_comp(sc);
/* No-op action for state/event pairs that require no work. */
1289 bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
/*
 * Statistics state machine: {action, next_state}, indexed by
 * [current state][event].  The first row handles STATS_STATE_DISABLED,
 * the second STATS_STATE_ENABLED; the columns follow the event order
 * annotated inline (PMF, LINK_UP, UPDATE, STOP).
 */
1294 static const struct {
1295 void (*action)(struct bnx2x_softc *sc);
1296 enum bnx2x_stats_state next_state;
1297 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1299 /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
1300 /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED },
1301 /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
1302 /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
1305 /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED },
1306 /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED },
1307 /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED },
1308 /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED }
/*
 * bnx2x_stats_handle
 *
 * Public entry point: feed 'event' into the statistics state machine.
 * The next state is committed before the action runs, and the action
 * is looked up with the *previous* state index.  Does nothing once the
 * device has panicked.
 */
1312 void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
1314 enum bnx2x_stats_state state;
/* After a panic, statistics processing is abandoned. */
1316 if (unlikely(sc->panic)) {
1320 state = sc->stats_state;
1321 sc->stats_state = bnx2x_stats_stm[state][event].next_state;
1323 bnx2x_stats_stm[state][event].action(sc);
/* Log transitions, but skip the noisy periodic UPDATE event. */
1325 if (event != STATS_EVENT_UPDATE) {
1327 "state %d -> event %d -> state %d",
1328 state, event, sc->stats_state);
/*
 * bnx2x_port_stats_base_init
 *
 * One-shot DMAE transfer of the host port_stats block to the device's
 * port_stx area.  Only valid on the PMF with a configured port stats
 * address; anything else is a caller bug (logged below).
 */
1333 bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
1335 struct dmae_command *dmae;
1336 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
/* Sanity check: only the PMF with a valid port_stx may run this. */
1339 if (!sc->port.pmf || !sc->port.port_stx) {
1340 PMD_DRV_LOG(ERR, "BUG!");
/* Build a single PCI -> GRC DMAE command with PCI-side completion. */
1344 sc->executer_idx = 0;
1346 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1347 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1348 TRUE, DMAE_COMP_PCI);
1349 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1350 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
/* GRC destination addresses are in 32-bit words, hence the >> 2. */
1351 dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1352 dmae->dst_addr_hi = 0;
1353 dmae->len = bnx2x_get_port_stats_dma_len(sc);
/* Completion: DMAE writes DMAE_COMP_VAL into the host stats_comp word. */
1354 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1355 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1356 dmae->comp_val = DMAE_COMP_VAL;
/* Kick the command and wait for its completion. */
1359 bnx2x_hw_stats_post(sc);
1360 bnx2x_stats_comp(sc);
1364 * This function prepares the statistics ramrod data once, so that
1365 * afterwards we only have to increment the statistics counter and
1366 * send the ramrod each time we have to.
1369 bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
1372 int first_queue_query_index;
1373 struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1374 phys_addr_t cur_data_offset;
1375 struct stats_query_entry *cur_query_entry;
/* Header: total number of query entries; counter starts at zero. */
1377 stats_hdr->cmd_num = sc->fw_stats_num;
1378 stats_hdr->drv_stats_counter = 0;
1381 * The storm_counters struct contains the counters of completed
1382 * statistics requests per storm which are incremented by FW
1383 * each time it completes handling a statistics ramrod. We will
1384 * check these counters in the timer handler and discard a
1385 * (statistics) ramrod completion.
1387 cur_data_offset = (sc->fw_stats_data_mapping +
1388 offsetof(struct bnx2x_fw_stats_data, storm_counters));
/* FW-visible fields are little-endian; hence the htole32() wrapping. */
1390 stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1391 stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1394 * Prepare the first stats ramrod (will be completed with
1395 * the counters equal to zero) - init counters to something different.
1397 memset(&sc->fw_stats_data->storm_counters, 0xff,
1398 sizeof(struct stats_counter));
1400 /**** Port FW statistics data ****/
1401 cur_data_offset = (sc->fw_stats_data_mapping +
1402 offsetof(struct bnx2x_fw_stats_data, port));
1404 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1406 cur_query_entry->kind = STATS_TYPE_PORT;
1407 /* For port query index is a DONT CARE */
1408 cur_query_entry->index = SC_PORT(sc);
1409 /* For port query funcID is a DONT CARE */
1410 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1411 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1412 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1414 /**** PF FW statistics data ****/
1415 cur_data_offset = (sc->fw_stats_data_mapping +
1416 offsetof(struct bnx2x_fw_stats_data, pf));
1418 cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1420 cur_query_entry->kind = STATS_TYPE_PF;
1421 /* For PF query index is a DONT CARE */
1422 cur_query_entry->index = SC_PORT(sc);
1423 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1424 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1425 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1427 /**** Clients' queries ****/
1428 cur_data_offset = (sc->fw_stats_data_mapping +
1429 offsetof(struct bnx2x_fw_stats_data, queue_stats));
1432 * First queue query index depends whether FCoE offloaded request will
1433 * be included in the ramrod
1435 first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);
/* One STATS_TYPE_QUEUE entry per RX/TX queue, data areas laid out back to back. */
1437 for (i = 0; i < sc->num_queues; i++) {
1439 &sc->fw_stats_req->query[first_queue_query_index + i];
1441 cur_query_entry->kind = STATS_TYPE_QUEUE;
1442 cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
1443 cur_query_entry->funcID = htole16(SC_FUNC(sc));
1444 cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1445 cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1447 cur_data_offset += sizeof(struct per_queue_stats);
/*
 * bnx2x_memset_stats
 *
 * Zero all statistics baselines.  Per-queue "old" client snapshots are
 * always cleared; the accumulated queue/device counters are cleared
 * only on the very first initialization (sc->stats_init), so they
 * survive later re-inits.  Ends with stats collection DISABLED.
 */
1451 void bnx2x_memset_stats(struct bnx2x_softc *sc)
1455 /* function stats */
1456 for (i = 0; i < sc->num_queues; i++) {
1457 struct bnx2x_fastpath *fp = &sc->fp[i];
/* Reset the per-queue "previous snapshot" blocks used for deltas. */
1459 memset(&fp->old_tclient, 0,
1460 sizeof(fp->old_tclient));
1461 memset(&fp->old_uclient, 0,
1462 sizeof(fp->old_uclient));
1463 memset(&fp->old_xclient, 0,
1464 sizeof(fp->old_xclient));
/* Accumulated queue stats are only wiped on first-time init. */
1465 if (sc->stats_init) {
1466 memset(&fp->eth_q_stats, 0,
1467 sizeof(fp->eth_q_stats));
1468 memset(&fp->eth_q_stats_old, 0,
1469 sizeof(fp->eth_q_stats_old));
/* Device-wide accumulated stats, likewise only on first-time init. */
1473 if (sc->stats_init) {
1474 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1475 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1476 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1477 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1478 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1481 sc->stats_state = STATS_STATE_DISABLED;
/* PMF with a valid port stats address seeds the device-side block. */
1483 if (sc->port.pmf && sc->port.port_stx)
1484 bnx2x_port_stats_base_init(sc);
1486 /* mark the end of statistics initialization */
1487 sc->stats_init = false;
/*
 * bnx2x_stats_init
 *
 * Full statistics initialization: read the management FW (MCP) port and
 * function stats addresses, snapshot the NIG baseline counters, reset
 * all per-queue and device-wide stats blocks, prepare the FW stats
 * ramrod data, and leave collection in the DISABLED state.
 */
1491 bnx2x_stats_init(struct bnx2x_softc *sc)
1493 int /*abs*/port = SC_PORT(sc);
1494 int mb_idx = SC_FW_MB_IDX(sc);
1497 sc->stats_pending = 0;
1498 sc->executer_idx = 0;
1499 sc->stats_counter = 0;
1501 sc->stats_init = TRUE;
1503 /* port and func stats for management */
1504 if (!BNX2X_NOMCP(sc)) {
1505 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1506 sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
/* No MCP: there is no management-visible port stats area. */
1508 sc->port.port_stx = 0;
1512 PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
1513 sc->port.port_stx, sc->func_stx);
1515 /* pmf should retrieve port statistics from SP on a non-init */
/*
 * NOTE(review): sc->stats_init was set TRUE above (line 1501), so this
 * branch appears unreachable on this path -- confirm against the lines
 * elided from this listing.
 */
1516 if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1517 bnx2x_stats_handle(sc, STATS_EVENT_PMF);
/* Snapshot the NIG counters so later reads can be turned into deltas. */
1522 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1523 sc->port.old_nig_stats.brb_discard =
1524 REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1525 sc->port.old_nig_stats.brb_truncate =
1526 REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
/* Egress MAC packet counters exist only on pre-E3 chips. */
1527 if (!CHIP_IS_E3(sc)) {
1528 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1529 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1530 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1531 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1534 /* function stats */
1535 for (i = 0; i < sc->num_queues; i++) {
1536 memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1537 memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1538 memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
/* Accumulated queue stats are only wiped on first-time init. */
1539 if (sc->stats_init) {
1540 memset(&sc->fp[i].eth_q_stats, 0,
1541 sizeof(sc->fp[i].eth_q_stats));
1542 memset(&sc->fp[i].eth_q_stats_old, 0,
1543 sizeof(sc->fp[i].eth_q_stats_old));
1547 /* prepare statistics ramrod data */
1548 bnx2x_prep_fw_stats_req(sc);
/* Device-wide accumulated stats, likewise only on first-time init. */
1550 if (sc->stats_init) {
1551 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1552 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1553 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1554 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1555 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1557 /* Clean SP from previous statistics */
1559 memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1560 bnx2x_func_stats_init(sc);
1561 bnx2x_hw_stats_post(sc);
1562 bnx2x_stats_comp(sc);
1566 sc->stats_state = STATS_STATE_DISABLED;
/* PMF with a valid port stats address seeds the device-side block. */
1568 if (sc->port.pmf && sc->port.port_stx) {
1569 bnx2x_port_stats_base_init(sc);
1572 /* mark the end of statistics initialization */
1573 sc->stats_init = FALSE;
1577 bnx2x_save_statistics(struct bnx2x_softc *sc)
1581 /* save queue statistics */
1582 for (i = 0; i < sc->num_queues; i++) {
1583 struct bnx2x_fastpath *fp = &sc->fp[i];
1584 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1585 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1587 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1588 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1589 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1590 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1591 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1592 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1593 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1594 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1595 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1596 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1597 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1598 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1601 /* store port firmware statistics */
1603 struct bnx2x_eth_stats *estats = &sc->eth_stats;
1604 struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1605 struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
1607 fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1608 fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1609 fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1610 fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
1613 UPDATE_FW_STAT_OLD(mac_filter_discard);
1614 UPDATE_FW_STAT_OLD(mf_tag_discard);
1615 UPDATE_FW_STAT_OLD(brb_truncate_discard);
1616 UPDATE_FW_STAT_OLD(mac_discard);