1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
8 #include <rte_string_fns.h>
9 #include <rte_byteorder.h>
13 #include "bnxt_filter.h"
14 #include "bnxt_hwrm.h"
16 #include "bnxt_stats.h"
18 #include "bnxt_vnic.h"
19 #include "hsi_struct_def_dpdk.h"
/*
 * Name/offset table mapping each Rx port extended-stat name exposed via
 * rte_eth_xstats to its field inside struct rx_port_stats (the HW DMA
 * statistics layout from hsi_struct_def_dpdk.h). Consumed by
 * bnxt_dev_xstats_get_op()/bnxt_dev_xstats_get_names_op() below.
 * NOTE(review): this copy of the file is truncated — the embedded line
 * numbers skip (e.g. 22 -> 24), so many initializer entries are missing
 * their closing "member)}," line and the array terminator "};" is absent.
 * Restore the missing lines from the upstream source before building.
 */
21 static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
22 {"rx_64b_frames", offsetof(struct rx_port_stats,
24 {"rx_65b_127b_frames", offsetof(struct rx_port_stats,
26 {"rx_128b_255b_frames", offsetof(struct rx_port_stats,
27 rx_128b_255b_frames)},
28 {"rx_256b_511b_frames", offsetof(struct rx_port_stats,
29 rx_256b_511b_frames)},
30 {"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
31 rx_512b_1023b_frames)},
32 {"rx_1024b_1518b_frames", offsetof(struct rx_port_stats,
33 rx_1024b_1518b_frames)},
34 {"rx_good_vlan_frames", offsetof(struct rx_port_stats,
35 rx_good_vlan_frames)},
36 {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
37 rx_1519b_2047b_frames)},
38 {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
39 rx_2048b_4095b_frames)},
40 {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
41 rx_4096b_9216b_frames)},
42 {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
43 rx_9217b_16383b_frames)},
44 {"rx_total_frames", offsetof(struct rx_port_stats,
46 {"rx_ucast_frames", offsetof(struct rx_port_stats,
48 {"rx_mcast_frames", offsetof(struct rx_port_stats,
50 {"rx_bcast_frames", offsetof(struct rx_port_stats,
52 {"rx_fcs_err_frames", offsetof(struct rx_port_stats,
54 {"rx_ctrl_frames", offsetof(struct rx_port_stats,
56 {"rx_pause_frames", offsetof(struct rx_port_stats,
58 {"rx_pfc_frames", offsetof(struct rx_port_stats,
60 {"rx_unsupported_opcode_frames", offsetof(struct rx_port_stats,
61 rx_unsupported_opcode_frames)},
62 {"rx_unsupported_da_pausepfc_frames", offsetof(struct rx_port_stats,
63 rx_unsupported_da_pausepfc_frames)},
64 {"rx_wrong_sa_frames", offsetof(struct rx_port_stats,
66 {"rx_align_err_frames", offsetof(struct rx_port_stats,
67 rx_align_err_frames)},
68 {"rx_oor_len_frames", offsetof(struct rx_port_stats,
70 {"rx_code_err_frames", offsetof(struct rx_port_stats,
72 {"rx_false_carrier_frames", offsetof(struct rx_port_stats,
73 rx_false_carrier_frames)},
74 {"rx_ovrsz_frames", offsetof(struct rx_port_stats,
76 {"rx_jbr_frames", offsetof(struct rx_port_stats,
78 {"rx_mtu_err_frames", offsetof(struct rx_port_stats,
80 {"rx_match_crc_frames", offsetof(struct rx_port_stats,
81 rx_match_crc_frames)},
82 {"rx_promiscuous_frames", offsetof(struct rx_port_stats,
83 rx_promiscuous_frames)},
84 {"rx_tagged_frames", offsetof(struct rx_port_stats,
86 {"rx_double_tagged_frames", offsetof(struct rx_port_stats,
87 rx_double_tagged_frames)},
88 {"rx_trunc_frames", offsetof(struct rx_port_stats,
90 {"rx_good_frames", offsetof(struct rx_port_stats,
92 {"rx_sch_crc_err_frames", offsetof(struct rx_port_stats,
93 rx_sch_crc_err_frames)},
94 {"rx_undrsz_frames", offsetof(struct rx_port_stats,
96 {"rx_frag_frames", offsetof(struct rx_port_stats,
98 {"rx_eee_lpi_events", offsetof(struct rx_port_stats,
100 {"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
101 rx_eee_lpi_duration)},
102 {"rx_llfc_physical_msgs", offsetof(struct rx_port_stats,
103 rx_llfc_physical_msgs)},
104 {"rx_llfc_logical_msgs", offsetof(struct rx_port_stats,
105 rx_llfc_logical_msgs)},
106 {"rx_llfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
107 rx_llfc_msgs_with_crc_err)},
108 {"rx_hcfc_msgs", offsetof(struct rx_port_stats,
110 {"rx_hcfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
111 rx_hcfc_msgs_with_crc_err)},
112 {"rx_bytes", offsetof(struct rx_port_stats,
114 {"rx_runt_bytes", offsetof(struct rx_port_stats,
116 {"rx_runt_frames", offsetof(struct rx_port_stats,
/* Per-priority PFC XON->XOFF transition counters (priorities 0..7). */
118 {"rx_pfc_xon2xoff_frames_pri0", offsetof(struct rx_port_stats,
119 rx_pfc_xon2xoff_frames_pri0)},
120 {"rx_pfc_xon2xoff_frames_pri1", offsetof(struct rx_port_stats,
121 rx_pfc_xon2xoff_frames_pri1)},
122 {"rx_pfc_xon2xoff_frames_pri2", offsetof(struct rx_port_stats,
123 rx_pfc_xon2xoff_frames_pri2)},
124 {"rx_pfc_xon2xoff_frames_pri3", offsetof(struct rx_port_stats,
125 rx_pfc_xon2xoff_frames_pri3)},
126 {"rx_pfc_xon2xoff_frames_pri4", offsetof(struct rx_port_stats,
127 rx_pfc_xon2xoff_frames_pri4)},
128 {"rx_pfc_xon2xoff_frames_pri5", offsetof(struct rx_port_stats,
129 rx_pfc_xon2xoff_frames_pri5)},
130 {"rx_pfc_xon2xoff_frames_pri6", offsetof(struct rx_port_stats,
131 rx_pfc_xon2xoff_frames_pri6)},
132 {"rx_pfc_xon2xoff_frames_pri7", offsetof(struct rx_port_stats,
133 rx_pfc_xon2xoff_frames_pri7)},
/* Per-priority PFC enable-frame counters (priorities 0..7). */
134 {"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats,
135 rx_pfc_ena_frames_pri0)},
136 {"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats,
137 rx_pfc_ena_frames_pri1)},
138 {"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats,
139 rx_pfc_ena_frames_pri2)},
140 {"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats,
141 rx_pfc_ena_frames_pri3)},
142 {"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats,
143 rx_pfc_ena_frames_pri4)},
144 {"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats,
145 rx_pfc_ena_frames_pri5)},
146 {"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats,
147 rx_pfc_ena_frames_pri6)},
148 {"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats,
149 rx_pfc_ena_frames_pri7)},
150 {"rx_stat_discard", offsetof(struct rx_port_stats,
152 {"rx_stat_err", offsetof(struct rx_port_stats,
/*
 * Name/offset table for Tx port xstats, mirroring bnxt_rx_stats_strings
 * but over struct tx_port_stats.
 * NOTE(review): truncated copy — several entries lack their continuation
 * line and the "};" terminator is missing (embedded numbering skips).
 */
156 static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
157 {"tx_64b_frames", offsetof(struct tx_port_stats,
159 {"tx_65b_127b_frames", offsetof(struct tx_port_stats,
160 tx_65b_127b_frames)},
161 {"tx_128b_255b_frames", offsetof(struct tx_port_stats,
162 tx_128b_255b_frames)},
163 {"tx_256b_511b_frames", offsetof(struct tx_port_stats,
164 tx_256b_511b_frames)},
165 {"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
166 tx_512b_1023b_frames)},
167 {"tx_1024b_1518b_frames", offsetof(struct tx_port_stats,
168 tx_1024b_1518b_frames)},
169 {"tx_good_vlan_frames", offsetof(struct tx_port_stats,
170 tx_good_vlan_frames)},
171 {"tx_1519b_2047b_frames", offsetof(struct tx_port_stats,
172 tx_1519b_2047b_frames)},
173 {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
174 tx_2048b_4095b_frames)},
175 {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
176 tx_4096b_9216b_frames)},
177 {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
178 tx_9217b_16383b_frames)},
179 {"tx_good_frames", offsetof(struct tx_port_stats,
181 {"tx_total_frames", offsetof(struct tx_port_stats,
183 {"tx_ucast_frames", offsetof(struct tx_port_stats,
185 {"tx_mcast_frames", offsetof(struct tx_port_stats,
187 {"tx_bcast_frames", offsetof(struct tx_port_stats,
189 {"tx_pause_frames", offsetof(struct tx_port_stats,
191 {"tx_pfc_frames", offsetof(struct tx_port_stats,
193 {"tx_jabber_frames", offsetof(struct tx_port_stats,
195 {"tx_fcs_err_frames", offsetof(struct tx_port_stats,
197 {"tx_control_frames", offsetof(struct tx_port_stats,
199 {"tx_oversz_frames", offsetof(struct tx_port_stats,
201 {"tx_single_dfrl_frames", offsetof(struct tx_port_stats,
202 tx_single_dfrl_frames)},
203 {"tx_multi_dfrl_frames", offsetof(struct tx_port_stats,
204 tx_multi_dfrl_frames)},
205 {"tx_single_coll_frames", offsetof(struct tx_port_stats,
206 tx_single_coll_frames)},
207 {"tx_multi_coll_frames", offsetof(struct tx_port_stats,
208 tx_multi_coll_frames)},
209 {"tx_late_coll_frames", offsetof(struct tx_port_stats,
210 tx_late_coll_frames)},
211 {"tx_excessive_coll_frames", offsetof(struct tx_port_stats,
212 tx_excessive_coll_frames)},
213 {"tx_frag_frames", offsetof(struct tx_port_stats,
215 {"tx_err", offsetof(struct tx_port_stats,
217 {"tx_tagged_frames", offsetof(struct tx_port_stats,
219 {"tx_dbl_tagged_frames", offsetof(struct tx_port_stats,
220 tx_dbl_tagged_frames)},
221 {"tx_runt_frames", offsetof(struct tx_port_stats,
223 {"tx_fifo_underruns", offsetof(struct tx_port_stats,
225 {"tx_eee_lpi_events", offsetof(struct tx_port_stats,
227 {"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
228 tx_eee_lpi_duration)},
229 {"tx_total_collisions", offsetof(struct tx_port_stats,
230 tx_total_collisions)},
231 {"tx_bytes", offsetof(struct tx_port_stats,
/* Per-priority PFC enable-frame counters (priorities 0..7). */
233 {"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats,
234 tx_pfc_ena_frames_pri0)},
235 {"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats,
236 tx_pfc_ena_frames_pri1)},
237 {"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats,
238 tx_pfc_ena_frames_pri2)},
239 {"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats,
240 tx_pfc_ena_frames_pri3)},
241 {"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats,
242 tx_pfc_ena_frames_pri4)},
243 {"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats,
244 tx_pfc_ena_frames_pri5)},
245 {"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats,
246 tx_pfc_ena_frames_pri6)},
247 {"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats,
248 tx_pfc_ena_frames_pri7)},
249 {"tx_llfc_logical_msgs", offsetof(struct tx_port_stats,
250 tx_llfc_logical_msgs)},
251 {"tx_hcfc_msgs", offsetof(struct tx_port_stats,
253 {"tx_xthol_frames", offsetof(struct tx_port_stats,
255 {"tx_stat_discard", offsetof(struct tx_port_stats,
257 {"tx_stat_error", offsetof(struct tx_port_stats,
/*
 * Name/offset table for per-function statistics, laid over the
 * HWRM_FUNC_QSTATS response structure. Unlike the port tables above,
 * bnxt_dev_xstats_get_op() reads these from a locally accumulated
 * struct hwrm_func_qstats_output (see bnxt_fill_func_qstats()).
 * NOTE(review): truncated copy — every entry here is missing its
 * "member)}," continuation line and the "};" terminator.
 */
261 static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
262 {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
264 {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
266 {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
268 {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
270 {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
272 {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
274 {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
276 {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
278 {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
280 {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
282 {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
284 {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
286 {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
288 {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
290 {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
292 {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
294 {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
296 {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
298 {"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
300 {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
/*
 * Name/offset table for extended Rx port stats (struct rx_port_stats_ext).
 * Only the first bp->fw_rx_port_stats_ext_size / sizeof(uint64_t) entries
 * are reported, depending on how much of the structure the FW DMAs back
 * (see rx_port_stats_ext_cnt in bnxt_dev_xstats_get_op()).
 * NOTE(review): truncated copy — several entries lack their continuation
 * line and the "};" terminator is missing.
 */
305 static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = {
306 {"link_down_events", offsetof(struct rx_port_stats_ext,
308 {"continuous_pause_events", offsetof(struct rx_port_stats_ext,
309 continuous_pause_events)},
310 {"resume_pause_events", offsetof(struct rx_port_stats_ext,
311 resume_pause_events)},
312 {"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext,
313 continuous_roce_pause_events)},
314 {"resume_roce_pause_events", offsetof(struct rx_port_stats_ext,
315 resume_roce_pause_events)},
/* Per-CoS byte and packet counters (CoS queues 0..7). */
316 {"rx_bytes_cos0", offsetof(struct rx_port_stats_ext,
318 {"rx_bytes_cos1", offsetof(struct rx_port_stats_ext,
320 {"rx_bytes_cos2", offsetof(struct rx_port_stats_ext,
322 {"rx_bytes_cos3", offsetof(struct rx_port_stats_ext,
324 {"rx_bytes_cos4", offsetof(struct rx_port_stats_ext,
326 {"rx_bytes_cos5", offsetof(struct rx_port_stats_ext,
328 {"rx_bytes_cos6", offsetof(struct rx_port_stats_ext,
330 {"rx_bytes_cos7", offsetof(struct rx_port_stats_ext,
332 {"rx_packets_cos0", offsetof(struct rx_port_stats_ext,
334 {"rx_packets_cos1", offsetof(struct rx_port_stats_ext,
336 {"rx_packets_cos2", offsetof(struct rx_port_stats_ext,
338 {"rx_packets_cos3", offsetof(struct rx_port_stats_ext,
340 {"rx_packets_cos4", offsetof(struct rx_port_stats_ext,
342 {"rx_packets_cos5", offsetof(struct rx_port_stats_ext,
344 {"rx_packets_cos6", offsetof(struct rx_port_stats_ext,
346 {"rx_packets_cos7", offsetof(struct rx_port_stats_ext,
/* Per-priority PFC Rx pause duration/transition counters. */
348 {"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext,
349 pfc_pri0_rx_duration_us)},
350 {"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext,
351 pfc_pri0_rx_transitions)},
352 {"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext,
353 pfc_pri1_rx_duration_us)},
354 {"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext,
355 pfc_pri1_rx_transitions)},
356 {"pfc_pri2_rx_duration_us", offsetof(struct rx_port_stats_ext,
357 pfc_pri2_rx_duration_us)},
358 {"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext,
359 pfc_pri2_rx_transitions)},
360 {"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext,
361 pfc_pri3_rx_duration_us)},
362 {"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext,
363 pfc_pri3_rx_transitions)},
364 {"pfc_pri4_rx_duration_us", offsetof(struct rx_port_stats_ext,
365 pfc_pri4_rx_duration_us)},
366 {"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext,
367 pfc_pri4_rx_transitions)},
368 {"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext,
369 pfc_pri5_rx_duration_us)},
370 {"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext,
371 pfc_pri5_rx_transitions)},
372 {"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext,
373 pfc_pri6_rx_duration_us)},
374 {"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext,
375 pfc_pri6_rx_transitions)},
376 {"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext,
377 pfc_pri7_rx_duration_us)},
378 {"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext,
379 pfc_pri7_rx_transitions)},
380 {"rx_bits", offsetof(struct rx_port_stats_ext,
382 {"rx_buffer_passed_threshold", offsetof(struct rx_port_stats_ext,
383 rx_buffer_passed_threshold)},
384 {"rx_pcs_symbol_err", offsetof(struct rx_port_stats_ext,
386 {"rx_corrected_bits", offsetof(struct rx_port_stats_ext,
/* Per-CoS discard byte and packet counters (CoS queues 0..7). */
388 {"rx_discard_bytes_cos0", offsetof(struct rx_port_stats_ext,
389 rx_discard_bytes_cos0)},
390 {"rx_discard_bytes_cos1", offsetof(struct rx_port_stats_ext,
391 rx_discard_bytes_cos1)},
392 {"rx_discard_bytes_cos2", offsetof(struct rx_port_stats_ext,
393 rx_discard_bytes_cos2)},
394 {"rx_discard_bytes_cos3", offsetof(struct rx_port_stats_ext,
395 rx_discard_bytes_cos3)},
396 {"rx_discard_bytes_cos4", offsetof(struct rx_port_stats_ext,
397 rx_discard_bytes_cos4)},
398 {"rx_discard_bytes_cos5", offsetof(struct rx_port_stats_ext,
399 rx_discard_bytes_cos5)},
400 {"rx_discard_bytes_cos6", offsetof(struct rx_port_stats_ext,
401 rx_discard_bytes_cos6)},
402 {"rx_discard_bytes_cos7", offsetof(struct rx_port_stats_ext,
403 rx_discard_bytes_cos7)},
404 {"rx_discard_packets_cos0", offsetof(struct rx_port_stats_ext,
405 rx_discard_packets_cos0)},
406 {"rx_discard_packets_cos1", offsetof(struct rx_port_stats_ext,
407 rx_discard_packets_cos1)},
408 {"rx_discard_packets_cos2", offsetof(struct rx_port_stats_ext,
409 rx_discard_packets_cos2)},
410 {"rx_discard_packets_cos3", offsetof(struct rx_port_stats_ext,
411 rx_discard_packets_cos3)},
412 {"rx_discard_packets_cos4", offsetof(struct rx_port_stats_ext,
413 rx_discard_packets_cos4)},
414 {"rx_discard_packets_cos5", offsetof(struct rx_port_stats_ext,
415 rx_discard_packets_cos5)},
416 {"rx_discard_packets_cos6", offsetof(struct rx_port_stats_ext,
417 rx_discard_packets_cos6)},
418 {"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext,
419 rx_discard_packets_cos7)},
/*
 * Name/offset table for extended Tx port stats (struct tx_port_stats_ext),
 * bounded at runtime by bp->fw_tx_port_stats_ext_size in
 * bnxt_dev_xstats_get_op().
 * NOTE(review): truncated copy — the per-CoS entries lack their
 * continuation lines and the "};" terminator is missing.
 */
422 static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = {
/* Per-CoS byte and packet counters (CoS queues 0..7). */
423 {"tx_bytes_cos0", offsetof(struct tx_port_stats_ext,
425 {"tx_bytes_cos1", offsetof(struct tx_port_stats_ext,
427 {"tx_bytes_cos2", offsetof(struct tx_port_stats_ext,
429 {"tx_bytes_cos3", offsetof(struct tx_port_stats_ext,
431 {"tx_bytes_cos4", offsetof(struct tx_port_stats_ext,
433 {"tx_bytes_cos5", offsetof(struct tx_port_stats_ext,
435 {"tx_bytes_cos6", offsetof(struct tx_port_stats_ext,
437 {"tx_bytes_cos7", offsetof(struct tx_port_stats_ext,
439 {"tx_packets_cos0", offsetof(struct tx_port_stats_ext,
441 {"tx_packets_cos1", offsetof(struct tx_port_stats_ext,
443 {"tx_packets_cos2", offsetof(struct tx_port_stats_ext,
445 {"tx_packets_cos3", offsetof(struct tx_port_stats_ext,
447 {"tx_packets_cos4", offsetof(struct tx_port_stats_ext,
449 {"tx_packets_cos5", offsetof(struct tx_port_stats_ext,
451 {"tx_packets_cos6", offsetof(struct tx_port_stats_ext,
453 {"tx_packets_cos7", offsetof(struct tx_port_stats_ext,
/* Per-priority PFC Tx pause duration/transition counters. */
455 {"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext,
456 pfc_pri0_tx_duration_us)},
457 {"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext,
458 pfc_pri0_tx_transitions)},
459 {"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext,
460 pfc_pri1_tx_duration_us)},
461 {"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext,
462 pfc_pri1_tx_transitions)},
463 {"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext,
464 pfc_pri2_tx_duration_us)},
465 {"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext,
466 pfc_pri2_tx_transitions)},
467 {"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext,
468 pfc_pri3_tx_duration_us)},
469 {"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext,
470 pfc_pri3_tx_transitions)},
471 {"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext,
472 pfc_pri4_tx_duration_us)},
473 {"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext,
474 pfc_pri4_tx_transitions)},
475 {"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext,
476 pfc_pri5_tx_duration_us)},
477 {"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext,
478 pfc_pri5_tx_transitions)},
479 {"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext,
480 pfc_pri6_tx_duration_us)},
481 {"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext,
482 pfc_pri6_tx_transitions)},
483 {"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext,
484 pfc_pri7_tx_duration_us)},
485 {"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext,
486 pfc_pri7_tx_transitions)},
490 * Statistics functions
/*
 * bnxt_free_stats() - release the per-queue statistics for every Tx and
 * then every Rx completion ring of the port.
 * NOTE(review): truncated copy — the loop-variable declaration, opening/
 * closing braces and any intervening lines are missing here (the embedded
 * line numbers skip, e.g. 498 -> 500).
 */
493 void bnxt_free_stats(struct bnxt *bp)
497 for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
498 struct bnxt_tx_queue *txq = bp->tx_queues[i];
/* Free stats attached to this Tx queue. */
500 bnxt_free_txq_stats(txq);
502 for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
503 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
/* Free stats attached to this Rx queue. */
505 bnxt_free_rxq_stats(rxq);
/*
 * bnxt_fill_rte_eth_stats() - fold one ring's counters into the ethdev
 * basic stats. For an Rx ring (@rx true, queue index @i) it fills
 * q_ipackets/q_ibytes/q_errors[i] from the ucast+mcast+bcast and
 * discard/error counters and accumulates the port-wide ipackets, ibytes,
 * imissed and ierrors totals. For a Tx ring it fills q_opackets/q_obytes
 * and accumulates opackets, obytes and oerrors.
 * NOTE(review): truncated copy — the opening brace and the if (rx)/else
 * framing around the two halves are missing (embedded numbering skips).
 */
509 static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
510 struct bnxt_ring_stats *ring_stats,
511 unsigned int i, bool rx)
/* Rx half: per-queue packet count = ucast + mcast + bcast. */
514 stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
515 stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
516 stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
518 stats->ipackets += stats->q_ipackets[i];
520 stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
521 stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
522 stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
524 stats->ibytes += stats->q_ibytes[i];
/* Queue error count combines discards and errors. */
526 stats->q_errors[i] = ring_stats->rx_discard_pkts;
527 stats->q_errors[i] += ring_stats->rx_error_pkts;
/* Discards are reported as imissed, errors as ierrors. */
529 stats->imissed += ring_stats->rx_discard_pkts;
530 stats->ierrors += ring_stats->rx_error_pkts;
/* Tx half: per-queue packet count = ucast + mcast + bcast. */
532 stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
533 stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
534 stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
536 stats->opackets += stats->q_opackets[i];
538 stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
539 stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
540 stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
542 stats->obytes += stats->q_obytes[i];
544 stats->oerrors += ring_stats->tx_discard_pkts;
/*
 * bnxt_stats_get_op() - ethdev .stats_get handler.
 * Verifies the device is healthy and started, then for up to
 * RTE_ETHDEV_QUEUE_STAT_CNTRS Rx rings (and then Tx rings) queries the
 * per-ring HW counters via HWRM and folds them into @bnxt_stats with
 * bnxt_fill_rte_eth_stats(). rx_nombuf is accumulated from each Rx
 * queue's atomic mbuf-allocation-failure counter. Rings that are not
 * started are skipped.
 * NOTE(review): truncated copy — declarations of i/rc, the error-return
 * branches after each HWRM call, "continue" statements, closing braces
 * and the final return are missing (embedded numbering skips).
 */
548 int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
549 struct rte_eth_stats *bnxt_stats)
553 struct bnxt *bp = eth_dev->data->dev_private;
554 unsigned int num_q_stats;
/* Bail out early if the device is in an error/recovery state. */
556 rc = is_bnxt_in_error(bp);
560 if (!eth_dev->data->dev_started)
/* Per-queue stats are capped by the ethdev per-queue counter limit. */
563 num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
564 (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS)
566 for (i = 0; i < num_q_stats; i++) {
567 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
568 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
569 struct bnxt_ring_stats ring_stats = {0};
571 if (!rxq->rx_started)
/* Fetch this ring's counters from FW via its stats context. */
574 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
579 bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
580 bnxt_stats->rx_nombuf +=
581 rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
584 num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
585 (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
587 for (i = 0; i < num_q_stats; i++) {
588 struct bnxt_tx_queue *txq = bp->tx_queues[i];
589 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
590 struct bnxt_ring_stats ring_stats = {0};
592 if (!txq->tx_started)
595 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
600 bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
/*
 * bnxt_clear_prev_stat() - zero the cached per-ring stats snapshots
 * (prev_rx_ring_stats / prev_tx_ring_stats) kept from the previous HW
 * query, so the next read starts from a clean baseline.
 * NOTE(review): truncated copy — the opening brace, the start of the
 * original comment block and the closing brace are missing.
 */
606 static void bnxt_clear_prev_stat(struct bnxt *bp)
609 * Clear the cached values of stats returned by HW in the previous
612 memset(bp->prev_rx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
613 memset(bp->prev_tx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
/*
 * bnxt_stats_reset_op() - ethdev .stats_reset handler.
 * Requires the device to be started; clears all HWRM statistics
 * contexts, resets each Rx queue's atomic mbuf-allocation-failure
 * counter, and wipes the driver's cached previous-stats snapshots.
 * NOTE(review): truncated copy — declarations of i/ret, the early-return
 * branches, closing braces and the final return are missing.
 */
616 int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
618 struct bnxt *bp = eth_dev->data->dev_private;
622 ret = is_bnxt_in_error(bp);
626 if (!eth_dev->data->dev_started) {
627 PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
/* Ask FW to clear every stats context owned by this port. */
631 ret = bnxt_clear_all_hwrm_stat_ctxs(bp);
632 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
633 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
635 rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
/* Drop the cached previous-stat baseline as well. */
638 bnxt_clear_prev_stat(bp);
/*
 * bnxt_fill_func_qstats() - accumulate one ring's counters into a
 * software-built hwrm_func_qstats_output aggregate, used by
 * bnxt_dev_xstats_get_op() for the per-function xstats group.
 * Ring rx_error_pkts feed rx_drop_pkts and tx_error_pkts feed
 * tx_drop_pkts in the aggregate.
 * NOTE(review): truncated copy — the opening brace, the (likely rx/tx
 * selector) framing and the closing brace are missing; the third
 * parameter line of the signature is also absent.
 */
643 static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
644 struct bnxt_ring_stats *ring_stats,
/* Rx packet/byte accumulation. */
648 func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
649 func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
650 func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
652 func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
653 func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
654 func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
656 func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
657 func_qstats->rx_drop_pkts += ring_stats->rx_error_pkts;
/* TPA/aggregation counters. */
659 func_qstats->rx_agg_pkts += ring_stats->rx_agg_pkts;
660 func_qstats->rx_agg_bytes += ring_stats->rx_agg_bytes;
661 func_qstats->rx_agg_events += ring_stats->rx_agg_events;
662 func_qstats->rx_agg_aborts += ring_stats->rx_agg_aborts;
/* Tx packet/byte accumulation. */
664 func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
665 func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
666 func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
668 func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
669 func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
670 func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
672 func_qstats->tx_drop_pkts += ring_stats->tx_error_pkts;
673 func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
/*
 * bnxt_dev_xstats_get_op() - ethdev .xstats_get handler.
 * Emits, in order: Rx port stats, Tx port stats, aggregated per-function
 * stats (built from per-ring HWRM queries via bnxt_fill_func_qstats()),
 * extended Rx/Tx port stats (bounded by the FW-reported DMA sizes), and
 * optionally per-flow byte/packet counters when advanced flow counters
 * are enabled. Returns the element count; callers passing n < stat_count
 * or xstats == NULL get the required count back (per the ethdev xstats
 * contract — the early-return line is among those truncated here).
 * NOTE(review): truncated copy — loop-body continues/returns, count
 * increments, several xstats[count].id assignments and closing braces
 * are missing (embedded numbering skips).
 */
677 int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
678 struct rte_eth_xstat *xstats, unsigned int n)
680 struct bnxt *bp = eth_dev->data->dev_private;
681 unsigned int count, i;
682 unsigned int rx_port_stats_ext_cnt;
683 unsigned int tx_port_stats_ext_cnt;
684 unsigned int stat_size = sizeof(uint64_t);
685 struct hwrm_func_qstats_output func_qstats = {0};
686 unsigned int stat_count;
689 rc = is_bnxt_in_error(bp);
/* Total number of xstats exposed by this driver. */
693 stat_count = RTE_DIM(bnxt_rx_stats_strings) +
694 RTE_DIM(bnxt_tx_stats_strings) +
695 RTE_DIM(bnxt_func_stats_strings) +
696 RTE_DIM(bnxt_rx_ext_stats_strings) +
697 RTE_DIM(bnxt_tx_ext_stats_strings) +
698 bnxt_flow_stats_cnt(bp);
700 if (n < stat_count || xstats == NULL)
/* Aggregate per-ring Rx counters into func_qstats. */
703 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
704 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
705 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
706 struct bnxt_ring_stats ring_stats = {0};
708 if (!rxq->rx_started)
711 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
716 bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
/* Aggregate per-ring Tx counters into func_qstats. */
719 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
720 struct bnxt_tx_queue *txq = bp->tx_queues[i];
721 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
722 struct bnxt_ring_stats ring_stats = {0};
724 if (!txq->tx_started)
727 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
732 bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
/* Refresh the DMA-ed port and extended-port stats buffers. */
735 bnxt_hwrm_port_qstats(bp);
736 bnxt_hwrm_ext_port_qstats(bp);
/* Report only as many ext stats as the FW actually DMAs back. */
737 rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
738 (bp->fw_rx_port_stats_ext_size /
740 tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings),
741 (bp->fw_tx_port_stats_ext_size /
744 memset(xstats, 0, sizeof(*xstats));
/* Group 1: Rx port stats (little-endian in the DMA buffer). */
747 for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
748 uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
749 xstats[count].id = count;
750 xstats[count].value = rte_le_to_cpu_64(
751 *(uint64_t *)((char *)rx_stats +
752 bnxt_rx_stats_strings[i].offset));
/* Group 2: Tx port stats. */
756 for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
757 uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
758 xstats[count].id = count;
759 xstats[count].value = rte_le_to_cpu_64(
760 *(uint64_t *)((char *)tx_stats +
761 bnxt_tx_stats_strings[i].offset));
/* Group 3: per-function stats from the local aggregate (host order). */
765 for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
766 xstats[count].id = count;
767 xstats[count].value = *(uint64_t *)((char *)&func_qstats +
768 bnxt_func_stats_strings[i].offset);
/* Group 4: extended Rx port stats. */
772 for (i = 0; i < rx_port_stats_ext_cnt; i++) {
773 uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
775 xstats[count].value = rte_le_to_cpu_64
776 (*(uint64_t *)((char *)rx_stats_ext +
777 bnxt_rx_ext_stats_strings[i].offset));
/* Group 5: extended Tx port stats. */
782 for (i = 0; i < tx_port_stats_ext_cnt; i++) {
783 uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
785 xstats[count].value = rte_le_to_cpu_64
786 (*(uint64_t *)((char *)tx_stats_ext +
787 bnxt_tx_ext_stats_strings[i].offset));
/* Group 6 (optional): per-flow byte/packet counters, walked per VNIC. */
791 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
792 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
793 BNXT_FLOW_XSTATS_EN(bp)) {
797 for (j = 0; j < bp->max_vnics; j++) {
798 struct bnxt_filter_info *filter;
799 struct bnxt_vnic_info *vnic;
800 struct rte_flow *flow;
802 vnic = &bp->vnic_info[j];
803 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
806 if (STAILQ_EMPTY(&vnic->flow_list))
809 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
810 if (!flow || !flow->filter)
813 filter = flow->filter;
814 xstats[count].id = count;
815 xstats[count].value =
816 filter->hw_stats.bytes;
818 xstats[count].id = count;
819 xstats[count].value =
820 filter->hw_stats.packets;
/* Flow-stat slots are capped at max_l2_ctx entries. */
822 if (++i > bp->max_l2_ctx)
825 if (i > bp->max_l2_ctx)
/*
 * bnxt_flow_stats_cnt() - number of per-flow xstats slots reserved:
 * one bytes + one packets entry per L2 context when advanced flow
 * counters/management are supported and flow xstats are enabled
 * (i.e. 2 * bp->max_l2_ctx); the 0-return for the unsupported case is
 * among the truncated lines.
 * NOTE(review): the two VLAs exist only so RTE_DIM can count them —
 * equivalent to returning 2 * bp->max_l2_ctx directly, and worth
 * simplifying upstream since VLAs burn stack for no benefit.
 * NOTE(review): truncated copy — braces and the final return are missing.
 */
833 int bnxt_flow_stats_cnt(struct bnxt *bp)
835 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
836 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
837 BNXT_FLOW_XSTATS_EN(bp)) {
838 struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx];
839 struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx];
841 return RTE_DIM(flow_bytes) + RTE_DIM(flow_pkts);
/*
 * bnxt_dev_xstats_get_names_op() - ethdev .xstats_get_names handler.
 * Emits names in the same order bnxt_dev_xstats_get_op() emits values:
 * Rx port, Tx port, func, Rx-ext, Tx-ext, then the optional
 * flow_<i>_bytes / flow_<i>_packets pairs. When xstats_names is NULL
 * the total count is returned (the return lines are truncated here).
 * NOTE(review): "flow_%d_bytes" formats the unsigned counter i with %d;
 * harmless for realistic max_l2_ctx values but %u would be correct.
 * NOTE(review): truncated copy — count increments, closing braces and
 * returns are missing (embedded numbering skips).
 */
847 int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
848 struct rte_eth_xstat_name *xstats_names,
849 __rte_unused unsigned int limit)
851 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
852 const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
853 RTE_DIM(bnxt_tx_stats_strings) +
854 RTE_DIM(bnxt_func_stats_strings) +
855 RTE_DIM(bnxt_rx_ext_stats_strings) +
856 RTE_DIM(bnxt_tx_ext_stats_strings) +
857 bnxt_flow_stats_cnt(bp);
858 unsigned int i, count = 0;
861 rc = is_bnxt_in_error(bp);
865 if (xstats_names != NULL) {
/* Same group order as bnxt_dev_xstats_get_op(). */
868 for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
869 strlcpy(xstats_names[count].name,
870 bnxt_rx_stats_strings[i].name,
871 sizeof(xstats_names[count].name));
875 for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
876 strlcpy(xstats_names[count].name,
877 bnxt_tx_stats_strings[i].name,
878 sizeof(xstats_names[count].name));
882 for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
883 strlcpy(xstats_names[count].name,
884 bnxt_func_stats_strings[i].name,
885 sizeof(xstats_names[count].name));
889 for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
890 strlcpy(xstats_names[count].name,
891 bnxt_rx_ext_stats_strings[i].name,
892 sizeof(xstats_names[count].name));
897 for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
898 strlcpy(xstats_names[count].name,
899 bnxt_tx_ext_stats_strings[i].name,
900 sizeof(xstats_names[count].name));
/* Flow-counter names, only when the capability is enabled. */
905 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
906 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
907 BNXT_FLOW_XSTATS_EN(bp)) {
908 for (i = 0; i < bp->max_l2_ctx; i++) {
909 char buf[RTE_ETH_XSTATS_NAME_SIZE];
911 sprintf(buf, "flow_%d_bytes", i);
912 strlcpy(xstats_names[count].name, buf,
913 sizeof(xstats_names[count].name));
916 sprintf(buf, "flow_%d_packets", i);
917 strlcpy(xstats_names[count].name, buf,
918 sizeof(xstats_names[count].name));
/*
 * bnxt_dev_xstats_reset_op() - ethdev .xstats_reset handler.
 * Only supported on a single, non-VF PF with port stats enabled;
 * otherwise logs "Operation not supported". On the supported path it
 * asks FW to clear the port counters and wipes the driver's cached
 * previous-stats snapshots.
 * NOTE(review): truncated copy — declaration of ret, early-return
 * branches, braces and the final return are missing.
 */
928 int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
930 struct bnxt *bp = eth_dev->data->dev_private;
933 ret = is_bnxt_in_error(bp);
937 if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) ||
938 !(bp->flags & BNXT_FLAG_PORT_STATS)) {
939 PMD_DRV_LOG(ERR, "Operation not supported\n");
943 ret = bnxt_hwrm_port_clr_stats(bp);
945 PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n",
948 bnxt_clear_prev_stat(bp);
953 /* Update the input context memory with the flow counter IDs
954 * of the flows that we are interested in.
955 * Also, update the output tables with the current local values
956 * since that is what will be used by FW to accumulate
/* (end of original comment truncated in this copy)
 * NOTE(review): truncated copy — the out_tbl parameter line, opening
 * brace, the in_tbl_cnt increment and closing brace are missing; as
 * shown, *ptbl_cnt is written back unincremented.
 */
958 static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl,
960 struct bnxt_filter_info *filter,
963 uint32_t in_tbl_cnt = *ptbl_cnt;
/* Slot layout: input table holds flow IDs; output table holds
 * (packets, bytes) pairs at indices 2k and 2k+1. */
965 in_tbl[in_tbl_cnt] = filter->flow_id;
966 out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets;
967 out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes;
969 *ptbl_cnt = in_tbl_cnt;
972 /* Post issuing counter_qstats cmd, update the driver's local stat
973 * entries with the values DMA-ed by FW in the output table
/* NOTE(review): truncated copy — the out_tbl parameter line, opening
 * brace and closing brace are missing from this function. */
975 static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter,
977 uint32_t out_tbl_idx)
/* Mirror of the pre-qstat layout: packets at 2k, bytes at 2k+1. */
979 filter->hw_stats.packets = out_tbl[2 * out_tbl_idx];
980 filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1];
/*
 * bnxt_update_fc_tbl() - refresh flow counters for a batch of filters.
 * Stages each filter's flow ID and current local counters into the Rx
 * flow-counter input/output DMA tables, issues
 * HWRM_CFA_COUNTER_QSTATS for the Rx direction, then copies the
 * FW-updated values back into each filter. Only ingress/Rx flows are
 * handled, as the original comments note.
 * NOTE(review): truncated copy — the in_rx_tbl declaration, loop-body
 * filters/continues, error-return after the HWRM call, closing braces
 * and the final return are missing (embedded numbering skips).
 */
983 static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr,
984 struct bnxt_filter_info *en_tbl[],
985 uint16_t in_flow_cnt)
988 uint64_t *out_rx_tbl;
989 uint32_t in_rx_tbl_cnt = 0;
990 uint32_t out_rx_tbl_cnt = 0;
/* DMA-able staging tables shared with FW. */
993 in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va;
994 out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va;
996 for (i = 0; i < in_flow_cnt; i++) {
1000 /* Currently only ingress/Rx flows are supported anyway. */
1001 bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl,
1002 en_tbl[i], &in_rx_tbl_cnt);
1005 /* Currently only ingress/Rx flows are supported */
1006 if (in_rx_tbl_cnt) {
1007 rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr,
/* Copy FW-DMAed results back into each filter's local stats. */
1013 for (i = 0; i < in_flow_cnt; i++) {
1017 /* Currently only ingress/Rx flows are supported */
1018 bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl,
1026 /* Walks through the list which has all the flows
1027 * requesting for explicit flow counters.
1029 int bnxt_flow_stats_req(struct bnxt *bp)
1033 struct rte_flow *flow;
1034 uint16_t in_flow_tbl_cnt = 0;
1035 struct bnxt_vnic_info *vnic = NULL;
1036 struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc];
1037 uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC;
1039 bnxt_acquire_flow_lock(bp);
1040 for (i = 0; i < bp->max_vnics; i++) {
1041 vnic = &bp->vnic_info[i];
1042 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
1045 if (STAILQ_EMPTY(&vnic->flow_list))
1048 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1049 if (!flow || !flow->filter)
1052 valid_en_tbl[in_flow_tbl_cnt++] = flow->filter;
1053 if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) {
1054 rc = bnxt_update_fc_tbl(bp, counter_type,
1059 in_flow_tbl_cnt = 0;
1065 if (!in_flow_tbl_cnt) {
1066 bnxt_release_flow_lock(bp);
1070 rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl,
1073 bnxt_release_flow_lock(bp);
1078 /* If cmd fails once, no need of
1079 * invoking again every second
1081 bnxt_release_flow_lock(bp);
1082 bnxt_cancel_fc_thread(bp);