X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_stats.c;h=1d3be16f8aeb8195adf242dcab50d0d3a313d281;hb=f6d1379f5516402bf7d6dcbf918ef030533d821d;hp=6f1c7602a00214a089a6007618091c425a4113c9;hpb=57d5e5bc86e457eb6ba7745175b94d7a61151676;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6f1c7602a0..1d3be16f8a 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -1,48 +1,491 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Broadcom Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/ #include +#include #include #include "bnxt.h" #include "bnxt_cpr.h" +#include "bnxt_filter.h" #include "bnxt_hwrm.h" #include "bnxt_rxq.h" #include "bnxt_stats.h" #include "bnxt_txq.h" +#include "bnxt_vnic.h" #include "hsi_struct_def_dpdk.h" +static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = { + {"rx_64b_frames", offsetof(struct rx_port_stats, + rx_64b_frames)}, + {"rx_65b_127b_frames", offsetof(struct rx_port_stats, + rx_65b_127b_frames)}, + {"rx_128b_255b_frames", offsetof(struct rx_port_stats, + rx_128b_255b_frames)}, + {"rx_256b_511b_frames", offsetof(struct rx_port_stats, + rx_256b_511b_frames)}, + {"rx_512b_1023b_frames", offsetof(struct rx_port_stats, + rx_512b_1023b_frames)}, + {"rx_1024b_1518b_frames", offsetof(struct rx_port_stats, + rx_1024b_1518b_frames)}, + {"rx_good_vlan_frames", offsetof(struct rx_port_stats, + rx_good_vlan_frames)}, + {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats, + rx_1519b_2047b_frames)}, + {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats, + rx_2048b_4095b_frames)}, + {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats, + rx_4096b_9216b_frames)}, + {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats, + rx_9217b_16383b_frames)}, + {"rx_total_frames", offsetof(struct rx_port_stats, + rx_total_frames)}, + {"rx_ucast_frames", offsetof(struct rx_port_stats, + rx_ucast_frames)}, + {"rx_mcast_frames", offsetof(struct rx_port_stats, + rx_mcast_frames)}, + {"rx_bcast_frames", offsetof(struct rx_port_stats, + rx_bcast_frames)}, + {"rx_fcs_err_frames", offsetof(struct rx_port_stats, + rx_fcs_err_frames)}, + {"rx_ctrl_frames", offsetof(struct rx_port_stats, + rx_ctrl_frames)}, + {"rx_pause_frames", offsetof(struct rx_port_stats, + rx_pause_frames)}, + {"rx_pfc_frames", offsetof(struct rx_port_stats, + rx_pfc_frames)}, + {"rx_unsupported_opcode_frames", offsetof(struct rx_port_stats, + rx_unsupported_opcode_frames)}, + {"rx_unsupported_da_pausepfc_frames", offsetof(struct rx_port_stats, + rx_unsupported_da_pausepfc_frames)}, + {"rx_wrong_sa_frames", offsetof(struct rx_port_stats, + rx_wrong_sa_frames)}, + {"rx_align_err_frames", offsetof(struct rx_port_stats, + rx_align_err_frames)}, + {"rx_oor_len_frames", offsetof(struct rx_port_stats, + rx_oor_len_frames)}, + {"rx_code_err_frames", offsetof(struct rx_port_stats, + rx_code_err_frames)}, + {"rx_false_carrier_frames", offsetof(struct rx_port_stats, + rx_false_carrier_frames)}, + {"rx_ovrsz_frames", offsetof(struct rx_port_stats, + rx_ovrsz_frames)}, + {"rx_jbr_frames", offsetof(struct rx_port_stats, + rx_jbr_frames)}, + {"rx_mtu_err_frames", offsetof(struct rx_port_stats, + rx_mtu_err_frames)}, + {"rx_match_crc_frames", offsetof(struct rx_port_stats, + rx_match_crc_frames)}, + {"rx_promiscuous_frames", offsetof(struct rx_port_stats, + rx_promiscuous_frames)}, + {"rx_tagged_frames", offsetof(struct rx_port_stats, + rx_tagged_frames)}, + {"rx_double_tagged_frames", offsetof(struct rx_port_stats, + rx_double_tagged_frames)}, + {"rx_trunc_frames", offsetof(struct rx_port_stats, + rx_trunc_frames)}, + {"rx_good_frames", offsetof(struct rx_port_stats, + rx_good_frames)}, + {"rx_sch_crc_err_frames", offsetof(struct rx_port_stats, + rx_sch_crc_err_frames)}, + {"rx_undrsz_frames", offsetof(struct rx_port_stats, + rx_undrsz_frames)}, + {"rx_frag_frames", offsetof(struct rx_port_stats, + rx_frag_frames)}, + {"rx_eee_lpi_events", offsetof(struct rx_port_stats, + rx_eee_lpi_events)}, + {"rx_eee_lpi_duration", offsetof(struct rx_port_stats, + rx_eee_lpi_duration)}, + 
{"rx_llfc_physical_msgs", offsetof(struct rx_port_stats, + rx_llfc_physical_msgs)}, + {"rx_llfc_logical_msgs", offsetof(struct rx_port_stats, + rx_llfc_logical_msgs)}, + {"rx_llfc_msgs_with_crc_err", offsetof(struct rx_port_stats, + rx_llfc_msgs_with_crc_err)}, + {"rx_hcfc_msgs", offsetof(struct rx_port_stats, + rx_hcfc_msgs)}, + {"rx_hcfc_msgs_with_crc_err", offsetof(struct rx_port_stats, + rx_hcfc_msgs_with_crc_err)}, + {"rx_bytes", offsetof(struct rx_port_stats, + rx_bytes)}, + {"rx_runt_bytes", offsetof(struct rx_port_stats, + rx_runt_bytes)}, + {"rx_runt_frames", offsetof(struct rx_port_stats, + rx_runt_frames)}, + {"rx_pfc_xon2xoff_frames_pri0", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri0)}, + {"rx_pfc_xon2xoff_frames_pri1", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri1)}, + {"rx_pfc_xon2xoff_frames_pri2", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri2)}, + {"rx_pfc_xon2xoff_frames_pri3", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri3)}, + {"rx_pfc_xon2xoff_frames_pri4", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri4)}, + {"rx_pfc_xon2xoff_frames_pri5", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri5)}, + {"rx_pfc_xon2xoff_frames_pri6", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri6)}, + {"rx_pfc_xon2xoff_frames_pri7", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri7)}, + {"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri0)}, + {"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri1)}, + {"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri2)}, + {"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri3)}, + {"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri4)}, + {"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri5)}, + {"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri6)}, + {"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri7)}, + {"rx_stat_discard", offsetof(struct rx_port_stats, + rx_stat_discard)}, + {"rx_stat_err", offsetof(struct rx_port_stats, + rx_stat_err)}, +}; + +static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = { + {"tx_64b_frames", offsetof(struct tx_port_stats, + tx_64b_frames)}, + {"tx_65b_127b_frames", offsetof(struct tx_port_stats, + tx_65b_127b_frames)}, + {"tx_128b_255b_frames", offsetof(struct tx_port_stats, + tx_128b_255b_frames)}, + {"tx_256b_511b_frames", offsetof(struct tx_port_stats, + tx_256b_511b_frames)}, + {"tx_512b_1023b_frames", offsetof(struct tx_port_stats, + tx_512b_1023b_frames)}, + {"tx_1024b_1518b_frames", offsetof(struct tx_port_stats, + tx_1024b_1518b_frames)}, + {"tx_good_vlan_frames", offsetof(struct tx_port_stats, + tx_good_vlan_frames)}, + {"tx_1519b_2047b_frames", offsetof(struct tx_port_stats, + tx_1519b_2047b_frames)}, + {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats, + tx_2048b_4095b_frames)}, + {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats, + tx_4096b_9216b_frames)}, + {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats, + tx_9217b_16383b_frames)}, + {"tx_good_frames", offsetof(struct tx_port_stats, + tx_good_frames)}, + {"tx_total_frames", offsetof(struct tx_port_stats, + tx_total_frames)}, + {"tx_ucast_frames", offsetof(struct tx_port_stats, + tx_ucast_frames)}, + {"tx_mcast_frames", offsetof(struct tx_port_stats, + tx_mcast_frames)}, + 
{"tx_bcast_frames", offsetof(struct tx_port_stats, + tx_bcast_frames)}, + {"tx_pause_frames", offsetof(struct tx_port_stats, + tx_pause_frames)}, + {"tx_pfc_frames", offsetof(struct tx_port_stats, + tx_pfc_frames)}, + {"tx_jabber_frames", offsetof(struct tx_port_stats, + tx_jabber_frames)}, + {"tx_fcs_err_frames", offsetof(struct tx_port_stats, + tx_fcs_err_frames)}, + {"tx_control_frames", offsetof(struct tx_port_stats, + tx_control_frames)}, + {"tx_oversz_frames", offsetof(struct tx_port_stats, + tx_oversz_frames)}, + {"tx_single_dfrl_frames", offsetof(struct tx_port_stats, + tx_single_dfrl_frames)}, + {"tx_multi_dfrl_frames", offsetof(struct tx_port_stats, + tx_multi_dfrl_frames)}, + {"tx_single_coll_frames", offsetof(struct tx_port_stats, + tx_single_coll_frames)}, + {"tx_multi_coll_frames", offsetof(struct tx_port_stats, + tx_multi_coll_frames)}, + {"tx_late_coll_frames", offsetof(struct tx_port_stats, + tx_late_coll_frames)}, + {"tx_excessive_coll_frames", offsetof(struct tx_port_stats, + tx_excessive_coll_frames)}, + {"tx_frag_frames", offsetof(struct tx_port_stats, + tx_frag_frames)}, + {"tx_err", offsetof(struct tx_port_stats, + tx_err)}, + {"tx_tagged_frames", offsetof(struct tx_port_stats, + tx_tagged_frames)}, + {"tx_dbl_tagged_frames", offsetof(struct tx_port_stats, + tx_dbl_tagged_frames)}, + {"tx_runt_frames", offsetof(struct tx_port_stats, + tx_runt_frames)}, + {"tx_fifo_underruns", offsetof(struct tx_port_stats, + tx_fifo_underruns)}, + {"tx_eee_lpi_events", offsetof(struct tx_port_stats, + tx_eee_lpi_events)}, + {"tx_eee_lpi_duration", offsetof(struct tx_port_stats, + tx_eee_lpi_duration)}, + {"tx_total_collisions", offsetof(struct tx_port_stats, + tx_total_collisions)}, + {"tx_bytes", offsetof(struct tx_port_stats, + tx_bytes)}, + {"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri0)}, + {"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri1)}, + {"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri2)}, + {"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri3)}, + {"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri4)}, + {"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri5)}, + {"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri6)}, + {"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri7)}, + {"tx_llfc_logical_msgs", offsetof(struct tx_port_stats, + tx_llfc_logical_msgs)}, + {"tx_hcfc_msgs", offsetof(struct tx_port_stats, + tx_hcfc_msgs)}, + {"tx_xthol_frames", offsetof(struct tx_port_stats, + tx_xthol_frames)}, + {"tx_stat_discard", offsetof(struct tx_port_stats, + tx_stat_discard)}, + {"tx_stat_error", offsetof(struct tx_port_stats, + tx_stat_error)}, +}; + +static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { + {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_ucast_pkts)}, + {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_mcast_pkts)}, + {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_bcast_pkts)}, + {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + tx_discard_pkts)}, + {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + tx_drop_pkts)}, + {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_ucast_bytes)}, + {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_mcast_bytes)}, + {"tx_bcast_bytes", offsetof(struct 
hwrm_func_qstats_output, + tx_bcast_bytes)}, + {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_ucast_pkts)}, + {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_mcast_pkts)}, + {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_bcast_pkts)}, + {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + rx_discard_pkts)}, + {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + rx_drop_pkts)}, + {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_ucast_bytes)}, + {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_mcast_bytes)}, + {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_bcast_bytes)}, + {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output, + rx_agg_pkts)}, + {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output, + rx_agg_bytes)}, + {"rx_agg_events", offsetof(struct hwrm_func_qstats_output, + rx_agg_events)}, + {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output, + rx_agg_aborts)}, +}; + + +static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = { + {"link_down_events", offsetof(struct rx_port_stats_ext, + link_down_events)}, + {"continuous_pause_events", offsetof(struct rx_port_stats_ext, + continuous_pause_events)}, + {"resume_pause_events", offsetof(struct rx_port_stats_ext, + resume_pause_events)}, + {"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext, + continuous_roce_pause_events)}, + {"resume_roce_pause_events", offsetof(struct rx_port_stats_ext, + resume_roce_pause_events)}, + {"rx_bytes_cos0", offsetof(struct rx_port_stats_ext, + rx_bytes_cos0)}, + {"rx_bytes_cos1", offsetof(struct rx_port_stats_ext, + rx_bytes_cos1)}, + {"rx_bytes_cos2", offsetof(struct rx_port_stats_ext, + rx_bytes_cos2)}, + {"rx_bytes_cos3", offsetof(struct rx_port_stats_ext, + rx_bytes_cos3)}, + {"rx_bytes_cos4", offsetof(struct rx_port_stats_ext, + rx_bytes_cos4)}, + {"rx_bytes_cos5", offsetof(struct rx_port_stats_ext, + rx_bytes_cos5)}, + {"rx_bytes_cos6", offsetof(struct rx_port_stats_ext, + rx_bytes_cos6)}, + {"rx_bytes_cos7", offsetof(struct rx_port_stats_ext, + rx_bytes_cos7)}, + {"rx_packets_cos0", offsetof(struct rx_port_stats_ext, + rx_packets_cos0)}, + {"rx_packets_cos1", offsetof(struct rx_port_stats_ext, + rx_packets_cos1)}, + {"rx_packets_cos2", offsetof(struct rx_port_stats_ext, + rx_packets_cos2)}, + {"rx_packets_cos3", offsetof(struct rx_port_stats_ext, + rx_packets_cos3)}, + {"rx_packets_cos4", offsetof(struct rx_port_stats_ext, + rx_packets_cos4)}, + {"rx_packets_cos5", offsetof(struct rx_port_stats_ext, + rx_packets_cos5)}, + {"rx_packets_cos6", offsetof(struct rx_port_stats_ext, + rx_packets_cos6)}, + {"rx_packets_cos7", offsetof(struct rx_port_stats_ext, + rx_packets_cos7)}, + {"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri0_rx_duration_us)}, + {"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri0_rx_transitions)}, + {"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri1_rx_duration_us)}, + {"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri1_rx_transitions)}, + {"pfc_pri2_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri2_rx_duration_us)}, + {"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri2_rx_transitions)}, + {"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri3_rx_duration_us)}, + {"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri3_rx_transitions)}, + {"pfc_pri4_rx_duration_us", 
offsetof(struct rx_port_stats_ext, + pfc_pri4_rx_duration_us)}, + {"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri4_rx_transitions)}, + {"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri5_rx_duration_us)}, + {"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri5_rx_transitions)}, + {"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri6_rx_duration_us)}, + {"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri6_rx_transitions)}, + {"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri7_rx_duration_us)}, + {"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri7_rx_transitions)}, + {"rx_bits", offsetof(struct rx_port_stats_ext, + rx_bits)}, + {"rx_buffer_passed_threshold", offsetof(struct rx_port_stats_ext, + rx_buffer_passed_threshold)}, + {"rx_pcs_symbol_err", offsetof(struct rx_port_stats_ext, + rx_pcs_symbol_err)}, + {"rx_corrected_bits", offsetof(struct rx_port_stats_ext, + rx_corrected_bits)}, + {"rx_discard_bytes_cos0", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos0)}, + {"rx_discard_bytes_cos1", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos1)}, + {"rx_discard_bytes_cos2", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos2)}, + {"rx_discard_bytes_cos3", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos3)}, + {"rx_discard_bytes_cos4", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos4)}, + {"rx_discard_bytes_cos5", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos5)}, + {"rx_discard_bytes_cos6", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos6)}, + {"rx_discard_bytes_cos7", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos7)}, + {"rx_discard_packets_cos0", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos0)}, + {"rx_discard_packets_cos1", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos1)}, + {"rx_discard_packets_cos2", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos2)}, + {"rx_discard_packets_cos3", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos3)}, + {"rx_discard_packets_cos4", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos4)}, + {"rx_discard_packets_cos5", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos5)}, + {"rx_discard_packets_cos6", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos6)}, + {"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos7)}, +}; + +static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = { + {"tx_bytes_cos0", offsetof(struct tx_port_stats_ext, + tx_bytes_cos0)}, + {"tx_bytes_cos1", offsetof(struct tx_port_stats_ext, + tx_bytes_cos1)}, + {"tx_bytes_cos2", offsetof(struct tx_port_stats_ext, + tx_bytes_cos2)}, + {"tx_bytes_cos3", offsetof(struct tx_port_stats_ext, + tx_bytes_cos3)}, + {"tx_bytes_cos4", offsetof(struct tx_port_stats_ext, + tx_bytes_cos4)}, + {"tx_bytes_cos5", offsetof(struct tx_port_stats_ext, + tx_bytes_cos5)}, + {"tx_bytes_cos6", offsetof(struct tx_port_stats_ext, + tx_bytes_cos6)}, + {"tx_bytes_cos7", offsetof(struct tx_port_stats_ext, + tx_bytes_cos7)}, + {"tx_packets_cos0", offsetof(struct tx_port_stats_ext, + tx_packets_cos0)}, + {"tx_packets_cos1", offsetof(struct tx_port_stats_ext, + tx_packets_cos1)}, + {"tx_packets_cos2", offsetof(struct tx_port_stats_ext, + tx_packets_cos2)}, + {"tx_packets_cos3", offsetof(struct tx_port_stats_ext, + tx_packets_cos3)}, + {"tx_packets_cos4", offsetof(struct 
tx_port_stats_ext, + tx_packets_cos4)}, + {"tx_packets_cos5", offsetof(struct tx_port_stats_ext, + tx_packets_cos5)}, + {"tx_packets_cos6", offsetof(struct tx_port_stats_ext, + tx_packets_cos6)}, + {"tx_packets_cos7", offsetof(struct tx_port_stats_ext, + tx_packets_cos7)}, + {"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri0_tx_duration_us)}, + {"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri0_tx_transitions)}, + {"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri1_tx_duration_us)}, + {"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri1_tx_transitions)}, + {"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri2_tx_duration_us)}, + {"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri2_tx_transitions)}, + {"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri3_tx_duration_us)}, + {"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri3_tx_transitions)}, + {"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri4_tx_duration_us)}, + {"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri4_tx_transitions)}, + {"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri5_tx_duration_us)}, + {"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri5_tx_transitions)}, + {"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri6_tx_duration_us)}, + {"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri6_tx_transitions)}, + {"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri7_tx_duration_us)}, + {"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri7_tx_transitions)}, +}; + /* * Statistics functions */ @@ -63,80 +506,524 @@ void bnxt_free_stats(struct bnxt *bp) } } -void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, +int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_stats *bnxt_stats) { + int rc = 0; unsigned int i; struct bnxt *bp = eth_dev->data->dev_private; + unsigned int num_q_stats; - memset(bnxt_stats, 0, sizeof(*bnxt_stats)); + rc = is_bnxt_in_error(bp); + if (rc) + return rc; - for (i = 0; i < bp->rx_cp_nr_rings; i++) { + if (!eth_dev->data->dev_started) + return -EIO; + + num_q_stats = RTE_MIN(bp->rx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - struct ctx_hw_stats64 *hw_stats = - (struct ctx_hw_stats64 *)cpr->hw_stats; - - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_ucast_pkts); - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_mcast_pkts); - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_bcast_pkts); - - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_ucast_bytes); - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_mcast_bytes); - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_bcast_bytes); - - /* - * TBD: No clear mapping to this... we don't seem - * to have a stat specifically for dropped due to - * insufficient mbufs. 
- */ - bnxt_stats->q_errors[i] = 0; - - /* These get replaced once the *_QSTATS commands work */ - bnxt_stats->ipackets += bnxt_stats->q_ipackets[i]; - bnxt_stats->ibytes += bnxt_stats->q_ibytes[i]; - bnxt_stats->imissed += bnxt_stats->q_errors[i]; - bnxt_stats->ierrors += - rte_le_to_cpu_64(hw_stats->rx_err_pkts); - } - - for (i = 0; i < bp->tx_cp_nr_rings; i++) { + + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 1); + if (unlikely(rc)) + return rc; + bnxt_stats->rx_nombuf += + rte_atomic64_read(&rxq->rx_mbuf_alloc_fail); + } + + num_q_stats = RTE_MIN(bp->tx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - struct ctx_hw_stats64 *hw_stats = - (struct ctx_hw_stats64 *)cpr->hw_stats; - - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_ucast_pkts); - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_mcast_pkts); - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_bcast_pkts); - - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_ucast_bytes); - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_mcast_bytes); - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_bcast_bytes); - - /* These get replaced once the *_QSTATS commands work */ - bnxt_stats->opackets += bnxt_stats->q_opackets[i]; - bnxt_stats->obytes += bnxt_stats->q_obytes[i]; - bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts); - bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_err_pkts); + + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 0); + if (unlikely(rc)) + return rc; + } + + rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats, NULL); + return rc; +} + +int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + unsigned int i; + int ret; + + ret = is_bnxt_in_error(bp); + if (ret) + return ret; + + if (!eth_dev->data->dev_started) { + PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); + return -EINVAL; + } + + ret = bnxt_clear_all_hwrm_stat_ctxs(bp); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail); } + + return ret; } -void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) +int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n) +{ + struct bnxt *bp = eth_dev->data->dev_private; + unsigned int count, i; + unsigned int rx_port_stats_ext_cnt; + unsigned int tx_port_stats_ext_cnt; + unsigned int stat_size = sizeof(uint64_t); + struct hwrm_func_qstats_output func_qstats = {0}; + unsigned int stat_count; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (xstats == NULL) + return 0; + + memset(xstats, 0, sizeof(*xstats)); + + bnxt_hwrm_func_qstats(bp, 0xffff, NULL, &func_qstats); + bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_ext_port_qstats(bp); + rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings), + (bp->fw_rx_port_stats_ext_size / + stat_size)); + tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings), + (bp->fw_tx_port_stats_ext_size / + stat_size)); + + count = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + + stat_count = count; + + if (n < count) + return count; + + count 
= 0; + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats; + xstats[count].id = count; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)rx_stats + + bnxt_rx_stats_strings[i].offset)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats; + xstats[count].id = count; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)tx_stats + + bnxt_tx_stats_strings[i].offset)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) { + xstats[count].id = count; + xstats[count].value = + rte_le_to_cpu_64(((uint64_t *)&func_qstats)[i]); + count++; + } + + + for (i = 0; i < rx_port_stats_ext_cnt; i++) { + uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext; + + xstats[count].value = rte_le_to_cpu_64 + (*(uint64_t *)((char *)rx_stats_ext + + bnxt_rx_ext_stats_strings[i].offset)); + + count++; + } + + for (i = 0; i < tx_port_stats_ext_cnt; i++) { + uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext; + + xstats[count].value = rte_le_to_cpu_64 + (*(uint64_t *)((char *)tx_stats_ext + + bnxt_tx_ext_stats_strings[i].offset)); + count++; + } + + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + bp->flow_xstat) { + int j; + + i = 0; + for (j = 0; j < bp->max_vnics; j++) { + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + + vnic = &bp->vnic_info[j]; + if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + if (STAILQ_EMPTY(&vnic->flow_list)) + continue; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + if (!flow || !flow->filter) + continue; + + filter = flow->filter; + xstats[count].id = count; + xstats[count].value = + filter->hw_stats.bytes; + count++; + xstats[count].id = count; + xstats[count].value = + filter->hw_stats.packets; + count++; + if (++i > bp->max_l2_ctx) + break; + } + if (i > bp->max_l2_ctx) + break; + } + } + + return stat_count; +} + +int bnxt_flow_stats_cnt(struct bnxt *bp) +{ + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + bp->flow_xstat) { + struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx]; + struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx]; + + return RTE_DIM(flow_bytes) + RTE_DIM(flow_pkts); + } + + return 0; +} + +int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + unsigned int i, count = 0; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (xstats_names != NULL) { + count = 0; + + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_rx_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_tx_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_func_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for 
(i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_rx_ext_stats_strings[i].name, + sizeof(xstats_names[count].name)); + + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_tx_ext_stats_strings[i].name, + sizeof(xstats_names[count].name)); + + count++; + } + + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + bp->flow_xstat) { + for (i = 0; i < bp->max_l2_ctx; i++) { + char buf[RTE_ETH_XSTATS_NAME_SIZE]; + + sprintf(buf, "flow_%d_bytes", i); + strlcpy(xstats_names[count].name, buf, + sizeof(xstats_names[count].name)); + count++; + + sprintf(buf, "flow_%d_packets", i); + strlcpy(xstats_names[count].name, buf, + sizeof(xstats_names[count].name)); + + count++; + } + } + } + + return stat_cnt; +} + +int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + int ret; + + ret = is_bnxt_in_error(bp); + if (ret) + return ret; + + if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) || + !(bp->flags & BNXT_FLAG_PORT_STATS)) { + PMD_DRV_LOG(ERR, "Operation not supported\n"); + return -ENOTSUP; + } + + ret = bnxt_hwrm_port_clr_stats(bp); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n", + strerror(-ret)); + + return ret; +} + +int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit) +{ + struct bnxt *bp = dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + struct rte_eth_xstat xstats[stat_cnt]; + uint64_t values_copy[stat_cnt]; + uint16_t i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!ids) + return bnxt_dev_xstats_get_op(dev, xstats, stat_cnt); + + bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt); + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + PMD_DRV_LOG(ERR, "id value isn't valid"); + return -EINVAL; + } + values[i] = values_copy[ids[i]]; + } + return stat_cnt; +} + +int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit) +{ + struct bnxt *bp = dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + uint16_t i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!ids) + return bnxt_dev_xstats_get_names_op(dev, xstats_names, + stat_cnt); + bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL, + stat_cnt); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + PMD_DRV_LOG(ERR, "id value isn't valid"); + return -EINVAL; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return stat_cnt; +} + +/* Update the input context memory with the flow counter IDs + * of the flows that we are interested in. 
+ * Also, update the output tables with the current local values + * since that is what will be used by FW to accumulate + */ +static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl, + uint64_t *out_tbl, + struct bnxt_filter_info *filter, + uint32_t *ptbl_cnt) +{ + uint32_t in_tbl_cnt = *ptbl_cnt; + + in_tbl[in_tbl_cnt] = filter->flow_id; + out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets; + out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes; + in_tbl_cnt++; + *ptbl_cnt = in_tbl_cnt; +} + +/* Post issuing counter_qstats cmd, update the driver's local stat + * entries with the values DMA-ed by FW in the output table + */ +static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter, + uint64_t *out_tbl, + uint32_t out_tbl_idx) +{ + filter->hw_stats.packets = out_tbl[2 * out_tbl_idx]; + filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1]; +} + +static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr, + struct bnxt_filter_info *en_tbl[], + uint16_t in_flow_cnt) +{ + uint32_t *in_rx_tbl; + uint64_t *out_rx_tbl; + uint32_t in_rx_tbl_cnt = 0; + uint32_t out_rx_tbl_cnt = 0; + int i, rc = 0; + + in_rx_tbl = (uint32_t *)bp->rx_fc_in_tbl.va; + out_rx_tbl = (uint64_t *)bp->rx_fc_out_tbl.va; + + for (i = 0; i < in_flow_cnt; i++) { + if (!en_tbl[i]) + continue; + + /* Currently only ingress/Rx flows are supported anyway. */ + bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl, + en_tbl[i], &in_rx_tbl_cnt); + } + + /* Currently only ingress/Rx flows are supported */ + if (in_rx_tbl_cnt) { + rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr, + in_rx_tbl_cnt); + if (rc) + return rc; + } + + for (i = 0; i < in_flow_cnt; i++) { + if (!en_tbl[i]) + continue; + + /* Currently only ingress/Rx flows are supported */ + bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl, + out_rx_tbl_cnt); + out_rx_tbl_cnt++; + } + + return rc; +} + +/* Walks through the list which has all the flows + * requesting for explicit flow counters. + */ +int bnxt_flow_stats_req(struct bnxt *bp) +{ + int i; + int rc = 0; + struct rte_flow *flow; + uint16_t in_flow_tbl_cnt = 0; + struct bnxt_vnic_info *vnic = NULL; + struct bnxt_filter_info *valid_en_tbl[bp->max_fc]; + uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC; + + bnxt_acquire_flow_lock(bp); + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + if (STAILQ_EMPTY(&vnic->flow_list)) + continue; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + if (!flow || !flow->filter) + continue; + + valid_en_tbl[in_flow_tbl_cnt++] = flow->filter; + if (in_flow_tbl_cnt >= bp->max_fc) { + rc = bnxt_update_fc_tbl(bp, counter_type, + valid_en_tbl, + in_flow_tbl_cnt); + if (rc) + goto err; + in_flow_tbl_cnt = 0; + continue; + } + } + } + + if (!in_flow_tbl_cnt) + goto out; + + rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl, + in_flow_tbl_cnt); + if (!rc) { + bnxt_release_flow_lock(bp); + return 0; + } - bnxt_clear_all_hwrm_stat_ctxs(bp); +err: + /* If cmd fails once, no need of + * invoking again every second + */ + bnxt_release_flow_lock(bp); + bnxt_cancel_fc_thread(bp); +out: + return rc; }
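The stats and xstats callbacks added in this diff are reached through the generic DPDK ethdev API rather than being called directly. As a minimal illustrative sketch (not part of this commit), the following shows how an application might read the extended counters exposed by bnxt_dev_xstats_get_names_op() and bnxt_dev_xstats_get_op() via rte_eth_xstats_get_names() and rte_eth_xstats_get(); the port id of 0, the malloc-based temporary buffers, and the helper name print_port_xstats are assumptions for the example only.

/* Illustrative sketch only; not part of the bnxt driver or of this commit. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void print_port_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int cnt, i;

	/* A NULL buffer with size 0 returns the number of counters. */
	cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt <= 0)
		return;

	names = malloc(sizeof(*names) * cnt);
	xstats = malloc(sizeof(*xstats) * cnt);
	if (names == NULL || xstats == NULL)
		goto out;

	/* These calls end up in the driver's xstats callbacks shown above. */
	if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
	    rte_eth_xstats_get(port_id, xstats, cnt) != cnt)
		goto out;

	/* Each xstat carries an id that indexes into the name table. */
	for (i = 0; i < cnt; i++)
		printf("%s: %" PRIu64 "\n",
		       names[xstats[i].id].name, xstats[i].value);
out:
	free(names);
	free(xstats);
}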