/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
-#include <stdbool.h>
-#include <stdint.h>
-#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_io.h>
#include <rte_malloc.h>
-#include <rte_spinlock.h>
#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_logs.h"
+#include "hns3_regs.h"
+
+/* The statistics of the per-rxq basic stats */
+static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
+ {"packets",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
+ {"bytes",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
+ {"errors",
+ HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
+};
+
+/* The statistics of the per-txq basic stats */
+static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
+ {"packets",
+ HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
+ {"bytes",
+ HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
+};
/* MAC statistics */
static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
};
-static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
- {"MAC_AFIFO_TNL_INT_R",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_intr_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST2",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_intr_st2_cnt)},
- {"SSU_PORT_BASED_ERR_INT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_intr_cnt)},
- {"PPP_PF_ABNORMAL_INT_ST0",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_intr_cnt)},
- {"PPU_PF_ABNORMAL_INT_ST",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_intr_cnt)}
-};
-
/* The statistic of reset */
static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
{"REQ_RESET_CNT",
/* The statistic of errors in Rx BD */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
- {"RX_PKT_LEN_ERRORS",
+ {"PKT_LEN_ERRORS",
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
- {"L2_RX_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
- {"RX_L3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_erros)},
- {"RX_L4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_erros)},
- {"RX_OL3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_erros)},
- {"RX_OL4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_erros)}
+ {"L2_ERRORS",
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
+};
+
+/* The dfx statistic in Rx datapath */
+static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
+ {"L3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
+ {"L4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
+ {"OL3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
+ {"OL4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
+};
+
+/* The dfx statistic in Tx datapath */
+static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
+ {"OVER_LENGTH_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+ {"UNSUPPORTED_TUNNEL_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+ {"QUEUE_FULL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
+ {"SHORT_PKT_PAD_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
+};
+
+/* The statistic of rx queue */
+static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
+ {"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
+};
+
+/* The statistic of tx queue */
+static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
+ {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
+};
+
+/* The statistic of imissed packet */
+static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
+ {"RPU_DROP_CNT",
+ HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
+ {"SSU_DROP_CNT",
+ HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
};
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
sizeof(hns3_mac_strings[0]))
-#define HNS3_NUM_ERROR_INT_XSTATS (sizeof(hns3_error_int_stats_strings) / \
- sizeof(hns3_error_int_stats_strings[0]))
-
#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
sizeof(hns3_reset_stats_strings[0]))
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
-#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
- HNS3_NUM_RESET_XSTATS)
+#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
+ sizeof(hns3_rxq_dfx_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
+ sizeof(hns3_txq_dfx_stats_strings[0]))
+
+#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
+ sizeof(hns3_rx_queue_strings[0]))
+
+#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
+ sizeof(hns3_tx_queue_strings[0]))
+
+#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
+ sizeof(hns3_rxq_basic_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
+ sizeof(hns3_txq_basic_stats_strings[0]))
+
+#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
+ sizeof(hns3_imissed_stats_strings[0]))
+
+#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)
+
+static void hns3_tqp_stats_clear(struct hns3_hw *hw);
-/*
- * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
- * This command is used before send 'query_mac_stat command', the descriptor
- * number of 'query_mac_stat command' must match with reg_num in this command.
- * @praram hw
- * Pointer to structure hns3_hw.
- * @return
- * 0 on success.
- */
static int
-hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
+hns3_update_mac_stats(struct hns3_hw *hw)
{
+#define HNS3_MAC_STATS_REG_NUM_PER_DESC 4
+
uint64_t *data = (uint64_t *)(&hw->mac_stats);
struct hns3_cmd_desc *desc;
+ uint32_t stats_iterms;
uint64_t *desc_data;
- uint16_t i, k, n;
+ uint32_t desc_num;
+ uint16_t i;
int ret;
+	/* The first desc has a 64-bit header, so it must be accounted for. */
+ desc_num = hw->mac_stats_reg_num / HNS3_MAC_STATS_REG_NUM_PER_DESC + 1;
desc = rte_malloc("hns3_mac_desc",
desc_num * sizeof(struct hns3_cmd_desc), 0);
if (desc == NULL) {
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* For special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (uint64_t *)(&desc[i].data[0]);
- n = HNS3_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (uint64_t *)(&desc[i]);
- n = HNS3_RD_OTHER_STATS_NUM;
- }
-
- for (k = 0; k < n; k++) {
- *data += rte_le_to_cpu_64(*desc_data);
- data++;
- desc_data++;
- }
+ stats_iterms = RTE_MIN(sizeof(hw->mac_stats) / sizeof(uint64_t),
+ hw->mac_stats_reg_num);
+ desc_data = (uint64_t *)(&desc[0].data[0]);
+ for (i = 0; i < stats_iterms; i++) {
+ /*
+		 * Data memory is contiguous and only the first descriptor has a
+ * header in this command.
+ */
+ *data += rte_le_to_cpu_64(*desc_data);
+ data++;
+ desc_data++;
}
rte_free(desc);
return 0;
}
-/*
- * Query Mac stat reg num command ,opcode id: 0x0033.
- * This command is used before send 'query_mac_stat command', the descriptor
- * number of 'query_mac_stat command' must match with reg_num in this command.
- * @praram rte_stats
- * Pointer to structure rte_eth_stats.
- * @return
- * 0 on success.
- */
static int
-hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
+hns3_mac_query_reg_num(struct hns3_hw *hw, uint32_t *reg_num)
{
- struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
+#define HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B 3
struct hns3_cmd_desc desc;
- uint32_t *desc_data;
- uint32_t reg_num;
int ret;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
ret = hns3_cmd_send(hw, &desc, 1);
- if (ret)
+ if (ret) {
+ hns3_err(hw, "failed to query MAC statistic reg number, ret = %d",
+ ret);
return ret;
+ }
- /*
- * The num of MAC statistics registers that are provided by IMP in this
- * version.
- */
- desc_data = (uint32_t *)(&desc.data[0]);
- reg_num = rte_le_to_cpu_32(*desc_data);
+	/* The number of MAC statistics registers is provided by the firmware. */
+ *reg_num = rte_le_to_cpu_32(desc.data[0]);
+ if (*reg_num == 0) {
+ hns3_err(hw, "MAC statistic reg number is invalid!");
+ return -ENODATA;
+ }
/*
- * The descriptor number of 'query_additional_mac_stat command' is
- * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)';
- * This value is 83 in this version
+	 * If the driver doesn't request the firmware to report more MAC
+	 * statistics items and the total register number using the new
+	 * method, the firmware only reports the number of valid statistics
+	 * registers. However, structure hns3_mac_stats in the driver contains
+	 * both valid and reserved statistics items, so the three reserved
+	 * statistics registers must be added to the reported number.
*/
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
+ *reg_num += HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B;
+
+ return 0;
+}
+
+int
+hns3_query_mac_stats_reg_num(struct hns3_hw *hw)
+{
+ uint32_t mac_stats_reg_num = 0;
+ int ret;
+
+ ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num);
+ if (ret)
+ return ret;
+
+ hw->mac_stats_reg_num = mac_stats_reg_num;
+ if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t))
+ hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver.");
return 0;
}
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- uint32_t desc_num;
+
+ return hns3_update_mac_stats(hw);
+}
+
+static int
+hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+ struct hns3_query_rpu_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint64_t cnt;
+ uint32_t tc_num;
int ret;
- ret = hns3_mac_query_reg_num(dev, &desc_num);
- if (ret == 0)
- ret = hns3_update_mac_stats(hw, desc_num);
- else
- hns3_err(hw, "Query mac reg num fail : %d", ret);
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
+ req = (struct hns3_query_rpu_cmd *)desc.data;
+
+ /*
+	 * A tc_num of 0 means the RPU stats of all TC channels will be
+	 * fetched from the firmware.
+ */
+ tc_num = 0;
+ req->tc_queue_num = rte_cpu_to_le_32(tc_num);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "failed to query RPU stats: %d", ret);
+ return ret;
+ }
+
+ cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
+ stats->rpu_rx_drop_cnt += cnt;
+
+ return 0;
+}
+
+static void
+hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+
+ stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
+}
+
+static int
+hns3_update_rpu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret = 0;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
+ ret = hns3_update_port_rpu_drop_stats(hw);
+ else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
+ hns3_update_function_rpu_drop_stats(hw);
+
return ret;
}
-/* Get tqp stats from register */
static int
-hns3_update_tqp_stats(struct hns3_hw *hw)
+hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
+ int bd_num, bool is_rx)
{
- struct hns3_tqp_stats *stats = &hw->tqp_stats;
- struct hns3_cmd_desc desc;
+ struct hns3_query_ssu_cmd *req;
+ int ret;
+ int i;
+
+ for (i = 0; i < bd_num - 1; i++) {
+ hns3_cmd_setup_basic_desc(&desc[i],
+ HNS3_OPC_SSU_DROP_REG, true);
+ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+ }
+ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ req->rxtx = is_rx ? 0 : 1;
+ ret = hns3_cmd_send(hw, desc, bd_num);
+
+ return ret;
+}
+
+static int
+hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
+ struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
+ struct hns3_query_ssu_cmd *req;
uint64_t cnt;
- uint16_t i;
int ret;
- for (i = 0; i < hw->tqps_num; i++) {
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
- true);
+ ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
+ true);
+ if (ret) {
+ hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
+ return ret;
+ }
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
- ret = hns3_cmd_send(hw, &desc, 1);
- if (ret) {
- hns3_err(hw, "Failed to query RX No.%d queue stat: %d",
- i, ret);
- return ret;
- }
- cnt = rte_le_to_cpu_32(desc.data[1]);
- stats->rcb_rx_ring_pktnum_rcd += cnt;
- stats->rcb_rx_ring_pktnum[i] += cnt;
-
- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
- true);
-
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
- ret = hns3_cmd_send(hw, &desc, 1);
- if (ret) {
- hns3_err(hw, "Failed to query TX No.%d queue stat: %d",
- i, ret);
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
+ rte_le_to_cpu_32(req->full_drop_cnt) +
+ rte_le_to_cpu_32(req->part_drop_cnt);
+
+ stats->ssu_rx_drop_cnt += cnt;
+
+ return 0;
+}
+
+static int
+hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
+{
+ struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
+ struct hns3_query_ssu_cmd *req;
+ uint64_t cnt;
+ int ret;
+
+ ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
+ false);
+ if (ret) {
+ hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_query_ssu_cmd *)desc[0].data;
+ cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
+ rte_le_to_cpu_32(req->full_drop_cnt) +
+ rte_le_to_cpu_32(req->part_drop_cnt);
+
+ hw->oerror_stats += cnt;
+
+ return 0;
+}
+
+static int
+hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
+ return 0;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
+ ret = hns3_update_port_rx_ssu_drop_stats(hw);
+ if (ret)
return ret;
- }
- cnt = rte_le_to_cpu_32(desc.data[1]);
- stats->rcb_tx_ring_pktnum_rcd += cnt;
- stats->rcb_tx_ring_pktnum[i] += cnt;
}
+ ret = hns3_update_rpu_drop_stats(hw);
+ if (ret)
+ return ret;
+
+ if (is_clear)
+ memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
+
return 0;
}
+static int
+hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
+ return 0;
+
+ ret = hns3_update_port_tx_ssu_drop_stats(hw);
+ if (ret)
+ return ret;
+
+ if (is_clear)
+ hw->oerror_stats = 0;
+
+ return 0;
+}
+
+static void
+hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue *rxq,
+ struct hns3_tqp_stats *stats)
+{
+ uint32_t cnt;
+
+ cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+ stats->rcb_rx_ring_pktnum_rcd += cnt;
+ stats->rcb_rx_ring_pktnum[rxq->queue_id] += cnt;
+}
+
+static void
+hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue *txq,
+ struct hns3_tqp_stats *stats)
+{
+ uint32_t cnt;
+
+ cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+ stats->rcb_tx_ring_pktnum_rcd += cnt;
+ stats->rcb_tx_ring_pktnum[txq->queue_id] += cnt;
+}
+
/*
* Query tqp tx queue statistics ,opcode id: 0x0B03.
* Query tqp rx queue statistics ,opcode id: 0x0B13.
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
- uint64_t cnt;
- uint64_t num;
uint16_t i;
int ret;
- /* Update tqp stats by read register */
- ret = hns3_update_tqp_stats(hw);
+ /* Update imissed stats */
+ ret = hns3_update_imissed_stats(hw, false);
if (ret) {
- hns3_err(hw, "Update tqp stats fail : %d", ret);
- return ret;
+ hns3_err(hw, "update imissed stats failed, ret = %d", ret);
+ goto out;
}
+ rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
+ imissed_stats->ssu_rx_drop_cnt;
- /* Get the error stats of received packets */
- num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues);
- for (i = 0; i != num; ++i) {
+ /* Get the error stats and bytes of received packets */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
- if (rxq) {
- cnt = rxq->l2_errors + rxq->pkt_len_errors;
- rte_stats->q_errors[i] = cnt;
- rte_stats->q_ipackets[i] =
- stats->rcb_rx_ring_pktnum[i] - cnt;
- rte_stats->ierrors += cnt;
- }
+ if (rxq == NULL)
+ continue;
+
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_rcb_rx_ring_stats_get(rxq, stats);
+ rte_spinlock_unlock(&hw->stats_lock);
+
+ rte_stats->ierrors += rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
+ rte_stats->ibytes += rxq->basic_stats.bytes;
}
- /* Get the error stats of transmitted packets */
- num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues);
- for (i = 0; i < num; i++) {
+
+ /* Reads all the stats of a txq in a loop to keep them synchronized */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
- if (txq)
- rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i];
+ if (txq == NULL)
+ continue;
+
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_rcb_tx_ring_stats_get(txq, stats);
+ rte_spinlock_unlock(&hw->stats_lock);
+ rte_stats->obytes += txq->basic_stats.bytes;
+ }
+
+ ret = hns3_update_oerror_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update oerror stats failed, ret = %d", ret);
+ goto out;
}
+ rte_stats->oerrors = hw->oerror_stats;
- rte_stats->oerrors = 0;
- rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd -
- rte_stats->ierrors;
+ /*
+	 * If HW statistics are reset by stats_reset, but a lot of residual
+	 * packets exist in the hardware queue and these packets are error
+	 * packets, an underflow (wraparound) may occur. So return 0 then.
+ */
+ rte_stats->ipackets =
+ stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
+ stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
rte_stats->oerrors;
rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
-
- return 0;
+out:
+ return ret;
}
int
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_tqp_stats *stats = &hw->tqp_stats;
- struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint16_t i;
int ret;
/*
- * If this is a reset xstats is NULL, and we have cleared the
- * registers by reading them.
+ * Note: Reading hardware statistics of imissed registers will
+ * clear them.
*/
- for (i = 0; i < hw->tqps_num; i++) {
- hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
- true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
- ret = hns3_cmd_send(hw, &desc_reset, 1);
- if (ret) {
- hns3_err(hw, "Failed to reset RX No.%d queue stat: %d",
- i, ret);
- }
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+ goto out;
+ }
- hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
- true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
- ret = hns3_cmd_send(hw, &desc_reset, 1);
- if (ret) {
- hns3_err(hw, "Failed to reset TX No.%d queue stat: %d",
- i, ret);
- }
+ /*
+ * Note: Reading hardware statistics of oerror registers will
+ * clear them.
+ */
+ ret = hns3_update_oerror_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear oerror stats failed, ret = %d", ret);
+ goto out;
}
- /* Clear Rx BD and Tx error stats */
- for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
- if (rxq) {
- rxq->pkt_len_errors = 0;
- rxq->l2_errors = 0;
- rxq->l3_csum_erros = 0;
- rxq->l4_csum_erros = 0;
- rxq->ol3_csum_erros = 0;
- rxq->ol4_csum_erros = 0;
- }
+ if (rxq == NULL)
+ continue;
+
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
}
- memset(stats, 0, sizeof(struct hns3_tqp_stats));
+ /* Clear all the stats of a rxq in a loop to keep them synchronized */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ rte_spinlock_lock(&hw->stats_lock);
+ memset(&rxq->basic_stats, 0,
+ sizeof(struct hns3_rx_basic_stats));
+
+ /* This register is read-clear */
+ (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
+ rte_spinlock_unlock(&hw->stats_lock);
+ }
- return 0;
+ /* Clear all the stats of a txq in a loop to keep them synchronized */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ rte_spinlock_lock(&hw->stats_lock);
+ memset(&txq->basic_stats, 0,
+ sizeof(struct hns3_tx_basic_stats));
+
+ /* This register is read-clear */
+ (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+ rte_spinlock_unlock(&hw->stats_lock);
+ }
+
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_tqp_stats_clear(hw);
+ rte_spinlock_unlock(&hw->stats_lock);
+out:
+ return ret;
}
-static void
+static int
hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
int ret;
ret = hns3_query_update_mac_stats(dev);
- if (ret)
+ if (ret) {
hns3_err(hw, "Clear Mac stats fail : %d", ret);
+ return ret;
+ }
memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
+
+ return 0;
+}
+
+static int
+hns3_get_imissed_stats_num(struct hns3_adapter *hns)
+{
+#define NO_IMISSED_STATS_NUM 0
+#define RPU_STATS_ITEM_NUM 1
+ struct hns3_hw *hw = &hns->hw;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
+ return NO_IMISSED_STATS_NUM;
+
+ if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
+ return HNS3_NUM_IMISSED_XSTATS;
+
+ return RPU_STATS_ITEM_NUM;
}
/* This function calculates the number of xstats based on the current config */
static int
hns3_xstats_calc_num(struct rte_eth_dev *dev)
{
+#define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
+ HNS3_NUM_RXQ_DFX_XSTATS + \
+ HNS3_NUM_RX_QUEUE_STATS + \
+ HNS3_NUM_RXQ_BASIC_STATS)
+#define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
+ HNS3_NUM_TX_QUEUE_STATS + \
+ HNS3_NUM_TXQ_BASIC_STATS)
+
struct hns3_adapter *hns = dev->data->dev_private;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
+ int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
+ int stats_num;
+
+ stats_num = rx_comm_stats_num + tx_comm_stats_num;
+ stats_num += hns3_get_imissed_stats_num(hns);
if (hns->is_vf)
- return dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS +
- HNS3_NUM_RESET_XSTATS;
+ stats_num += HNS3_NUM_RESET_XSTATS;
else
- return dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS +
- HNS3_FIX_NUM_STATS;
+ stats_num += HNS3_FIX_NUM_STATS;
+
+ return stats_num;
+}
+
+static void
+hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_offset;
+ uint16_t i, j;
+
+ /* Get rx queue stats */
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
+ reg_offset = hns3_get_tqp_reg_offset(j);
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_rx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+
+ /* Get tx queue stats */
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
+ reg_offset = hns3_get_tqp_reg_offset(j);
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_tx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_rx_dfx_stats *dfx_stats;
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ dfx_stats = &rxq->dfx_stats;
+ for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
+ val = (char *)dfx_stats +
+ hns3_rxq_dfx_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_tx_dfx_stats *dfx_stats;
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ dfx_stats = &txq->dfx_stats;
+ for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
+ val = (char *)dfx_stats +
+ hns3_txq_dfx_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ hns3_rxq_dfx_stats_get(dev, xstats, count);
+ hns3_txq_dfx_stats_get(dev, xstats, count);
+}
+
+static void
+hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+ struct hns3_rx_basic_stats *rxq_stats;
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ hns3_rcb_rx_ring_stats_get(rxq, stats);
+ rxq_stats = &rxq->basic_stats;
+ rxq_stats->errors = rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
+
+ /*
+ * If HW statistics are reset by stats_reset, but a lot of
+ * residual packets exist in the hardware queue and these
+		 * packets are error packets, an underflow may occur.
+ * So return 0 in this case.
+ */
+ rxq_stats->packets =
+ stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
+ stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
+ for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
+ val = (char *)rxq_stats +
+ hns3_rxq_basic_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+ struct hns3_tx_basic_stats *txq_stats;
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+ char *val;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ hns3_rcb_tx_ring_stats_get(txq, stats);
+
+ txq_stats = &txq->basic_stats;
+ txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
+
+ for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
+ val = (char *)txq_stats +
+ hns3_txq_basic_stats_strings[j].offset;
+ xstats[*count].value = *(uint64_t *)val;
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ hns3_rxq_basic_stats_get(dev, xstats, count);
+ hns3_txq_basic_stats_get(dev, xstats, count);
+}
+
+static void
+hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
+ int imissed_stats_num;
+ int cnt = *count;
+ char *addr;
+ uint16_t i;
+
+ imissed_stats_num = hns3_get_imissed_stats_num(hns);
+
+ for (i = 0; i < imissed_stats_num; i++) {
+ addr = (char *)imissed_stats +
+ hns3_imissed_stats_strings[i].offset;
+ xstats[cnt].value = *(uint64_t *)addr;
+ xstats[cnt].id = cnt;
+ cnt++;
+ }
+
+ *count = cnt;
}
/*
* @praram xstats
* A pointer to a table of structure of type *rte_eth_xstat*
* to be filled with device statistics ids and values.
- * This parameter can be set to NULL if n is 0.
+ * This parameter can be set to NULL if and only if n is 0.
* @param n
* The size of the xstats array (number of elements).
+ * If lower than the required number of elements, the function returns the
+ * required number of elements.
+ * If equal to zero, the xstats parameter must be NULL, the function returns
+ * the required number of elements.
* @return
* 0 on fail, count(The size of the statistics elements) on success.
*/
unsigned int n)
{
struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_pf *pf = &hns->pf;
struct hns3_hw *hw = &hns->hw;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
+ struct hns3_rx_bd_errors_stats *rx_err_stats;
struct hns3_rx_queue *rxq;
uint16_t i, j;
char *addr;
int count;
int ret;
- if (xstats == NULL)
- return 0;
-
count = hns3_xstats_calc_num(dev);
if ((int)n < count)
return count;
count = 0;
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_tqp_basic_stats_get(dev, xstats, &count);
+
if (!hns->is_vf) {
/* Update Mac stats */
ret = hns3_query_update_mac_stats(dev);
- if (ret) {
+ if (ret < 0) {
hns3_err(hw, "Update Mac stats fail : %d", ret);
- return 0;
+ rte_spinlock_unlock(&hw->stats_lock);
+ return ret;
}
/* Get MAC stats from hw->hw_xstats.mac_stats struct */
xstats[count].id = count;
count++;
}
+ }
+ rte_spinlock_unlock(&hw->stats_lock);
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- addr = (char *)&pf->abn_int_stats +
- hns3_error_int_stats_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
- }
+ ret = hns3_update_imissed_stats(hw, false);
+ if (ret) {
+ hns3_err(hw, "update imissed stats failed, ret = %d", ret);
+ return ret;
}
+ hns3_imissed_stats_get(dev, xstats, &count);
+
/* Get the reset stat */
for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
}
/* Get the Rx BD errors stats */
- for (j = 0; j != dev->data->nb_rx_queues; ++j) {
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
rxq = dev->data->rx_queues[j];
- addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
+ if (rxq) {
+ rx_err_stats = &rxq->err_stats;
+ addr = (char *)rx_err_stats +
+ hns3_rx_bd_error_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
+ xstats[count].id = count;
+ count++;
+ }
}
}
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_tqp_dfx_stats_get(dev, xstats, &count);
+ hns3_queue_stats_get(dev, xstats, &count);
+ rte_spinlock_unlock(&hw->stats_lock);
+
return count;
}
+static void
+hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "rx_q%u_%s", i,
+ hns3_rxq_basic_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "tx_q%u_%s", i,
+ hns3_txq_basic_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "rx_q%u_%s", i,
+ hns3_rxq_dfx_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
+ snprintf(xstats_names[*count].name,
+ sizeof(xstats_names[*count].name),
+ "tx_q%u_%s", i,
+ hns3_txq_dfx_stats_strings[j].name);
+ (*count)++;
+ }
+ }
+}
+
+static void
+hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ uint32_t *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ uint32_t cnt = *count;
+ int imissed_stats_num;
+ uint16_t i;
+
+ imissed_stats_num = hns3_get_imissed_stats_num(hns);
+
+ for (i = 0; i < imissed_stats_num; i++) {
+ snprintf(xstats_names[cnt].name,
+ sizeof(xstats_names[cnt].name),
+ "%s", hns3_imissed_stats_strings[i].name);
+ cnt++;
+ }
+
+ *count = cnt;
+}
+
/*
* Retrieve names of extended statistics of an Ethernet device.
*
* is the number of entries filled in the stats table.
*/
int
-hns3_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int size)
{
if (xstats_names == NULL)
return cnt_stats;
+ hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
+
/* Note: size limited checked in rte_eth_xstats_get_names() */
if (!hns->is_vf) {
/* Get MAC name from hw->hw_xstats.mac_stats struct */
"%s", hns3_mac_strings[i].name);
count++;
}
-
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s", hns3_error_int_stats_strings[i].name);
- count++;
- }
}
+
+ hns3_imissed_stats_name_get(dev, xstats_names, &count);
+
for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "rx_q%u%s", j,
+ "rx_q%u_%s", j,
hns3_rx_bd_error_strings[i].name);
count++;
}
}
+ hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
+
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
+ count++;
+ }
+ }
+
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
+ count++;
+ }
+ }
+
return count;
}
* A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if size is 0. In this case function will retrieve
- * all avalible statistics.
+ * all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
* @param size
hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, uint32_t size)
{
+ const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_pf *pf = &hns->pf;
+ struct rte_eth_xstat *values_copy;
struct hns3_hw *hw = &hns->hw;
- struct hns3_mac_stats *mac_stats = &hw->mac_stats;
- struct hns3_reset_stats *reset_stats = &hw->reset.stats;
- struct hns3_rx_queue *rxq;
- const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
- uint64_t *values_copy;
+ uint32_t count_value;
uint64_t len;
- uint32_t count = 0;
- uint16_t i, j;
- char *addr;
- int ret;
+ uint32_t i;
- if (ids == NULL || size < cnt_stats)
+ if (ids == NULL && values == NULL)
return cnt_stats;
- /* Update tqp stats by read register */
- ret = hns3_update_tqp_stats(hw);
- if (ret) {
- hns3_err(hw, "Update tqp stats fail : %d", ret);
- return ret;
- }
+ if (ids == NULL)
+ if (size < cnt_stats)
+ return cnt_stats;
- len = cnt_stats * HNS3_VALUES_BYTES;
+ len = cnt_stats * sizeof(struct rte_eth_xstat);
values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
if (values_copy == NULL) {
- hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
- "to store statistics values", len);
+ hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics values",
+ len);
return -ENOMEM;
}
- if (!hns->is_vf) {
- /* Get MAC name from hw->hw_xstats.mac_stats */
- for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
- addr = (char *)mac_stats + hns3_mac_strings[i].offset;
- values_copy[count] = *(uint64_t *)addr;
- count++;
- }
-
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- addr = (char *)&pf->abn_int_stats +
- hns3_error_int_stats_strings[i].offset;
- values_copy[count] = *(uint64_t *)addr;
- count++;
- }
+ count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
+ if (count_value != cnt_stats) {
+ rte_free(values_copy);
+ return -EINVAL;
}
- for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
- addr = (char *)reset_stats +
- hns3_reset_stats_strings[i].offset;
- values_copy[count] = *(uint64_t *)addr;
- count++;
- }
+ if (ids == NULL && values != NULL) {
+ for (i = 0; i < cnt_stats; i++)
+ memcpy(&values[i], &values_copy[i].value,
+ sizeof(values[i]));
- for (j = 0; j != dev->data->nb_rx_queues; ++j) {
- for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
- rxq = dev->data->rx_queues[j];
- addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
- values_copy[count] = *(uint64_t *)addr;
- count++;
- }
+ rte_free(values_copy);
+ return cnt_stats;
}
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
- "should < %u", i, ids[i], cnt_stats);
+ hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
+ i, ids[i], cnt_stats);
rte_free(values_copy);
return -EINVAL;
}
- memcpy(&values[i], &values_copy[ids[i]], sizeof(values[i]));
+ memcpy(&values[i], &values_copy[ids[i]].value,
+ sizeof(values[i]));
}
rte_free(values_copy);
*
* @param dev
* Pointer to Ethernet device.
+ * @param ids
+ * IDs array given by app to retrieve specific statistics
* @param xstats_names
* An rte_eth_xstat_name array of at least *size* elements to
* be filled. If set to NULL, the function returns the required number
* of elements.
- * @param ids
- * IDs array given by app to retrieve specific statistics
* @param size
* The size of the xstats_names array (number of elements).
* @return
*/
int
hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
struct rte_eth_xstat_name *xstats_names,
- const uint64_t *ids, uint32_t size)
+ uint32_t size)
{
+ const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
struct hns3_adapter *hns = dev->data->dev_private;
- struct rte_eth_xstat_name *xstats_names_copy;
+ struct rte_eth_xstat_name *names_copy;
struct hns3_hw *hw = &hns->hw;
- const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
- uint16_t count_name = 0;
- uint16_t i, j;
uint64_t len;
+ uint32_t i;
- if (ids == NULL || xstats_names == NULL)
+ if (xstats_names == NULL)
return cnt_stats;
+ if (ids == NULL) {
+ if (size < cnt_stats)
+ return cnt_stats;
+
+ return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
+ }
+
len = cnt_stats * sizeof(struct rte_eth_xstat_name);
- xstats_names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
- if (xstats_names_copy == NULL) {
- hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
- "to store statistics names", len);
+ names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
+ if (names_copy == NULL) {
+ hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics names",
+ len);
return -ENOMEM;
}
- if (!hns->is_vf) {
- for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
- snprintf(xstats_names_copy[count_name].name,
- sizeof(xstats_names_copy[count_name].name),
- "%s", hns3_mac_strings[i].name);
- count_name++;
- }
- for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
- snprintf(xstats_names_copy[count_name].name,
- sizeof(xstats_names_copy[count_name].name),
- "%s", hns3_error_int_stats_strings[i].name);
- count_name++;
- }
- }
- for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
- snprintf(xstats_names_copy[count_name].name,
- sizeof(xstats_names_copy[count_name].name),
- "%s", hns3_reset_stats_strings[i].name);
- count_name++;
- }
- for (j = 0; j != dev->data->nb_rx_queues; ++j) {
- for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
- snprintf(xstats_names_copy[count_name].name,
- sizeof(xstats_names_copy[count_name].name),
- "rx_q%u%s", j,
- hns3_rx_bd_error_strings[i].name);
- count_name++;
- }
- }
+ (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
- "should < %u", i, ids[i], cnt_stats);
- rte_free(xstats_names_copy);
+ hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
+ i, ids[i], cnt_stats);
+ rte_free(names_copy);
return -EINVAL;
}
snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
- "%s", xstats_names_copy[ids[i]].name);
+ "%s", names_copy[ids[i]].name);
}
- rte_free(xstats_names_copy);
+ rte_free(names_copy);
return size;
}
+static void
+hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
+{
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ /* Clear Rx dfx stats */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq)
+ memset(&rxq->dfx_stats, 0,
+ sizeof(struct hns3_rx_dfx_stats));
+ }
+
+ /* Clear Tx dfx stats */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq)
+ memset(&txq->dfx_stats, 0,
+ sizeof(struct hns3_tx_dfx_stats));
+ }
+}
+
int
hns3_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
/* Clear tqp stats */
- (void)hns3_stats_reset(dev);
+ ret = hns3_stats_reset(dev);
+ if (ret)
+ return ret;
+
+ rte_spinlock_lock(&hw->stats_lock);
+ hns3_tqp_dfx_stats_clear(dev);
+
/* Clear reset stats */
memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
if (hns->is_vf)
- return 0;
+ goto out;
/* HW registers are cleared on read */
- hns3_mac_stats_reset(dev);
- /* Clear error stats */
- memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats));
+ ret = hns3_mac_stats_reset(dev);
+
+out:
+ rte_spinlock_unlock(&hw->stats_lock);
+
+ return ret;
+}
+
+static int
+hns3_tqp_stats_init(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate rx_ring pkt_num.");
+ return -ENOMEM;
+ }
+
+ tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate tx_ring pkt_num.");
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ return -ENOMEM;
+ }
return 0;
}
+
+static void
+hns3_tqp_stats_uninit(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ rte_free(tqp_stats->rcb_tx_ring_pktnum);
+ tqp_stats->rcb_tx_ring_pktnum = NULL;
+}
+
+static void
+hns3_tqp_stats_clear(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+
+ stats->rcb_rx_ring_pktnum_rcd = 0;
+ stats->rcb_tx_ring_pktnum_rcd = 0;
+ memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+ memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+}
+
+int
+hns3_stats_init(struct hns3_hw *hw)
+{
+ int ret;
+
+ rte_spinlock_init(&hw->stats_lock);
+	/* Clear the hardware imissed statistics registers at init time. */
+ ret = hns3_update_imissed_stats(hw, true);
+ if (ret) {
+ hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
+ return ret;
+ }
+
+ return hns3_tqp_stats_init(hw);
+}
+
+void
+hns3_stats_uninit(struct hns3_hw *hw)
+{
+ hns3_tqp_stats_uninit(hw);
+}
+
+static void
+hns3_update_queues_stats(struct hns3_hw *hw)
+{
+ struct rte_eth_dev_data *data = hw->data;
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ rxq = data->rx_queues[i];
+ if (rxq != NULL)
+ hns3_rcb_rx_ring_stats_get(rxq, &hw->tqp_stats);
+ }
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ txq = data->tx_queues[i];
+ if (txq != NULL)
+ hns3_rcb_tx_ring_stats_get(txq, &hw->tqp_stats);
+ }
+}
+
+/*
+ * Some hardware statistics registers are narrower than 64 bits. If they are
+ * not read for a long time they can wrap around, corrupting the accumulated
+ * counts. This function reads them from the periodic task to prevent that.
+ */
+void
+hns3_update_hw_stats(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+
+ rte_spinlock_lock(&hw->stats_lock);
+ if (!hns->is_vf)
+ hns3_update_mac_stats(hw);
+
+ hns3_update_queues_stats(hw);
+ rte_spinlock_unlock(&hw->stats_lock);
+}