static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
+/*
+ * Maps each error-interrupt xstat name to the byte offset of its counter
+ * inside the PF's abn_int_stats (see hns3_error_int_stats_add()).  The
+ * former ST2 / PORT_BASED / PF_ABNORMAL entries are split into _MSIX and
+ * _RAS variants so the two reporting paths are counted separately.
+ */
{"MAC_AFIFO_TNL_INT_R",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_intr_cnt)},
- {"PPU_MPF_ABNORMAL_INT_ST2",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_intr_st2_cnt)},
- {"SSU_PORT_BASED_ERR_INT",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_intr_cnt)},
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_int_cnt)},
+ {"PPU_MPF_ABNORMAL_INT_ST2_MSIX",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_msix_cnt)},
+ {"SSU_PORT_BASED_ERR_INT_MSIX",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_int_cnt)},
{"PPP_PF_ABNORMAL_INT_ST0",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_intr_cnt)},
- {"PPU_PF_ABNORMAL_INT_ST",
- HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_intr_cnt)}
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_int_cnt)},
+ {"PPU_PF_ABNORMAL_INT_ST_MSIX",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_msix_cnt)},
+ {"IMP_TCM_ECC_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_tcm_ecc_int_cnt)},
+ {"CMDQ_MEM_ECC_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(cmdq_mem_ecc_int_cnt)},
+ {"IMP_RD_POISON_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_rd_poison_int_cnt)},
+ {"TQP_INT_ECC_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(tqp_int_ecc_int_cnt)},
+ {"MSIX_ECC_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(msix_ecc_int_cnt)},
+ {"SSU_ECC_MULTI_BIT_INT_0",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_0_cnt)},
+ {"SSU_ECC_MULTI_BIT_INT_1",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_1_cnt)},
+ {"SSU_COMMON_ERR_INT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_common_ecc_int_cnt)},
+ {"IGU_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_int_cnt)},
+ {"PPP_MPF_ABNORMAL_INT_ST1",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st1_cnt)},
+ {"PPP_MPF_ABNORMAL_INT_ST3",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st3_cnt)},
+ {"PPU_MPF_ABNORMAL_INT_ST1",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st1_cnt)},
+ {"PPU_MPF_ABNORMAL_INT_ST2_RAS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_ras_cnt)},
+ {"PPU_MPF_ABNORMAL_INT_ST3",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st3_cnt)},
+ {"TM_SCH_RINT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(tm_sch_int_cnt)},
+ {"QCN_FIFO_RINT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_fifo_int_cnt)},
+ {"QCN_ECC_RINT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_ecc_int_cnt)},
+ {"NCSI_ECC_INT_RPT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ncsi_ecc_int_cnt)},
+ {"SSU_PORT_BASED_ERR_INT_RAS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_err_int_cnt)},
+ {"SSU_FIFO_OVERFLOW_INT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_fifo_overflow_int_cnt)},
+ {"SSU_ETS_TCG_INT",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ets_tcg_int_cnt)},
+ {"IGU_EGU_TNL_INT_STS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_egu_tnl_int_cnt)},
+ {"PPU_PF_ABNORMAL_INT_ST_RAS",
+ HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_ras_cnt)},
};
/* The statistic of reset */
{"L2_RX_ERRORS",
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
{"RX_L3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_erros)},
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)},
{"RX_L4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_erros)},
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)},
{"RX_OL3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_erros)},
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)},
{"RX_OL4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_erros)}
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)}
+};
+
+/*
+ * The statistic of the Tx errors: each offset locates a per-queue
+ * software error counter inside struct hns3_tx_queue (entries are read
+ * directly from the txq structure when xstats are gathered).
+ */
+static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
+ {"TX_OVER_LENGTH_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+ {"TX_EXCEED_LIMITED_BD_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+ {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+ {"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+ {"TX_QUEUE_FULL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
+ {"TX_SHORT_PKT_PAD_FAIL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
};
/* The statistic of rx queue */
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
+/* Number of entries in the Tx error xstats name table. */
+#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
+ sizeof(hns3_tx_errors_strings[0]))
+
#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
sizeof(hns3_rx_queue_strings[0]))
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
HNS3_NUM_RESET_XSTATS)
+/* Forward declaration; the definition is at the bottom of this file. */
+static void hns3_tqp_stats_clear(struct hns3_hw *hw);
+
/*
* Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
* This command is used before send 'query_mac_stat command', the descriptor
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
true);
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "Failed to query RX No.%d queue stat: %d",
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
true);
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "Failed to query TX No.%d queue stat: %d",
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint16_t i;
int ret;
/*
- * If this is a reset xstats is NULL, and we have cleared the
- * registers by reading them.
+ * Note: Reading hardware statistics of rx/tx queue packet number
+ * will clear them.
*/
for (i = 0; i < hw->tqps_num; i++) {
hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
hns3_err(hw, "Failed to reset RX No.%d queue stat: %d",
i, ret);
+ return ret;
}
hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
hns3_err(hw, "Failed to reset TX No.%d queue stat: %d",
i, ret);
+ return ret;
}
}
- /* Clear Rx BD and Tx error stats */
+ /* Clear the Rx BD errors stats */
for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
rxq->pkt_len_errors = 0;
rxq->l2_errors = 0;
- rxq->l3_csum_erros = 0;
- rxq->l4_csum_erros = 0;
- rxq->ol3_csum_erros = 0;
- rxq->ol4_csum_erros = 0;
+ rxq->l3_csum_errors = 0;
+ rxq->l4_csum_errors = 0;
+ rxq->ol3_csum_errors = 0;
+ rxq->ol4_csum_errors = 0;
+ }
+ }
+
+ /* Clear the Tx errors stats */
+ for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq) {
+ txq->over_length_pkt_cnt = 0;
+ txq->exceed_limit_bd_pkt_cnt = 0;
+ txq->exceed_limit_bd_reassem_fail = 0;
+ txq->unsupported_tunnel_pkt_cnt = 0;
+ txq->queue_full_cnt = 0;
+ txq->pkt_padding_fail_cnt = 0;
}
}
- memset(stats, 0, sizeof(struct hns3_tqp_stats));
+ hns3_tqp_stats_clear(hw);
return 0;
}
-static void
+static int
hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
int ret;
ret = hns3_query_update_mac_stats(dev);
- if (ret)
+ if (ret) {
hns3_err(hw, "Clear Mac stats fail : %d", ret);
+ return ret;
+ }
memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
+
+ return 0;
}
/* This function calculates the number of xstats based on the current config */
{
struct hns3_adapter *hns = dev->data->dev_private;
int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
+ int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
if (hns->is_vf)
- return bderr_stats + rx_queue_stats + tx_queue_stats +
- HNS3_NUM_RESET_XSTATS;
+ return bderr_stats + tx_err_stats + rx_queue_stats +
+ tx_queue_stats + HNS3_NUM_RESET_XSTATS;
else
- return bderr_stats + rx_queue_stats + tx_queue_stats +
- HNS3_FIX_NUM_STATS;
+ return bderr_stats + tx_err_stats + rx_queue_stats +
+ tx_queue_stats + HNS3_FIX_NUM_STATS;
+}
+
+/*
+ * Append the per-queue Rx/Tx hardware counters to @xstats.  Values are
+ * read from each queue's TQP register block; *count indexes the next
+ * free slot, doubles as the xstat id, and is advanced for every entry
+ * written.
+ */
+static void
+hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_offset;
+ uint16_t i, j;
+
+ /* Get rx queue stats */
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
+ /* NOTE(review): reg_offset depends only on j and
+  * could be computed once per queue, outside the
+  * inner loop.
+  */
+ reg_offset = hns3_get_tqp_reg_offset(j);
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_rx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+
+ /* Get tx queue stats */
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
+ reg_offset = hns3_get_tqp_reg_offset(j);
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_tx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+
+}
+
+/*
+ * Increment the PF abnormal-interrupt counter whose xstat name matches
+ * @err.  A name with no matching table entry is silently ignored.
+ */
+void
+hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
+{
+ struct hns3_pf *pf = &hns->pf;
+ uint16_t i;
+ char *addr;
+
+ for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
+ if (strcmp(hns3_error_int_stats_strings[i].name, err) == 0) {
+ /* Locate the counter by its offset into abn_int_stats. */
+ addr = (char *)&pf->abn_int_stats +
+ hns3_error_int_stats_strings[i].offset;
+ *(uint64_t *)addr += 1;
+ break;
+ }
+ }
}
/*
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_queue *rxq;
- uint32_t reg_offset;
+ struct hns3_tx_queue *txq;
uint16_t i, j;
char *addr;
int count;
}
}
- /* Get rx queue stats */
- for (j = 0; j < dev->data->nb_rx_queues; j++) {
- for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
- xstats[count].value = hns3_read_dev(hw,
- reg_offset + hns3_rx_queue_strings[i].offset);
- xstats[count].id = count;
- count++;
- }
- }
-
- /* Get tx queue stats */
+ /* Get the Tx errors stats */
for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
- xstats[count].value = hns3_read_dev(hw,
- reg_offset + hns3_tx_queue_strings[i].offset);
+ for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
+ txq = dev->data->tx_queues[j];
+ addr = (char *)txq + hns3_tx_errors_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
xstats[count].id = count;
count++;
}
}
+ hns3_get_queue_stats(dev, xstats, &count);
return count;
}
}
}
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u%s", j,
+ hns3_tx_errors_strings[i].name);
+ count++;
+ }
+ }
+
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_pf *pf = &hns->pf;
+ int ret;
/* Clear tqp stats */
- (void)hns3_stats_reset(dev);
+ ret = hns3_stats_reset(dev);
+ if (ret)
+ return ret;
+
/* Clear reset stats */
memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
return 0;
/* HW registers are cleared on read */
- hns3_mac_stats_reset(dev);
+ ret = hns3_mac_stats_reset(dev);
+ if (ret)
+ return ret;
+
/* Clear error stats */
memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats));
return 0;
}
+
+/*
+ * Allocate the per-TQP Rx/Tx packet counter arrays (hw->tqps_num
+ * uint64_t entries each, zero-initialized via rte_zmalloc).
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure; on failure
+ * nothing is left allocated.
+ */
+int
+hns3_tqp_stats_init(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate rx_ring pkt_num.");
+ return -ENOMEM;
+ }
+
+ tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate tx_ring pkt_num.");
+ /* Roll back the Rx array so no memory is leaked on failure. */
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Free the per-TQP packet counter arrays allocated by
+ * hns3_tqp_stats_init() and clear the stale pointers.
+ */
+void
+hns3_tqp_stats_uninit(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ rte_free(tqp_stats->rcb_tx_ring_pktnum);
+ tqp_stats->rcb_tx_ring_pktnum = NULL;
+}
+
+/* Zero the accumulated totals and every per-TQP packet counter. */
+static void
+hns3_tqp_stats_clear(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+
+ stats->rcb_rx_ring_pktnum_rcd = 0;
+ stats->rcb_tx_ring_pktnum_rcd = 0;
+ memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+ memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+}