rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
- rxq->l2_errors = 0;
- rxq->pkt_len_errors = 0;
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
+ memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+ memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
+ memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
/* Fill in tunneling parameters if necessary */
if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
- txq->unsupported_tunnel_pkt_cnt++;
+ txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
return -EINVAL;
}
* driver support, the packet will be ignored.
*/
if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
- txq->over_length_pkt_cnt++;
+ txq->dfx_stats.over_length_pkt_cnt++;
return -EINVAL;
}
max_non_tso_bd_num = txq->max_non_tso_bd_num;
if (unlikely(nb_buf > max_non_tso_bd_num)) {
- txq->exceed_limit_bd_pkt_cnt++;
+ txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
max_non_tso_bd_num);
if (ret) {
- txq->exceed_limit_bd_reassem_fail++;
+ txq->dfx_stats.exceed_limit_bd_reassem_fail++;
return ret;
}
*m_seg = new_pkt;
nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
if (unlikely(nb_pkts == 0)) {
if (txq->tx_bd_ready == 0)
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
return 0;
}
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
- txq->queue_full_cnt++;
+ txq->dfx_stats.queue_full_cnt++;
if (nb_tx == 0)
return 0;
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
if (appended == NULL) {
- txq->pkt_padding_fail_cnt++;
+ txq->dfx_stats.pkt_padding_fail_cnt++;
break;
}
struct rte_mbuf *mbuf;
};
+/* Rx DFX statistics: checksum-error counters for events that are
+ * reported to the application via mbuf ol_flags but do not cause the
+ * driver to discard the packet.
+ */
+struct hns3_rx_dfx_stats {
+	uint64_t l3_csum_errors;
+	uint64_t l4_csum_errors;
+	uint64_t ol3_csum_errors;
+	uint64_t ol4_csum_errors;
+};
+
+/* Rx BD error statistics: hardware-reported BD errors (L2 error,
+ * packet length error) for which the driver discards the packet.
+ */
+struct hns3_rx_bd_errors_stats {
+	uint64_t l2_errors;
+	uint64_t pkt_len_errors;
+};
+
struct hns3_rx_queue {
	void *io_base;
	volatile void *io_head_reg;
	bool pvid_sw_discard_en;
	bool enabled; /* indicate if Rx queue has been enabled */
-	uint64_t l2_errors;
-	uint64_t pkt_len_errors;
-	uint64_t l3_csum_errors;
-	uint64_t l4_csum_errors;
-	uint64_t ol3_csum_errors;
-	uint64_t ol4_csum_errors;
+	/* DFX statistics: counted events do not cause packet discard */
+	struct hns3_rx_dfx_stats dfx_stats;
+	/* BD error statistics: counted events cause the driver to discard
+	 * the received packet
+	 */
+	struct hns3_rx_bd_errors_stats err_stats;
	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
	uint16_t bulk_mbuf_num;
	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
+/*
+ * The following items are used for the abnormal error statistics in
+ * the Tx datapath. When the upper level application calls the
+ * rte_eth_tx_burst API function to send multiple packets at a time
+ * based on the hns3 network engine, there are some abnormal conditions
+ * that cause the driver to fail to operate the hardware to send
+ * packets correctly.
+ * Note: when rte_eth_tx_burst is called to send multiple packets at a
+ * time and the first abnormal error is detected, the relevant error
+ * statistic is increased by one and the transmit loop of the function
+ * is exited. That is to say, even if several packets in the burst
+ * would trigger the same abnormal error, the relevant error statistic
+ * in the driver is only increased by one for that burst.
+ * The detailed description of the Tx abnormal error statistic items is
+ * as below:
+ * - over_length_pkt_cnt
+ *     Total number of packets whose length exceeds HNS3_MAX_FRAME_LEN,
+ *     the maximum frame length the driver supports.
+ *
+ * - exceed_limit_bd_pkt_cnt
+ *     Total number of packets whose required BD (buffer descriptor)
+ *     number exceeds the hardware limit.
+ *
+ * - exceed_limit_bd_reassem_fail
+ *     Total number of packets whose required BD number exceeds the
+ *     hardware limit and which then fail to be reassembled.
+ *
+ * - unsupported_tunnel_pkt_cnt
+ *     Total number of unsupported tunnel packets. The unsupported
+ *     tunnel types are: vxlan_gpe, gtp, ipip and MPLSINUDP; MPLSINUDP
+ *     is a packet with an MPLS-in-UDP RFC 7510 header.
+ *
+ * - queue_full_cnt
+ *     Total count of bursts for which the number of available BDs in
+ *     the current BD queue is less than the number of BDs the packet
+ *     needs.
+ *
+ * - pkt_padding_fail_cnt
+ *     Total count of packets whose length is less than the minimum
+ *     packet length (struct hns3_tx_queue::min_tx_pkt_len) supported
+ *     by hardware in the Tx direction and which fail to be padded
+ *     with 0.
+ */
+struct hns3_tx_dfx_stats {
+	uint64_t over_length_pkt_cnt;
+	uint64_t exceed_limit_bd_pkt_cnt;
+	uint64_t exceed_limit_bd_reassem_fail;
+	uint64_t unsupported_tunnel_pkt_cnt;
+	uint64_t queue_full_cnt;
+	uint64_t pkt_padding_fail_cnt;
+};
+
struct hns3_tx_queue {
void *io_base;
volatile void *io_tail_reg;
bool pvid_sw_shift_en;
bool enabled; /* indicate if Tx queue has been enabled */
- /*
- * The following items are used for the abnormal errors statistics in
- * the Tx datapath. When upper level application calls the
- * rte_eth_tx_burst API function to send multiple packets at a time with
- * burst mode based on hns3 network engine, there are some abnormal
- * conditions that cause the driver to fail to operate the hardware to
- * send packets correctly.
- * Note: When using burst mode to call the rte_eth_tx_burst API function
- * to send multiple packets at a time. When the first abnormal error is
- * detected, add one to the relevant error statistics item, and then
- * exit the loop of sending multiple packets of the function. That is to
- * say, even if there are multiple packets in which abnormal errors may
- * be detected in the burst, the relevant error statistics in the driver
- * will only be increased by one.
- * The detail description of the Tx abnormal errors statistic items as
- * below:
- * - over_length_pkt_cnt
- * Total number of greater than HNS3_MAX_FRAME_LEN the driver
- * supported.
- *
- * - exceed_limit_bd_pkt_cnt
- * Total number of exceeding the hardware limited bd which process
- * a packet needed bd numbers.
- *
- * - exceed_limit_bd_reassem_fail
- * Total number of exceeding the hardware limited bd fail which
- * process a packet needed bd numbers and reassemble fail.
- *
- * - unsupported_tunnel_pkt_cnt
- * Total number of unsupported tunnel packet. The unsupported tunnel
- * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
- * with MPLS-in-UDP RFC 7510 header.
- *
- * - queue_full_cnt
- * Total count which the available bd numbers in current bd queue is
- * less than the bd numbers with the pkt process needed.
- *
- * - pkt_padding_fail_cnt
- * Total count which the packet length is less than minimum packet
- * length(struct hns3_tx_queue::min_tx_pkt_len) supported by
- * hardware in Tx direction and fail to be appended with 0.
- */
- uint64_t over_length_pkt_cnt;
- uint64_t exceed_limit_bd_pkt_cnt;
- uint64_t exceed_limit_bd_reassem_fail;
- uint64_t unsupported_tunnel_pkt_cnt;
- uint64_t queue_full_cnt;
- uint64_t pkt_padding_fail_cnt;
+ struct hns3_tx_dfx_stats dfx_stats;
};
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
if (l234_info & BIT(HNS3_RXD_L2E_B))
- rxq->l2_errors++;
+ rxq->err_stats.l2_errors++;
else
- rxq->pkt_len_errors++;
+ rxq->err_stats.pkt_len_errors++;
return -EINVAL;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- rxq->l3_csum_errors++;
+ rxq->dfx_stats.l3_csum_errors++;
tmp |= HNS3_L3_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- rxq->l4_csum_errors++;
+ rxq->dfx_stats.l4_csum_errors++;
tmp |= HNS3_L4_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
- rxq->ol3_csum_errors++;
+ rxq->dfx_stats.ol3_csum_errors++;
tmp |= HNS3_OUTER_L3_CKSUM_ERR;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
- rxq->ol4_csum_errors++;
+ rxq->dfx_stats.ol4_csum_errors++;
tmp |= HNS3_OUTER_L4_CKSUM_ERR;
}
}
/* The statistic of errors in Rx BD */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
- {"RX_PKT_LEN_ERRORS",
+ {"PKT_LEN_ERRORS",
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
- {"L2_RX_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
- {"RX_L3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)},
- {"RX_L4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)},
- {"RX_OL3_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)},
- {"RX_OL4_CHECKSUM_ERRORS",
- HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)}
+ {"L2_ERRORS",
+ HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
};
-/* The statistic of the Tx errors */
-static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
- {"TX_OVER_LENGTH_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
- {"TX_EXCEED_LIMITED_BD_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
- {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
- {"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
- {"TX_QUEUE_FULL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
- {"TX_SHORT_PKT_PAD_FAIL_CNT",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
+/* The dfx statistic in Rx datapath */
+static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
+ {"L3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
+ {"L4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
+ {"OL3_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
+ {"OL4_CHECKSUM_ERRORS",
+ HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
+};
+
+/* The dfx statistic in Tx datapath */
+static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
+ {"OVER_LENGTH_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+ {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+ {"UNSUPPORTED_TUNNEL_PKT_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+ {"QUEUE_FULL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
+ {"SHORT_PKT_PAD_FAIL_CNT",
+ HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
};
/* The statistic of rx queue */
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
-#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
- sizeof(hns3_tx_errors_strings[0]))
+#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
+ sizeof(hns3_rxq_dfx_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
+ sizeof(hns3_txq_dfx_stats_strings[0]))
#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
sizeof(hns3_rx_queue_strings[0]))
for (i = 0; i != num; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
- cnt = rxq->l2_errors + rxq->pkt_len_errors;
+ cnt = rxq->err_stats.l2_errors +
+ rxq->err_stats.pkt_len_errors;
rte_stats->q_errors[i] = cnt;
rte_stats->q_ipackets[i] =
stats->rcb_rx_ring_pktnum[i] - cnt;
* Clear soft stats of rx error packet which will be dropped
* in driver.
*/
- for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
- rxq->pkt_len_errors = 0;
- rxq->l2_errors = 0;
+ rxq->err_stats.pkt_len_errors = 0;
+ rxq->err_stats.l2_errors = 0;
}
}
hns3_xstats_calc_num(struct rte_eth_dev *dev)
{
struct hns3_adapter *hns = dev->data->dev_private;
- int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
- int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
- int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
- int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+ int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS;
+ int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS;
+ int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS;
+ int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS;
+ int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS;
if (hns->is_vf)
- return bderr_stats + tx_err_stats + rx_queue_stats +
- tx_queue_stats + HNS3_NUM_RESET_XSTATS;
+ return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+ rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS;
else
- return bderr_stats + tx_err_stats + rx_queue_stats +
- tx_queue_stats + HNS3_FIX_NUM_STATS;
+ return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+ rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS;
}
static void
-hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
int *count)
{
struct hns3_adapter *hns = dev->data->dev_private;
}
}
+/*
+ * Fill in the per-queue Rx DFX (checksum error) xstats values.
+ *
+ * An entry is emitted for every statistic of every Rx queue index so
+ * that the values and ids stay aligned with the names generated by
+ * hns3_tqp_dfx_stats_name_get() and with the total calculated by
+ * hns3_xstats_calc_num(); a queue that is not set up reports zero.
+ */
+static void
+hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	struct hns3_rx_dfx_stats *dfx_stats;
+	struct hns3_rx_queue *rxq;
+	uint16_t i, j;
+	char *val;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
+		for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
+			if (rxq == NULL) {
+				/* Queue not set up: report zero so the ids
+				 * stay aligned with the generated names.
+				 */
+				xstats[*count].value = 0;
+			} else {
+				dfx_stats = &rxq->dfx_stats;
+				val = (char *)dfx_stats +
+				      hns3_rxq_dfx_stats_strings[j].offset;
+				xstats[*count].value = *(uint64_t *)val;
+			}
+			xstats[*count].id = *count;
+			(*count)++;
+		}
+	}
+}
+
+/*
+ * Fill in the per-queue Tx DFX (abnormal error) xstats values.
+ *
+ * An entry is emitted for every statistic of every Tx queue index so
+ * that the values and ids stay aligned with the names generated by
+ * hns3_tqp_dfx_stats_name_get() and with the total calculated by
+ * hns3_xstats_calc_num(); a queue that is not set up reports zero.
+ */
+static void
+hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	struct hns3_tx_dfx_stats *dfx_stats;
+	struct hns3_tx_queue *txq;
+	uint16_t i, j;
+	char *val;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
+		for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
+			if (txq == NULL) {
+				/* Queue not set up: report zero so the ids
+				 * stay aligned with the generated names.
+				 */
+				xstats[*count].value = 0;
+			} else {
+				dfx_stats = &txq->dfx_stats;
+				val = (char *)dfx_stats +
+				      hns3_txq_dfx_stats_strings[j].offset;
+				xstats[*count].value = *(uint64_t *)val;
+			}
+			xstats[*count].id = *count;
+			(*count)++;
+		}
+	}
+}
+
+/* Fill in the per-queue DFX xstats values: all Rx queues first, then
+ * all Tx queues (must match the order used by the names callback).
+ */
+static void
+hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	hns3_rxq_dfx_stats_get(dev, xstats, count);
+	hns3_txq_dfx_stats_get(dev, xstats, count);
+}
/*
* Retrieve extended(tqp | Mac) statistics of an Ethernet device.
* @param dev
struct hns3_hw *hw = &hns->hw;
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
+ struct hns3_rx_bd_errors_stats *rx_err_stats;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint16_t i, j;
char *addr;
int count;
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
rxq = dev->data->rx_queues[j];
- addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
+ if (rxq) {
+ rx_err_stats = &rxq->err_stats;
+ addr = (char *)rx_err_stats +
+ hns3_rx_bd_error_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
+ xstats[count].id = count;
+ count++;
+ }
}
}
- /* Get the Tx errors stats */
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
- txq = dev->data->tx_queues[j];
- addr = (char *)txq + hns3_tx_errors_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
+ hns3_tqp_dfx_stats_get(dev, xstats, &count);
+ hns3_queue_stats_get(dev, xstats, &count);
+
+ return count;
+}
+
+/*
+ * Generate the per-queue DFX xstats names ("rx_q%u_<STAT>" then
+ * "tx_q%u_<STAT>") in the same order as hns3_tqp_dfx_stats_get() fills
+ * the values: all Rx queues first, then all Tx queues.
+ * A name is emitted for every queue index, including queues that have
+ * not been set up.
+ */
+static void
+hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
+			    struct rte_eth_xstat_name *xstats_names,
+			    uint32_t *count)
+{
+	uint16_t i, j;
+
+	for (j = 0; j < dev->data->nb_rx_queues; j++) {
+		for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) {
+			snprintf(xstats_names[*count].name,
+				 sizeof(xstats_names[*count].name),
+				 "rx_q%u_%s", j,
+				 hns3_rxq_dfx_stats_strings[i].name);
+			(*count)++;
+		}
+	}
-	hns3_get_queue_stats(dev, xstats, &count);
-	return count;
+	for (j = 0; j < dev->data->nb_tx_queues; j++) {
+		for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) {
+			snprintf(xstats_names[*count].name,
+				 sizeof(xstats_names[*count].name),
+				 "tx_q%u_%s", j,
+				 hns3_txq_dfx_stats_strings[i].name);
+			(*count)++;
+		}
+	}
+}
/*
for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "rx_q%u%s", j,
+ "rx_q%u_%s", j,
hns3_rx_bd_error_strings[i].name);
count++;
}
}
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "tx_q%u%s", j,
- hns3_tx_errors_strings[i].name);
- count++;
- }
- }
+ hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "rx_q%u%s", j, hns3_rx_queue_strings[i].name);
+ "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
count++;
}
}
for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "tx_q%u%s", j, hns3_tx_queue_strings[i].name);
+ "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
count++;
}
}
{
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
- int i;
+ uint16_t i;
/* Clear Rx dfx stats */
- for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (rxq) {
- rxq->l3_csum_errors = 0;
- rxq->l4_csum_errors = 0;
- rxq->ol3_csum_errors = 0;
- rxq->ol4_csum_errors = 0;
- }
+ if (rxq)
+ memset(&rxq->dfx_stats, 0,
+ sizeof(struct hns3_rx_dfx_stats));
}
/* Clear Tx dfx stats */
- for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (txq) {
- txq->over_length_pkt_cnt = 0;
- txq->exceed_limit_bd_pkt_cnt = 0;
- txq->exceed_limit_bd_reassem_fail = 0;
- txq->unsupported_tunnel_pkt_cnt = 0;
- txq->queue_full_cnt = 0;
- txq->pkt_padding_fail_cnt = 0;
- }
+ if (txq)
+ memset(&txq->dfx_stats, 0,
+ sizeof(struct hns3_tx_dfx_stats));
}
}