txq->configured = true;
txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
+ txq->over_length_pkt_cnt = 0;
+ txq->exceed_limit_bd_pkt_cnt = 0;
+ txq->exceed_limit_bd_reassem_fail = 0;
+ txq->unsupported_tunnel_pkt_cnt = 0;
+ txq->queue_full_cnt = 0;
+ txq->pkt_padding_fail_cnt = 0;
rte_spinlock_lock(&hw->lock);
dev->data->tx_queues[idx] = txq;
rte_spinlock_unlock(&hw->lock);
if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
- hdr_lens))
+ hdr_lens)) {
+ txq->unsupported_tunnel_pkt_cnt++;
return -EINVAL;
+ }
}
/* Enable checksum offloading */
if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
 * If the packet length is greater than HNS3_MAX_FRAME_LEN, the maximum
 * frame length the driver supports, the packet will be ignored.
 */
- if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+ if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
+ txq->over_length_pkt_cnt++;
return -EINVAL;
+ }
if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+ txq->exceed_limit_bd_pkt_cnt++;
ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
- if (ret)
+ if (ret) {
+ txq->exceed_limit_bd_reassem_fail++;
return ret;
+ }
*m_seg = new_pkt;
}
nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) {
+ txq->queue_full_cnt++;
if (nb_tx == 0)
return 0;
add_len = HNS3_MIN_PKT_SIZE -
rte_pktmbuf_pkt_len(tx_pkt);
appended = rte_pktmbuf_append(tx_pkt, add_len);
- if (appended == NULL)
+ if (appended == NULL) {
+ txq->pkt_padding_fail_cnt++;
break;
+ }
memset(appended, 0, add_len);
}
bool tx_deferred_start; /* don't start this queue in dev start */
bool configured; /* indicate if tx queue has been configured */
+
+ /*
+ * The following fields record abnormal error statistics for the Tx
+ * datapath. When an upper level application calls the rte_eth_tx_burst
+ * API function to send a burst of packets on the hns3 network engine,
+ * several abnormal conditions can prevent the driver from programming
+ * the hardware to send the packets correctly.
+ * Note: when the first abnormal error is detected within a burst, the
+ * relevant error counter is increased by one and the transmit loop of
+ * the burst function is exited. Therefore, even if several packets in
+ * one burst would hit abnormal errors, the relevant error counter is
+ * increased only once per burst.
+ * The Tx abnormal error statistic items are described below:
+ * - over_length_pkt_cnt
+ *     Total number of packets whose length exceeds HNS3_MAX_FRAME_LEN,
+ *     the maximum frame length the driver supports.
+ *
+ * - exceed_limit_bd_pkt_cnt
+ *     Total number of packets that need more BDs than the hardware
+ *     limit allows.
+ *
+ * - exceed_limit_bd_reassem_fail
+ *     Total number of packets that need more BDs than the hardware
+ *     limit allows and for which reassembly into fewer BDs fails.
+ *
+ * - unsupported_tunnel_pkt_cnt
+ *     Total number of packets with an unsupported tunnel type, such as
+ *     vxlan_gpe, gtp, ipip or MPLS-in-UDP (RFC 7510).
+ *
+ * - queue_full_cnt
+ *     Total number of times the number of available BDs in the current
+ *     ring is less than the number of BDs the packet needs.
+ *
+ * - pkt_padding_fail_cnt
+ *     Total number of packets shorter than the minimum packet size
+ *     HNS3_MIN_PKT_SIZE that fail to be padded with zeros.
+ */
+ uint64_t over_length_pkt_cnt;
+ uint64_t exceed_limit_bd_pkt_cnt;
+ uint64_t exceed_limit_bd_reassem_fail;
+ uint64_t unsupported_tunnel_pkt_cnt;
+ uint64_t queue_full_cnt;
+ uint64_t pkt_padding_fail_cnt;
};
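/*
 * Illustrative sketch, not part of the driver: one way an application could
 * read the Tx error counters documented above through the generic xstats
 * API. The helper name and the loose "TX_"/"_CNT" name filter are
 * assumptions made for this example only; it assumes <rte_ethdev.h>,
 * <stdio.h>, <stdlib.h>, <string.h> and <inttypes.h> are included.
 */
static void
example_dump_hns3_tx_errors(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int nb, i;

	/* Calling with a NULL array returns the number of available xstats. */
	nb = rte_eth_xstats_get(port_id, NULL, 0);
	if (nb <= 0)
		return;

	xstats = calloc(nb, sizeof(*xstats));
	names = calloc(nb, sizeof(*names));
	if (xstats == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) < 0 ||
	    rte_eth_xstats_get(port_id, xstats, nb) < 0)
		goto out;

	/* Print counters whose names look like the per-queue Tx error items. */
	for (i = 0; i < nb; i++)
		if (strstr(names[i].name, "TX_") != NULL &&
		    strstr(names[i].name, "_CNT") != NULL)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
out:
	free(xstats);
	free(names);
}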
struct hns3_queue_info {
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_erros)}
};
+/* The statistics of the Tx errors */
+static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
+ {"TX_OVER_LENGTH_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+ {"TX_EXCEED_LIMITED_BD_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+ {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+ {"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+ {"TX_QUEUE_FULL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
+ {"TX_SHORT_PKT_PAD_FAIL_CNT",
+ HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
+};
+
/* The statistic of rx queue */
static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
{"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
+#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
+ sizeof(hns3_tx_errors_strings[0]))
+
#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
sizeof(hns3_rx_queue_strings[0]))
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint16_t i;
int ret;
}
}
- /* Clear Rx BD and Tx error stats */
+ /* Clear the Rx BD error stats */
for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
}
}
+ /* Clear the Tx error stats */
+ for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq) {
+ txq->over_length_pkt_cnt = 0;
+ txq->exceed_limit_bd_pkt_cnt = 0;
+ txq->exceed_limit_bd_reassem_fail = 0;
+ txq->unsupported_tunnel_pkt_cnt = 0;
+ txq->queue_full_cnt = 0;
+ txq->pkt_padding_fail_cnt = 0;
+ }
+ }
+
memset(stats, 0, sizeof(struct hns3_tqp_stats));
return 0;
{
struct hns3_adapter *hns = dev->data->dev_private;
int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
+ int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
if (hns->is_vf)
- return bderr_stats + rx_queue_stats + tx_queue_stats +
- HNS3_NUM_RESET_XSTATS;
+ return bderr_stats + tx_err_stats + rx_queue_stats +
+ tx_queue_stats + HNS3_NUM_RESET_XSTATS;
else
- return bderr_stats + rx_queue_stats + tx_queue_stats +
- HNS3_FIX_NUM_STATS;
+ return bderr_stats + tx_err_stats + rx_queue_stats +
+ tx_queue_stats + HNS3_FIX_NUM_STATS;
+}
+
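+/*
+ * Append the per-queue statistics read from the TQP registers to the xstats
+ * array; *count is advanced for each entry filled.
+ */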
+static void
+hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ int *count)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ uint32_t reg_offset;
+ uint16_t i, j;
+
+ /* Get rx queue stats */
+ for (j = 0; j < dev->data->nb_rx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
+ reg_offset = HNS3_TQP_REG_OFFSET +
+ HNS3_TQP_REG_SIZE * j;
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_rx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+
+ /* Get tx queue stats */
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
+ reg_offset = HNS3_TQP_REG_OFFSET +
+ HNS3_TQP_REG_SIZE * j;
+ xstats[*count].value = hns3_read_dev(hw,
+ reg_offset + hns3_tx_queue_strings[i].offset);
+ xstats[*count].id = *count;
+ (*count)++;
+ }
+ }
+}
/*
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_queue *rxq;
- uint32_t reg_offset;
+ struct hns3_tx_queue *txq;
uint16_t i, j;
char *addr;
int count;
}
}
- /* Get rx queue stats */
- for (j = 0; j < dev->data->nb_rx_queues; j++) {
- for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
- xstats[count].value = hns3_read_dev(hw,
- reg_offset + hns3_rx_queue_strings[i].offset);
- xstats[count].id = count;
- count++;
- }
- }
-
- /* Get tx queue stats */
+ /* Get the Tx error stats */
for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
- xstats[count].value = hns3_read_dev(hw,
- reg_offset + hns3_tx_queue_strings[i].offset);
+ for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
+ txq = dev->data->tx_queues[j];
+ addr = (char *)txq + hns3_tx_errors_strings[i].offset;
+ xstats[count].value = *(uint64_t *)addr;
xstats[count].id = count;
count++;
}
}
+ hns3_get_queue_stats(dev, xstats, &count);
return count;
}
}
}
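+ /*
+ * Fill the names of the Tx error stats. The "tx_q%u%s" format below
+ * yields names such as "tx_q0TX_QUEUE_FULL_CNT" for queue 0.
+ */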
+ for (j = 0; j < dev->data->nb_tx_queues; j++) {
+ for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u%s", j,
+ hns3_tx_errors_strings[i].name);
+ count++;
+ }
+ }
+
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
snprintf(xstats_names[count].name,