/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */
5 #include <rte_ethdev.h>
7 #include <rte_malloc.h>
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
14 /* The statistics of the per-rxq basic stats */
15 static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
17 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
19 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
21 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
24 /* The statistics of the per-txq basic stats */
25 static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
27 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
29 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
33 static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
34 {"mac_tx_mac_pause_num",
35 HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
36 {"mac_rx_mac_pause_num",
37 HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
38 {"mac_tx_control_pkt_num",
39 HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
40 {"mac_rx_control_pkt_num",
41 HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
42 {"mac_tx_pfc_pkt_num",
43 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
44 {"mac_tx_pfc_pri0_pkt_num",
45 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
46 {"mac_tx_pfc_pri1_pkt_num",
47 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
48 {"mac_tx_pfc_pri2_pkt_num",
49 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
50 {"mac_tx_pfc_pri3_pkt_num",
51 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
52 {"mac_tx_pfc_pri4_pkt_num",
53 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
54 {"mac_tx_pfc_pri5_pkt_num",
55 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
56 {"mac_tx_pfc_pri6_pkt_num",
57 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
58 {"mac_tx_pfc_pri7_pkt_num",
59 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
60 {"mac_rx_pfc_pkt_num",
61 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
62 {"mac_rx_pfc_pri0_pkt_num",
63 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
64 {"mac_rx_pfc_pri1_pkt_num",
65 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
66 {"mac_rx_pfc_pri2_pkt_num",
67 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
68 {"mac_rx_pfc_pri3_pkt_num",
69 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
70 {"mac_rx_pfc_pri4_pkt_num",
71 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
72 {"mac_rx_pfc_pri5_pkt_num",
73 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
74 {"mac_rx_pfc_pri6_pkt_num",
75 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
76 {"mac_rx_pfc_pri7_pkt_num",
77 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
78 {"mac_tx_total_pkt_num",
79 HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
80 {"mac_tx_total_oct_num",
81 HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
82 {"mac_tx_good_pkt_num",
83 HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
84 {"mac_tx_bad_pkt_num",
85 HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
86 {"mac_tx_good_oct_num",
87 HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
88 {"mac_tx_bad_oct_num",
89 HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
90 {"mac_tx_uni_pkt_num",
91 HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
92 {"mac_tx_multi_pkt_num",
93 HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
94 {"mac_tx_broad_pkt_num",
95 HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
96 {"mac_tx_undersize_pkt_num",
97 HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
98 {"mac_tx_oversize_pkt_num",
99 HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
100 {"mac_tx_64_oct_pkt_num",
101 HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
102 {"mac_tx_65_127_oct_pkt_num",
103 HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
104 {"mac_tx_128_255_oct_pkt_num",
105 HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
106 {"mac_tx_256_511_oct_pkt_num",
107 HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
108 {"mac_tx_512_1023_oct_pkt_num",
109 HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
110 {"mac_tx_1024_1518_oct_pkt_num",
111 HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
112 {"mac_tx_1519_2047_oct_pkt_num",
113 HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
114 {"mac_tx_2048_4095_oct_pkt_num",
115 HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
116 {"mac_tx_4096_8191_oct_pkt_num",
117 HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
118 {"mac_tx_8192_9216_oct_pkt_num",
119 HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
120 {"mac_tx_9217_12287_oct_pkt_num",
121 HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
122 {"mac_tx_12288_16383_oct_pkt_num",
123 HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
124 {"mac_tx_1519_max_good_pkt_num",
125 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
126 {"mac_tx_1519_max_bad_pkt_num",
127 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
128 {"mac_rx_total_pkt_num",
129 HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
130 {"mac_rx_total_oct_num",
131 HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
132 {"mac_rx_good_pkt_num",
133 HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
134 {"mac_rx_bad_pkt_num",
135 HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
136 {"mac_rx_good_oct_num",
137 HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
138 {"mac_rx_bad_oct_num",
139 HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
140 {"mac_rx_uni_pkt_num",
141 HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
142 {"mac_rx_multi_pkt_num",
143 HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
144 {"mac_rx_broad_pkt_num",
145 HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
146 {"mac_rx_undersize_pkt_num",
147 HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
148 {"mac_rx_oversize_pkt_num",
149 HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
150 {"mac_rx_64_oct_pkt_num",
151 HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
152 {"mac_rx_65_127_oct_pkt_num",
153 HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
154 {"mac_rx_128_255_oct_pkt_num",
155 HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
156 {"mac_rx_256_511_oct_pkt_num",
157 HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
158 {"mac_rx_512_1023_oct_pkt_num",
159 HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
160 {"mac_rx_1024_1518_oct_pkt_num",
161 HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
162 {"mac_rx_1519_2047_oct_pkt_num",
163 HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
164 {"mac_rx_2048_4095_oct_pkt_num",
165 HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
166 {"mac_rx_4096_8191_oct_pkt_num",
167 HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
168 {"mac_rx_8192_9216_oct_pkt_num",
169 HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
170 {"mac_rx_9217_12287_oct_pkt_num",
171 HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
172 {"mac_rx_12288_16383_oct_pkt_num",
173 HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
174 {"mac_rx_1519_max_good_pkt_num",
175 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
176 {"mac_rx_1519_max_bad_pkt_num",
177 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
178 {"mac_tx_fragment_pkt_num",
179 HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
180 {"mac_tx_undermin_pkt_num",
181 HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
182 {"mac_tx_jabber_pkt_num",
183 HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
184 {"mac_tx_err_all_pkt_num",
185 HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
186 {"mac_tx_from_app_good_pkt_num",
187 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
188 {"mac_tx_from_app_bad_pkt_num",
189 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
190 {"mac_rx_fragment_pkt_num",
191 HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
192 {"mac_rx_undermin_pkt_num",
193 HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
194 {"mac_rx_jabber_pkt_num",
195 HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
196 {"mac_rx_fcs_err_pkt_num",
197 HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
198 {"mac_rx_send_app_good_pkt_num",
199 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
200 {"mac_rx_send_app_bad_pkt_num",
201 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
204 /* The statistic of reset */
205 static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
207 HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
209 HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
211 HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
213 HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
214 {"RESET_SUCCESS_CNT",
215 HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
217 HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
219 HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
222 /* The statistic of errors in Rx BD */
223 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
225 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
227 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
230 /* The dfx statistic in Rx datapath */
231 static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
232 {"L3_CHECKSUM_ERRORS",
233 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
234 {"L4_CHECKSUM_ERRORS",
235 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
236 {"OL3_CHECKSUM_ERRORS",
237 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
238 {"OL4_CHECKSUM_ERRORS",
239 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
242 /* The dfx statistic in Tx datapath */
243 static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
244 {"OVER_LENGTH_PKT_CNT",
245 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
246 {"EXCEED_LIMITED_BD_PKT_CNT",
247 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
248 {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
249 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
250 {"UNSUPPORTED_TUNNEL_PKT_CNT",
251 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
253 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
254 {"SHORT_PKT_PAD_FAIL_CNT",
255 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
258 /* The statistic of rx queue */
259 static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
260 {"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
263 /* The statistic of tx queue */
264 static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
265 {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
268 /* The statistic of imissed packet */
269 static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
271 HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
273 HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
/* Element counts of the name/offset tables above. */
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
	sizeof(hns3_mac_strings[0]))

#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
	sizeof(hns3_reset_stats_strings[0]))

#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
	sizeof(hns3_rx_bd_error_strings[0]))

#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
	sizeof(hns3_rxq_dfx_stats_strings[0]))

#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
	sizeof(hns3_txq_dfx_stats_strings[0]))

#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
	sizeof(hns3_rx_queue_strings[0]))

#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
	sizeof(hns3_tx_queue_strings[0]))

#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
	sizeof(hns3_rxq_basic_stats_strings[0]))

#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
	sizeof(hns3_txq_basic_stats_strings[0]))

#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
	sizeof(hns3_imissed_stats_strings[0]))

/* Number of PF-only fixed xstats (MAC + reset). */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)

static void hns3_tqp_stats_clear(struct hns3_hw *hw);
311 hns3_update_mac_stats(struct hns3_hw *hw)
313 #define HNS3_MAC_STATS_REG_NUM_PER_DESC 4
315 uint64_t *data = (uint64_t *)(&hw->mac_stats);
316 struct hns3_cmd_desc *desc;
317 uint32_t stats_iterms;
323 /* The first desc has a 64-bit header, so need to consider it. */
324 desc_num = hw->mac_stats_reg_num / HNS3_MAC_STATS_REG_NUM_PER_DESC + 1;
325 desc = rte_malloc("hns3_mac_desc",
326 desc_num * sizeof(struct hns3_cmd_desc), 0);
328 hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
332 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
333 ret = hns3_cmd_send(hw, desc, desc_num);
335 hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
340 stats_iterms = RTE_MIN(sizeof(hw->mac_stats) / sizeof(uint64_t),
341 hw->mac_stats_reg_num);
342 desc_data = (uint64_t *)(&desc[0].data[0]);
343 for (i = 0; i < stats_iterms; i++) {
345 * Data memory is continuous and only the first descriptor has a
346 * header in this command.
348 *data += rte_le_to_cpu_64(*desc_data);
358 hns3_mac_query_reg_num(struct hns3_hw *hw, uint32_t *reg_num)
360 #define HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B 3
361 struct hns3_cmd_desc desc;
364 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
365 ret = hns3_cmd_send(hw, &desc, 1);
367 hns3_err(hw, "failed to query MAC statistic reg number, ret = %d",
372 /* The number of MAC statistics registers are provided by firmware. */
373 *reg_num = rte_le_to_cpu_32(desc.data[0]);
375 hns3_err(hw, "MAC statistic reg number is invalid!");
380 * If driver doesn't request the firmware to report more MAC statistics
381 * iterms and the total number of MAC statistics registers by using new
382 * method, firmware will only reports the number of valid statistics
383 * registers. However, structure hns3_mac_stats in driver contains valid
384 * and reserved statistics iterms. In this case, the total register
385 * number must be added to three reserved statistics registers.
387 *reg_num += HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B;
393 hns3_query_mac_stats_reg_num(struct hns3_hw *hw)
395 uint32_t mac_stats_reg_num = 0;
398 ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num);
402 hw->mac_stats_reg_num = mac_stats_reg_num;
403 if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t))
404 hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver.");
410 hns3_query_update_mac_stats(struct rte_eth_dev *dev)
412 struct hns3_adapter *hns = dev->data->dev_private;
413 struct hns3_hw *hw = &hns->hw;
415 return hns3_update_mac_stats(hw);
419 hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
421 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
422 struct hns3_query_rpu_cmd *req;
423 struct hns3_cmd_desc desc;
428 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
429 req = (struct hns3_query_rpu_cmd *)desc.data;
432 * tc_num is 0, means rpu stats of all TC channels will be
436 req->tc_queue_num = rte_cpu_to_le_32(tc_num);
437 ret = hns3_cmd_send(hw, &desc, 1);
439 hns3_err(hw, "failed to query RPU stats: %d", ret);
443 cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
444 stats->rpu_rx_drop_cnt += cnt;
450 hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
452 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
454 stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
458 hns3_update_rpu_drop_stats(struct hns3_hw *hw)
460 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
463 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
464 ret = hns3_update_port_rpu_drop_stats(hw);
465 else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
466 hns3_update_function_rpu_drop_stats(hw);
472 hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
473 int bd_num, bool is_rx)
475 struct hns3_query_ssu_cmd *req;
479 for (i = 0; i < bd_num - 1; i++) {
480 hns3_cmd_setup_basic_desc(&desc[i],
481 HNS3_OPC_SSU_DROP_REG, true);
482 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
484 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
485 req = (struct hns3_query_ssu_cmd *)desc[0].data;
486 req->rxtx = is_rx ? 0 : 1;
487 ret = hns3_cmd_send(hw, desc, bd_num);
493 hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
495 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
496 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
497 struct hns3_query_ssu_cmd *req;
501 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
504 hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
508 req = (struct hns3_query_ssu_cmd *)desc[0].data;
509 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
510 rte_le_to_cpu_32(req->full_drop_cnt) +
511 rte_le_to_cpu_32(req->part_drop_cnt);
513 stats->ssu_rx_drop_cnt += cnt;
519 hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
521 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
522 struct hns3_query_ssu_cmd *req;
526 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
529 hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
533 req = (struct hns3_query_ssu_cmd *)desc[0].data;
534 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
535 rte_le_to_cpu_32(req->full_drop_cnt) +
536 rte_le_to_cpu_32(req->part_drop_cnt);
538 hw->oerror_stats += cnt;
544 hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
546 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
549 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
552 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
553 ret = hns3_update_port_rx_ssu_drop_stats(hw);
558 ret = hns3_update_rpu_drop_stats(hw);
563 memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
569 hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
571 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
574 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
577 ret = hns3_update_port_tx_ssu_drop_stats(hw);
582 hw->oerror_stats = 0;
588 hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue *rxq,
589 struct hns3_tqp_stats *stats)
593 cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
594 stats->rcb_rx_ring_pktnum_rcd += cnt;
595 stats->rcb_rx_ring_pktnum[rxq->queue_id] += cnt;
599 hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue *txq,
600 struct hns3_tqp_stats *stats)
604 cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
605 stats->rcb_tx_ring_pktnum_rcd += cnt;
606 stats->rcb_tx_ring_pktnum[txq->queue_id] += cnt;
610 * Query tqp tx queue statistics ,opcode id: 0x0B03.
611 * Query tqp rx queue statistics ,opcode id: 0x0B13.
612 * Get all statistics of a port.
614 * Pointer to Ethernet device.
616 * Pointer to structure rte_eth_stats.
621 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
623 struct hns3_adapter *hns = eth_dev->data->dev_private;
624 struct hns3_hw *hw = &hns->hw;
625 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
626 struct hns3_tqp_stats *stats = &hw->tqp_stats;
627 struct hns3_rx_queue *rxq;
628 struct hns3_tx_queue *txq;
632 /* Update imissed stats */
633 ret = hns3_update_imissed_stats(hw, false);
635 hns3_err(hw, "update imissed stats failed, ret = %d", ret);
638 rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
639 imissed_stats->ssu_rx_drop_cnt;
641 /* Get the error stats and bytes of received packets */
642 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
643 rxq = eth_dev->data->rx_queues[i];
647 rte_spinlock_lock(&hw->stats_lock);
648 hns3_rcb_rx_ring_stats_get(rxq, stats);
649 rte_spinlock_unlock(&hw->stats_lock);
651 rte_stats->ierrors += rxq->err_stats.l2_errors +
652 rxq->err_stats.pkt_len_errors;
653 rte_stats->ibytes += rxq->basic_stats.bytes;
656 /* Reads all the stats of a txq in a loop to keep them synchronized */
657 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
658 txq = eth_dev->data->tx_queues[i];
662 rte_spinlock_lock(&hw->stats_lock);
663 hns3_rcb_tx_ring_stats_get(txq, stats);
664 rte_spinlock_unlock(&hw->stats_lock);
665 rte_stats->obytes += txq->basic_stats.bytes;
668 ret = hns3_update_oerror_stats(hw, false);
670 hns3_err(hw, "update oerror stats failed, ret = %d", ret);
673 rte_stats->oerrors = hw->oerror_stats;
676 * If HW statistics are reset by stats_reset, but a lot of residual
677 * packets exist in the hardware queue and these packets are error
678 * packets, flip overflow may occurred. So return 0 in this case.
680 rte_stats->ipackets =
681 stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
682 stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
683 rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
685 rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
691 hns3_stats_reset(struct rte_eth_dev *eth_dev)
693 struct hns3_adapter *hns = eth_dev->data->dev_private;
694 struct hns3_hw *hw = &hns->hw;
695 struct hns3_rx_queue *rxq;
696 struct hns3_tx_queue *txq;
701 * Note: Reading hardware statistics of imissed registers will
704 ret = hns3_update_imissed_stats(hw, true);
706 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
711 * Note: Reading hardware statistics of oerror registers will
714 ret = hns3_update_oerror_stats(hw, true);
716 hns3_err(hw, "clear oerror stats failed, ret = %d", ret);
720 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
721 rxq = eth_dev->data->rx_queues[i];
725 rxq->err_stats.pkt_len_errors = 0;
726 rxq->err_stats.l2_errors = 0;
729 /* Clear all the stats of a rxq in a loop to keep them synchronized */
730 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
731 rxq = eth_dev->data->rx_queues[i];
735 rte_spinlock_lock(&hw->stats_lock);
736 memset(&rxq->basic_stats, 0,
737 sizeof(struct hns3_rx_basic_stats));
739 /* This register is read-clear */
740 (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
741 rxq->err_stats.pkt_len_errors = 0;
742 rxq->err_stats.l2_errors = 0;
743 rte_spinlock_unlock(&hw->stats_lock);
746 /* Clear all the stats of a txq in a loop to keep them synchronized */
747 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
748 txq = eth_dev->data->tx_queues[i];
752 rte_spinlock_lock(&hw->stats_lock);
753 memset(&txq->basic_stats, 0,
754 sizeof(struct hns3_tx_basic_stats));
756 /* This register is read-clear */
757 (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
758 rte_spinlock_unlock(&hw->stats_lock);
761 rte_spinlock_lock(&hw->stats_lock);
762 hns3_tqp_stats_clear(hw);
763 rte_spinlock_unlock(&hw->stats_lock);
769 hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
771 struct hns3_adapter *hns = dev->data->dev_private;
772 struct hns3_hw *hw = &hns->hw;
773 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
776 ret = hns3_query_update_mac_stats(dev);
778 hns3_err(hw, "Clear Mac stats fail : %d", ret);
782 memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
788 hns3_get_imissed_stats_num(struct hns3_adapter *hns)
790 #define NO_IMISSED_STATS_NUM 0
791 #define RPU_STATS_ITEM_NUM 1
792 struct hns3_hw *hw = &hns->hw;
794 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
795 return NO_IMISSED_STATS_NUM;
797 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
798 return HNS3_NUM_IMISSED_XSTATS;
800 return RPU_STATS_ITEM_NUM;
803 /* This function calculates the number of xstats based on the current config */
805 hns3_xstats_calc_num(struct rte_eth_dev *dev)
807 #define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
808 HNS3_NUM_RXQ_DFX_XSTATS + \
809 HNS3_NUM_RX_QUEUE_STATS + \
810 HNS3_NUM_RXQ_BASIC_STATS)
811 #define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
812 HNS3_NUM_TX_QUEUE_STATS + \
813 HNS3_NUM_TXQ_BASIC_STATS)
815 struct hns3_adapter *hns = dev->data->dev_private;
816 uint16_t nb_rx_q = dev->data->nb_rx_queues;
817 uint16_t nb_tx_q = dev->data->nb_tx_queues;
818 int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
819 int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
822 stats_num = rx_comm_stats_num + tx_comm_stats_num;
823 stats_num += hns3_get_imissed_stats_num(hns);
826 stats_num += HNS3_NUM_RESET_XSTATS;
828 stats_num += HNS3_FIX_NUM_STATS;
834 hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
837 struct hns3_adapter *hns = dev->data->dev_private;
838 struct hns3_hw *hw = &hns->hw;
842 /* Get rx queue stats */
843 for (j = 0; j < dev->data->nb_rx_queues; j++) {
844 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
845 reg_offset = hns3_get_tqp_reg_offset(j);
846 xstats[*count].value = hns3_read_dev(hw,
847 reg_offset + hns3_rx_queue_strings[i].offset);
848 xstats[*count].id = *count;
853 /* Get tx queue stats */
854 for (j = 0; j < dev->data->nb_tx_queues; j++) {
855 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
856 reg_offset = hns3_get_tqp_reg_offset(j);
857 xstats[*count].value = hns3_read_dev(hw,
858 reg_offset + hns3_tx_queue_strings[i].offset);
859 xstats[*count].id = *count;
866 hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
869 struct hns3_rx_dfx_stats *dfx_stats;
870 struct hns3_rx_queue *rxq;
874 for (i = 0; i < dev->data->nb_rx_queues; i++) {
875 rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
879 dfx_stats = &rxq->dfx_stats;
880 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
881 val = (char *)dfx_stats +
882 hns3_rxq_dfx_stats_strings[j].offset;
883 xstats[*count].value = *(uint64_t *)val;
884 xstats[*count].id = *count;
891 hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
894 struct hns3_tx_dfx_stats *dfx_stats;
895 struct hns3_tx_queue *txq;
899 for (i = 0; i < dev->data->nb_tx_queues; i++) {
900 txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
904 dfx_stats = &txq->dfx_stats;
905 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
906 val = (char *)dfx_stats +
907 hns3_txq_dfx_stats_strings[j].offset;
908 xstats[*count].value = *(uint64_t *)val;
909 xstats[*count].id = *count;
/* Append both Rx and Tx dfx stats to the xstats array. */
static void
hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       int *count)
{
	hns3_rxq_dfx_stats_get(dev, xstats, count);
	hns3_txq_dfx_stats_get(dev, xstats, count);
}
924 hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
927 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
928 struct hns3_tqp_stats *stats = &hw->tqp_stats;
929 struct hns3_rx_basic_stats *rxq_stats;
930 struct hns3_rx_queue *rxq;
934 for (i = 0; i < dev->data->nb_rx_queues; i++) {
935 rxq = dev->data->rx_queues[i];
939 hns3_rcb_rx_ring_stats_get(rxq, stats);
940 rxq_stats = &rxq->basic_stats;
941 rxq_stats->errors = rxq->err_stats.l2_errors +
942 rxq->err_stats.pkt_len_errors;
945 * If HW statistics are reset by stats_reset, but a lot of
946 * residual packets exist in the hardware queue and these
947 * packets are error packets, flip overflow may occurred.
948 * So return 0 in this case.
951 stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
952 stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
953 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
954 val = (char *)rxq_stats +
955 hns3_rxq_basic_stats_strings[j].offset;
956 xstats[*count].value = *(uint64_t *)val;
957 xstats[*count].id = *count;
964 hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
967 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
968 struct hns3_tqp_stats *stats = &hw->tqp_stats;
969 struct hns3_tx_basic_stats *txq_stats;
970 struct hns3_tx_queue *txq;
974 for (i = 0; i < dev->data->nb_tx_queues; i++) {
975 txq = dev->data->tx_queues[i];
979 hns3_rcb_tx_ring_stats_get(txq, stats);
981 txq_stats = &txq->basic_stats;
982 txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
984 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
985 val = (char *)txq_stats +
986 hns3_txq_basic_stats_strings[j].offset;
987 xstats[*count].value = *(uint64_t *)val;
988 xstats[*count].id = *count;
/* Append both Rx and Tx basic stats to the xstats array. */
static void
hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	hns3_rxq_basic_stats_get(dev, xstats, count);
	hns3_txq_basic_stats_get(dev, xstats, count);
}
1003 hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1006 struct hns3_adapter *hns = dev->data->dev_private;
1007 struct hns3_hw *hw = &hns->hw;
1008 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
1009 int imissed_stats_num;
1014 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1016 for (i = 0; i < imissed_stats_num; i++) {
1017 addr = (char *)imissed_stats +
1018 hns3_imissed_stats_strings[i].offset;
1019 xstats[cnt].value = *(uint64_t *)addr;
1020 xstats[cnt].id = cnt;
1028 * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
1030 * Pointer to Ethernet device.
1032 * A pointer to a table of structure of type *rte_eth_xstat*
1033 * to be filled with device statistics ids and values.
1034 * This parameter can be set to NULL if and only if n is 0.
1036 * The size of the xstats array (number of elements).
1037 * If lower than the required number of elements, the function returns the
1038 * required number of elements.
1039 * If equal to zero, the xstats parameter must be NULL, the function returns
1040 * the required number of elements.
1042 * 0 on fail, count(The size of the statistics elements) on success.
1045 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1048 struct hns3_adapter *hns = dev->data->dev_private;
1049 struct hns3_hw *hw = &hns->hw;
1050 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
1051 struct hns3_reset_stats *reset_stats = &hw->reset.stats;
1052 struct hns3_rx_bd_errors_stats *rx_err_stats;
1053 struct hns3_rx_queue *rxq;
1059 count = hns3_xstats_calc_num(dev);
1065 rte_spinlock_lock(&hw->stats_lock);
1066 hns3_tqp_basic_stats_get(dev, xstats, &count);
1069 /* Update Mac stats */
1070 ret = hns3_query_update_mac_stats(dev);
1072 hns3_err(hw, "Update Mac stats fail : %d", ret);
1073 rte_spinlock_unlock(&hw->stats_lock);
1077 /* Get MAC stats from hw->hw_xstats.mac_stats struct */
1078 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1079 addr = (char *)mac_stats + hns3_mac_strings[i].offset;
1080 xstats[count].value = *(uint64_t *)addr;
1081 xstats[count].id = count;
1085 rte_spinlock_unlock(&hw->stats_lock);
1087 ret = hns3_update_imissed_stats(hw, false);
1089 hns3_err(hw, "update imissed stats failed, ret = %d", ret);
1093 hns3_imissed_stats_get(dev, xstats, &count);
1095 /* Get the reset stat */
1096 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1097 addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
1098 xstats[count].value = *(uint64_t *)addr;
1099 xstats[count].id = count;
1103 /* Get the Rx BD errors stats */
1104 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1105 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1106 rxq = dev->data->rx_queues[j];
1108 rx_err_stats = &rxq->err_stats;
1109 addr = (char *)rx_err_stats +
1110 hns3_rx_bd_error_strings[i].offset;
1111 xstats[count].value = *(uint64_t *)addr;
1112 xstats[count].id = count;
1118 rte_spinlock_lock(&hw->stats_lock);
1119 hns3_tqp_dfx_stats_get(dev, xstats, &count);
1120 hns3_queue_stats_get(dev, xstats, &count);
1121 rte_spinlock_unlock(&hw->stats_lock);
1127 hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
1128 struct rte_eth_xstat_name *xstats_names,
1133 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1134 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
1135 snprintf(xstats_names[*count].name,
1136 sizeof(xstats_names[*count].name),
1138 hns3_rxq_basic_stats_strings[j].name);
1142 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1143 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
1144 snprintf(xstats_names[*count].name,
1145 sizeof(xstats_names[*count].name),
1147 hns3_txq_basic_stats_strings[j].name);
1154 hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
1155 struct rte_eth_xstat_name *xstats_names,
1160 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1161 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
1162 snprintf(xstats_names[*count].name,
1163 sizeof(xstats_names[*count].name),
1165 hns3_rxq_dfx_stats_strings[j].name);
1170 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1171 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
1172 snprintf(xstats_names[*count].name,
1173 sizeof(xstats_names[*count].name),
1175 hns3_txq_dfx_stats_strings[j].name);
/*
 * Fill 'xstats_names' with the names of the imissed (hardware-dropped packet)
 * statistics. The number of entries is device dependent and comes from
 * hns3_get_imissed_stats_num().
 */
1182 hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
1183 struct rte_eth_xstat_name *xstats_names,
1186 struct hns3_adapter *hns = dev->data->dev_private;
/* Local copy of the caller's running output index; presumably written back
 * to '*count' on the tail lines missing from this extract -- TODO confirm. */
1187 uint32_t cnt = *count;
1188 int imissed_stats_num;
1191 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1193 for (i = 0; i < imissed_stats_num; i++) {
1194 snprintf(xstats_names[cnt].name,
1195 sizeof(xstats_names[cnt].name),
1196 "%s", hns3_imissed_stats_strings[i].name);
1204 * Retrieve names of extended statistics of an Ethernet device.
1206 * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
1208 * xstats_names[i].name => xstats[i].value
1210 * And the array index is same with id field of 'struct rte_eth_xstat':
1213 * This assumption makes key-value pair matching less flexible but simpler.
1216 * Pointer to Ethernet device.
1217 * @param xstats_names
1218 * An rte_eth_xstat_name array of at least *size* elements to
1219 * be filled. If set to NULL, the function returns the required number
1222 * The size of the xstats_names array (number of elements).
1224 * - A positive value lower or equal to size: success. The return value
1225 * is the number of entries filled in the stats table.
1228 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
1229 struct rte_eth_xstat_name *xstats_names,
1230 __rte_unused unsigned int size)
1232 struct hns3_adapter *hns = dev->data->dev_private;
1233 int cnt_stats = hns3_xstats_calc_num(dev);
/* When the caller only wants the required array size, presumably
 * 'cnt_stats' is returned here (return line missing from this extract). */
1237 if (xstats_names == NULL)
/* Name order must match the value order produced by hns3_dev_xstats_get():
 * tqp basic, MAC, imissed, reset, Rx BD error, DFX, then raw queue stats. */
1240 hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
1242 /* Note: the size limit is checked in rte_eth_xstats_get_names() */
1244 /* Get MAC name from hw->hw_xstats.mac_stats struct */
1245 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1246 snprintf(xstats_names[count].name,
1247 sizeof(xstats_names[count].name),
1248 "%s", hns3_mac_strings[i].name);
1253 hns3_imissed_stats_name_get(dev, xstats_names, &count);
1255 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1256 snprintf(xstats_names[count].name,
1257 sizeof(xstats_names[count].name),
1258 "%s", hns3_reset_stats_strings[i].name);
/* Per-Rx-queue BD (buffer descriptor) error statistic names. */
1262 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1263 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1264 snprintf(xstats_names[count].name,
1265 sizeof(xstats_names[count].name),
/* NOTE(review): format-string line missing from this extract. */
1267 hns3_rx_bd_error_strings[i].name);
1272 hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
1274 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1275 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
1276 snprintf(xstats_names[count].name,
1277 sizeof(xstats_names[count].name),
1278 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
1283 for (j = 0; j < dev->data->nb_tx_queues; j++) {
1284 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
1285 snprintf(xstats_names[count].name,
1286 sizeof(xstats_names[count].name),
1287 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
1296 * Retrieve extended statistics of an Ethernet device.
1299 * Pointer to Ethernet device.
1301 * A pointer to an ids array passed by application. This tells which
1302 * statistics values function should retrieve. This parameter
1303 * can be set to NULL if size is 0. In this case function will retrieve
1304 * all available statistics.
1306 * A pointer to a table to be filled with device statistics values.
1308 * The size of the ids array (number of elements).
1310 * - A positive value lower or equal to size: success. The return value
1311 * is the number of entries filled in the stats table.
1312 * - A positive value higher than size: error, the given statistics table
1313 * is too small. The return value corresponds to the size that should
1314 * be given to succeed. The entries in the table are not valid and
1315 * shall not be used by the caller.
1319 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1320 uint64_t *values, uint32_t size)
1322 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1323 struct hns3_adapter *hns = dev->data->dev_private;
1324 struct rte_eth_xstat *values_copy;
1325 struct hns3_hw *hw = &hns->hw;
1326 uint32_t count_value;
/* Both NULL: presumably return the total count (return line missing). */
1330 if (ids == NULL && values == NULL)
/* Table too small: presumably return 'cnt_stats' per the contract above
 * (return line missing from this extract). */
1334 if (size < cnt_stats)
/* Fetch ALL stats into a scratch buffer, then select the requested ids. */
1337 len = cnt_stats * sizeof(struct rte_eth_xstat);
1338 values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1339 if (values_copy == NULL) {
1340 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics values",
1345 count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
1346 if (count_value != cnt_stats) {
1347 rte_free(values_copy);
/* ids == NULL with a non-NULL values table: copy every statistic. */
1351 if (ids == NULL && values != NULL) {
1352 for (i = 0; i < cnt_stats; i++)
1353 memcpy(&values[i], &values_copy[i].value,
1356 rte_free(values_copy);
/* Validate each requested id before copying its value out. */
1360 for (i = 0; i < size; i++) {
1361 if (ids[i] >= cnt_stats) {
1362 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
1363 i, ids[i], cnt_stats);
1364 rte_free(values_copy);
1367 memcpy(&values[i], &values_copy[ids[i]].value,
1371 rte_free(values_copy);
1376 * Retrieve names of extended statistics of an Ethernet device.
1379 * Pointer to Ethernet device.
1381 * IDs array given by app to retrieve specific statistics
1382 * @param xstats_names
1383 * An rte_eth_xstat_name array of at least *size* elements to
1384 * be filled. If set to NULL, the function returns the required number
1387 * The size of the xstats_names array (number of elements).
1389 * - A positive value lower or equal to size: success. The return value
1390 * is the number of entries filled in the stats table.
1391 * - A positive value higher than size: error, the given statistics table
1392 * is too small. The return value corresponds to the size that should
1393 * be given to succeed. The entries in the table are not valid and
1394 * shall not be used by the caller.
1397 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1398 const uint64_t *ids,
1399 struct rte_eth_xstat_name *xstats_names,
1402 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1403 struct hns3_adapter *hns = dev->data->dev_private;
1404 struct rte_eth_xstat_name *names_copy;
1405 struct hns3_hw *hw = &hns->hw;
/* NULL output array: presumably return the required count (line missing). */
1409 if (xstats_names == NULL)
/* NOTE(review): this branch appears to handle ids == NULL by delegating to
 * the full-names path; the condition linking lines 1413 and 1416 is not
 * fully visible in this extract -- confirm against the full source. */
1413 if (size < cnt_stats)
1416 return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
/* Fetch ALL names into a scratch buffer, then select the requested ids. */
1419 len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1420 names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1421 if (names_copy == NULL) {
1422 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics names",
1427 (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1429 for (i = 0; i < size; i++) {
1430 if (ids[i] >= cnt_stats) {
1431 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
1432 i, ids[i], cnt_stats);
1433 rte_free(names_copy);
1436 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1437 "%s", names_copy[ids[i]].name);
1440 rte_free(names_copy);
/*
 * Zero the software DFX (diagnostic) statistics of every Rx and Tx queue.
 * Software counters only; no hardware registers are touched here.
 */
1445 hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1447 struct hns3_rx_queue *rxq;
1448 struct hns3_tx_queue *txq;
1451 /* Clear Rx dfx stats */
1452 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1453 rxq = dev->data->rx_queues[i];
1455 memset(&rxq->dfx_stats, 0,
1456 sizeof(struct hns3_rx_dfx_stats));
1459 /* Clear Tx dfx stats */
1460 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1461 txq = dev->data->tx_queues[i];
1463 memset(&txq->dfx_stats, 0,
1464 sizeof(struct hns3_tx_dfx_stats));
/*
 * .xstats_reset ops: clear every extended-statistics source -- tqp stats
 * (via hns3_stats_reset), per-queue DFX stats, the software reset counters,
 * and the MAC statistics -- holding 'stats_lock' for the software clears.
 */
1469 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1471 struct hns3_adapter *hns = dev->data->dev_private;
1472 struct hns3_hw *hw = &hns->hw;
1475 /* Clear tqp stats */
1476 ret = hns3_stats_reset(dev);
1480 rte_spinlock_lock(&hw->stats_lock);
1481 hns3_tqp_dfx_stats_clear(dev);
1483 /* Clear reset stats */
1484 memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1489 /* HW registers are cleared on read */
1490 ret = hns3_mac_stats_reset(dev);
1493 rte_spinlock_unlock(&hw->stats_lock);
/*
 * Allocate the per-tqp Rx/Tx packet-count arrays ('tqps_num' uint64_t each,
 * zero-initialized by rte_zmalloc). On failure of the second allocation the
 * first is freed and NULLed so no partial state is left behind; presumably
 * an error code is returned on the lines missing from this extract.
 */
1499 hns3_tqp_stats_init(struct hns3_hw *hw)
1501 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1503 tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1504 sizeof(uint64_t) * hw->tqps_num, 0);
1505 if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1506 hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1510 tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1511 sizeof(uint64_t) * hw->tqps_num, 0);
1512 if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1513 hns3_err(hw, "failed to allocate tx_ring pkt_num.");
/* Roll back the Rx allocation so init failure leaves a clean state. */
1514 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1515 tqp_stats->rcb_rx_ring_pktnum = NULL;
/*
 * Release the per-tqp packet-count arrays allocated by hns3_tqp_stats_init().
 * Pointers are NULLed after free to guard against double-free/use-after-free.
 */
1523 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1525 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1527 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1528 tqp_stats->rcb_rx_ring_pktnum = NULL;
1529 rte_free(tqp_stats->rcb_tx_ring_pktnum);
1530 tqp_stats->rcb_tx_ring_pktnum = NULL;
/*
 * Zero the software tqp counters: the aggregate Rx/Tx record totals and the
 * per-queue packet-count arrays (tqps_num entries each).
 */
1534 hns3_tqp_stats_clear(struct hns3_hw *hw)
1536 struct hns3_tqp_stats *stats = &hw->tqp_stats;
1538 stats->rcb_rx_ring_pktnum_rcd = 0;
1539 stats->rcb_tx_ring_pktnum_rcd = 0;
1540 memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1541 memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
/*
 * One-time statistics setup: initialize 'stats_lock', clear the hardware
 * imissed registers (hns3_update_imissed_stats with is_clearing == true --
 * TODO confirm the flag's meaning against its definition), then allocate
 * the tqp counter arrays via hns3_tqp_stats_init().
 */
1545 hns3_stats_init(struct hns3_hw *hw)
1549 rte_spinlock_init(&hw->stats_lock);
1550 /* Hardware statistics of imissed registers cleared. */
1551 ret = hns3_update_imissed_stats(hw, true);
1553 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1557 return hns3_tqp_stats_init(hw);
/* Counterpart of hns3_stats_init(): release the tqp counter arrays. */
1561 hns3_stats_uninit(struct hns3_hw *hw)
1563 hns3_tqp_stats_uninit(hw);
/*
 * Pull the RCB ring counters of every Rx and Tx queue into hw->tqp_stats.
 * Caller is expected to hold 'stats_lock' (as hns3_update_hw_stats does);
 * not verifiable from this extract alone -- TODO confirm.
 */
1567 hns3_update_queues_stats(struct hns3_hw *hw)
1569 struct rte_eth_dev_data *data = hw->data;
1570 struct hns3_rx_queue *rxq;
1571 struct hns3_tx_queue *txq;
1574 for (i = 0; i < data->nb_rx_queues; i++) {
1575 rxq = data->rx_queues[i];
1577 hns3_rcb_rx_ring_stats_get(rxq, &hw->tqp_stats);
1580 for (i = 0; i < data->nb_tx_queues; i++) {
1581 txq = data->tx_queues[i];
1583 hns3_rcb_tx_ring_stats_get(txq, &hw->tqp_stats);
1588 * Some hardware statistics registers are not 64-bit. If hardware statistics
1589 * are not read for a long time, these narrower counters may wrap around
1590 * (overflow). This function updates them from the periodic task so the
1590 * accumulated software totals stay correct.
1593 hns3_update_hw_stats(struct hns3_hw *hw)
1595 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1597 rte_spinlock_lock(&hw->stats_lock);
1599 hns3_update_mac_stats(hw);
1601 hns3_update_queues_stats(hw);
1602 rte_spinlock_unlock(&hw->stats_lock);