/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */
5 #include <rte_ethdev.h>
7 #include <rte_malloc.h>
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
14 /* The statistics of the per-rxq basic stats */
15 static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
17 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
19 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
21 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
24 /* The statistics of the per-txq basic stats */
25 static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
27 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
29 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
33 static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
34 {"mac_tx_mac_pause_num",
35 HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
36 {"mac_rx_mac_pause_num",
37 HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
38 {"mac_tx_control_pkt_num",
39 HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
40 {"mac_rx_control_pkt_num",
41 HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
42 {"mac_tx_pfc_pkt_num",
43 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
44 {"mac_tx_pfc_pri0_pkt_num",
45 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
46 {"mac_tx_pfc_pri1_pkt_num",
47 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
48 {"mac_tx_pfc_pri2_pkt_num",
49 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
50 {"mac_tx_pfc_pri3_pkt_num",
51 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
52 {"mac_tx_pfc_pri4_pkt_num",
53 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
54 {"mac_tx_pfc_pri5_pkt_num",
55 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
56 {"mac_tx_pfc_pri6_pkt_num",
57 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
58 {"mac_tx_pfc_pri7_pkt_num",
59 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
60 {"mac_rx_pfc_pkt_num",
61 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
62 {"mac_rx_pfc_pri0_pkt_num",
63 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
64 {"mac_rx_pfc_pri1_pkt_num",
65 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
66 {"mac_rx_pfc_pri2_pkt_num",
67 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
68 {"mac_rx_pfc_pri3_pkt_num",
69 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
70 {"mac_rx_pfc_pri4_pkt_num",
71 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
72 {"mac_rx_pfc_pri5_pkt_num",
73 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
74 {"mac_rx_pfc_pri6_pkt_num",
75 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
76 {"mac_rx_pfc_pri7_pkt_num",
77 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
78 {"mac_tx_total_pkt_num",
79 HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
80 {"mac_tx_total_oct_num",
81 HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
82 {"mac_tx_good_pkt_num",
83 HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
84 {"mac_tx_bad_pkt_num",
85 HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
86 {"mac_tx_good_oct_num",
87 HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
88 {"mac_tx_bad_oct_num",
89 HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
90 {"mac_tx_uni_pkt_num",
91 HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
92 {"mac_tx_multi_pkt_num",
93 HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
94 {"mac_tx_broad_pkt_num",
95 HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
96 {"mac_tx_undersize_pkt_num",
97 HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
98 {"mac_tx_oversize_pkt_num",
99 HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
100 {"mac_tx_64_oct_pkt_num",
101 HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
102 {"mac_tx_65_127_oct_pkt_num",
103 HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
104 {"mac_tx_128_255_oct_pkt_num",
105 HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
106 {"mac_tx_256_511_oct_pkt_num",
107 HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
108 {"mac_tx_512_1023_oct_pkt_num",
109 HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
110 {"mac_tx_1024_1518_oct_pkt_num",
111 HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
112 {"mac_tx_1519_2047_oct_pkt_num",
113 HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
114 {"mac_tx_2048_4095_oct_pkt_num",
115 HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
116 {"mac_tx_4096_8191_oct_pkt_num",
117 HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
118 {"mac_tx_8192_9216_oct_pkt_num",
119 HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
120 {"mac_tx_9217_12287_oct_pkt_num",
121 HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
122 {"mac_tx_12288_16383_oct_pkt_num",
123 HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
124 {"mac_tx_1519_max_good_pkt_num",
125 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
126 {"mac_tx_1519_max_bad_pkt_num",
127 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
128 {"mac_rx_total_pkt_num",
129 HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
130 {"mac_rx_total_oct_num",
131 HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
132 {"mac_rx_good_pkt_num",
133 HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
134 {"mac_rx_bad_pkt_num",
135 HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
136 {"mac_rx_good_oct_num",
137 HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
138 {"mac_rx_bad_oct_num",
139 HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
140 {"mac_rx_uni_pkt_num",
141 HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
142 {"mac_rx_multi_pkt_num",
143 HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
144 {"mac_rx_broad_pkt_num",
145 HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
146 {"mac_rx_undersize_pkt_num",
147 HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
148 {"mac_rx_oversize_pkt_num",
149 HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
150 {"mac_rx_64_oct_pkt_num",
151 HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
152 {"mac_rx_65_127_oct_pkt_num",
153 HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
154 {"mac_rx_128_255_oct_pkt_num",
155 HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
156 {"mac_rx_256_511_oct_pkt_num",
157 HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
158 {"mac_rx_512_1023_oct_pkt_num",
159 HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
160 {"mac_rx_1024_1518_oct_pkt_num",
161 HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
162 {"mac_rx_1519_2047_oct_pkt_num",
163 HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
164 {"mac_rx_2048_4095_oct_pkt_num",
165 HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
166 {"mac_rx_4096_8191_oct_pkt_num",
167 HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
168 {"mac_rx_8192_9216_oct_pkt_num",
169 HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
170 {"mac_rx_9217_12287_oct_pkt_num",
171 HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
172 {"mac_rx_12288_16383_oct_pkt_num",
173 HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
174 {"mac_rx_1519_max_good_pkt_num",
175 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
176 {"mac_rx_1519_max_bad_pkt_num",
177 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
178 {"mac_tx_fragment_pkt_num",
179 HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
180 {"mac_tx_undermin_pkt_num",
181 HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
182 {"mac_tx_jabber_pkt_num",
183 HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
184 {"mac_tx_err_all_pkt_num",
185 HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
186 {"mac_tx_from_app_good_pkt_num",
187 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
188 {"mac_tx_from_app_bad_pkt_num",
189 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
190 {"mac_rx_fragment_pkt_num",
191 HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
192 {"mac_rx_undermin_pkt_num",
193 HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
194 {"mac_rx_jabber_pkt_num",
195 HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
196 {"mac_rx_fcs_err_pkt_num",
197 HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
198 {"mac_rx_send_app_good_pkt_num",
199 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
200 {"mac_rx_send_app_bad_pkt_num",
201 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
204 static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
205 {"MAC_AFIFO_TNL_INT_R",
206 HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_int_cnt)},
207 {"PPU_MPF_ABNORMAL_INT_ST2_MSIX",
208 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_msix_cnt)},
209 {"SSU_PORT_BASED_ERR_INT_MSIX",
210 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_int_cnt)},
211 {"PPP_PF_ABNORMAL_INT_ST0",
212 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_int_cnt)},
213 {"PPU_PF_ABNORMAL_INT_ST_MSIX",
214 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_msix_cnt)},
215 {"IMP_TCM_ECC_INT_STS",
216 HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_tcm_ecc_int_cnt)},
217 {"CMDQ_MEM_ECC_INT_STS",
218 HNS3_ERR_INT_STATS_FIELD_OFFSET(cmdq_mem_ecc_int_cnt)},
219 {"IMP_RD_POISON_INT_STS",
220 HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_rd_poison_int_cnt)},
221 {"TQP_INT_ECC_INT_STS",
222 HNS3_ERR_INT_STATS_FIELD_OFFSET(tqp_int_ecc_int_cnt)},
224 HNS3_ERR_INT_STATS_FIELD_OFFSET(msix_ecc_int_cnt)},
225 {"SSU_ECC_MULTI_BIT_INT_0",
226 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_0_cnt)},
227 {"SSU_ECC_MULTI_BIT_INT_1",
228 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_1_cnt)},
229 {"SSU_COMMON_ERR_INT",
230 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_common_ecc_int_cnt)},
232 HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_int_cnt)},
233 {"PPP_MPF_ABNORMAL_INT_ST1",
234 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st1_cnt)},
235 {"PPP_MPF_ABNORMAL_INT_ST3",
236 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st3_cnt)},
237 {"PPU_MPF_ABNORMAL_INT_ST1",
238 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st1_cnt)},
239 {"PPU_MPF_ABNORMAL_INT_ST2_RAS",
240 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_ras_cnt)},
241 {"PPU_MPF_ABNORMAL_INT_ST3",
242 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st3_cnt)},
244 HNS3_ERR_INT_STATS_FIELD_OFFSET(tm_sch_int_cnt)},
246 HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_fifo_int_cnt)},
248 HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_ecc_int_cnt)},
250 HNS3_ERR_INT_STATS_FIELD_OFFSET(ncsi_ecc_int_cnt)},
251 {"SSU_PORT_BASED_ERR_INT_RAS",
252 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_err_int_cnt)},
253 {"SSU_FIFO_OVERFLOW_INT",
254 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_fifo_overflow_int_cnt)},
256 HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ets_tcg_int_cnt)},
257 {"IGU_EGU_TNL_INT_STS",
258 HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_egu_tnl_int_cnt)},
259 {"PPU_PF_ABNORMAL_INT_ST_RAS",
260 HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_ras_cnt)},
263 /* The statistic of reset */
264 static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
266 HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
268 HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
270 HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
272 HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
273 {"RESET_SUCCESS_CNT",
274 HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
276 HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
278 HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
281 /* The statistic of errors in Rx BD */
282 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
284 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
286 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
289 /* The dfx statistic in Rx datapath */
290 static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
291 {"L3_CHECKSUM_ERRORS",
292 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
293 {"L4_CHECKSUM_ERRORS",
294 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
295 {"OL3_CHECKSUM_ERRORS",
296 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
297 {"OL4_CHECKSUM_ERRORS",
298 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
301 /* The dfx statistic in Tx datapath */
302 static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
303 {"OVER_LENGTH_PKT_CNT",
304 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
305 {"EXCEED_LIMITED_BD_PKT_CNT",
306 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
307 {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
308 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
309 {"UNSUPPORTED_TUNNEL_PKT_CNT",
310 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
312 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
313 {"SHORT_PKT_PAD_FAIL_CNT",
314 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
317 /* The statistic of rx queue */
318 static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
319 {"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
322 /* The statistic of tx queue */
323 static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
324 {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
327 /* The statistic of imissed packet */
328 static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
330 HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
/* Element counts of the xstats string tables above. */
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
	sizeof(hns3_mac_strings[0]))

#define HNS3_NUM_ERROR_INT_XSTATS (sizeof(hns3_error_int_stats_strings) / \
	sizeof(hns3_error_int_stats_strings[0]))

#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
	sizeof(hns3_reset_stats_strings[0]))

#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
	sizeof(hns3_rx_bd_error_strings[0]))

#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
	sizeof(hns3_rxq_dfx_stats_strings[0]))

#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
	sizeof(hns3_txq_dfx_stats_strings[0]))

#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
	sizeof(hns3_rx_queue_strings[0]))

#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
	sizeof(hns3_tx_queue_strings[0]))

#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
	sizeof(hns3_rxq_basic_stats_strings[0]))

#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
	sizeof(hns3_txq_basic_stats_strings[0]))

#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
	sizeof(hns3_imissed_stats_strings[0]))

/* Number of PF-only xstats that do not scale with the queue count. */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
			    HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)

static void hns3_tqp_stats_clear(struct hns3_hw *hw);
static void hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev);
373 * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
374 * This command is used before send 'query_mac_stat command', the descriptor
375 * number of 'query_mac_stat command' must match with reg_num in this command.
377 * Pointer to structure hns3_hw.
382 hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
384 uint64_t *data = (uint64_t *)(&hw->mac_stats);
385 struct hns3_cmd_desc *desc;
390 desc = rte_malloc("hns3_mac_desc",
391 desc_num * sizeof(struct hns3_cmd_desc), 0);
393 hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
397 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
398 ret = hns3_cmd_send(hw, desc, desc_num);
400 hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
405 for (i = 0; i < desc_num; i++) {
406 /* For special opcode 0034, only the first desc has the head */
408 desc_data = (uint64_t *)(&desc[i].data[0]);
409 n = HNS3_RD_FIRST_STATS_NUM;
411 desc_data = (uint64_t *)(&desc[i]);
412 n = HNS3_RD_OTHER_STATS_NUM;
415 for (k = 0; k < n; k++) {
416 *data += rte_le_to_cpu_64(*desc_data);
427 * Query Mac stat reg num command ,opcode id: 0x0033.
428 * This command is used before send 'query_mac_stat command', the descriptor
429 * number of 'query_mac_stat command' must match with reg_num in this command.
431 * Pointer to structure rte_eth_stats.
436 hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
438 struct hns3_adapter *hns = dev->data->dev_private;
439 struct hns3_hw *hw = &hns->hw;
440 struct hns3_cmd_desc desc;
445 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
446 ret = hns3_cmd_send(hw, &desc, 1);
451 * The num of MAC statistics registers that are provided by IMP in this
454 desc_data = (uint32_t *)(&desc.data[0]);
455 reg_num = rte_le_to_cpu_32(*desc_data);
458 * The descriptor number of 'query_additional_mac_stat command' is
459 * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)';
460 * This value is 83 in this version
462 *desc_num = 1 + ((reg_num - 3) >> 2) +
463 (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
469 hns3_query_update_mac_stats(struct rte_eth_dev *dev)
471 struct hns3_adapter *hns = dev->data->dev_private;
472 struct hns3_hw *hw = &hns->hw;
476 ret = hns3_mac_query_reg_num(dev, &desc_num);
478 ret = hns3_update_mac_stats(hw, desc_num);
480 hns3_err(hw, "Query mac reg num fail : %d", ret);
484 /* Get tqp stats from register */
486 hns3_update_tqp_stats(struct hns3_hw *hw)
488 struct hns3_tqp_stats *stats = &hw->tqp_stats;
489 struct hns3_cmd_desc desc;
494 for (i = 0; i < hw->tqps_num; i++) {
495 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
498 desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
499 ret = hns3_cmd_send(hw, &desc, 1);
501 hns3_err(hw, "Failed to query RX No.%u queue stat: %d",
505 cnt = rte_le_to_cpu_32(desc.data[1]);
506 stats->rcb_rx_ring_pktnum_rcd += cnt;
507 stats->rcb_rx_ring_pktnum[i] += cnt;
509 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
512 desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
513 ret = hns3_cmd_send(hw, &desc, 1);
515 hns3_err(hw, "Failed to query TX No.%u queue stat: %d",
519 cnt = rte_le_to_cpu_32(desc.data[1]);
520 stats->rcb_tx_ring_pktnum_rcd += cnt;
521 stats->rcb_tx_ring_pktnum[i] += cnt;
528 hns3_update_rpu_drop_stats(struct hns3_hw *hw)
530 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
531 struct hns3_query_rpu_cmd *req;
532 struct hns3_cmd_desc desc;
537 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
538 req = (struct hns3_query_rpu_cmd *)desc.data;
541 * tc_num is 0, means rpu stats of all TC channels will be
545 req->tc_queue_num = rte_cpu_to_le_32(tc_num);
546 ret = hns3_cmd_send(hw, &desc, 1);
548 hns3_err(hw, "failed to query RPU stats: %d", ret);
552 cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
553 stats->rpu_rx_drop_cnt += cnt;
559 hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
563 ret = hns3_update_rpu_drop_stats(hw);
568 memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
574 * Query tqp tx queue statistics ,opcode id: 0x0B03.
575 * Query tqp rx queue statistics ,opcode id: 0x0B13.
576 * Get all statistics of a port.
578 * Pointer to Ethernet device.
580 * Pointer to structure rte_eth_stats.
585 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
587 struct hns3_adapter *hns = eth_dev->data->dev_private;
588 struct hns3_hw *hw = &hns->hw;
589 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
590 struct hns3_tqp_stats *stats = &hw->tqp_stats;
591 struct hns3_rx_queue *rxq;
596 /* Update tqp stats by read register */
597 ret = hns3_update_tqp_stats(hw);
599 hns3_err(hw, "Update tqp stats fail : %d", ret);
604 /* Update imissed stats */
605 ret = hns3_update_imissed_stats(hw, false);
607 hns3_err(hw, "update imissed stats failed, ret = %d",
612 rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt;
615 /* Get the error stats and bytes of received packets */
616 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
617 rxq = eth_dev->data->rx_queues[i];
619 cnt = rxq->err_stats.l2_errors +
620 rxq->err_stats.pkt_len_errors;
621 rte_stats->ierrors += cnt;
623 rte_stats->ibytes += rxq->basic_stats.bytes;
627 /* Get the bytes of received packets */
628 struct hns3_tx_queue *txq;
629 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
630 txq = eth_dev->data->tx_queues[i];
632 rte_stats->obytes += txq->basic_stats.bytes;
635 rte_stats->oerrors = 0;
637 * If HW statistics are reset by stats_reset, but a lot of residual
638 * packets exist in the hardware queue and these packets are error
639 * packets, flip overflow may occurred. So return 0 in this case.
641 rte_stats->ipackets =
642 stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
643 stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
644 rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
646 rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
652 hns3_stats_reset(struct rte_eth_dev *eth_dev)
654 struct hns3_adapter *hns = eth_dev->data->dev_private;
655 struct hns3_hw *hw = &hns->hw;
656 struct hns3_cmd_desc desc_reset;
657 struct hns3_rx_queue *rxq;
662 * Note: Reading hardware statistics of rx/tx queue packet number
665 for (i = 0; i < hw->tqps_num; i++) {
666 hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
668 desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
669 ret = hns3_cmd_send(hw, &desc_reset, 1);
671 hns3_err(hw, "Failed to reset RX No.%u queue stat: %d",
676 hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
678 desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
679 ret = hns3_cmd_send(hw, &desc_reset, 1);
681 hns3_err(hw, "Failed to reset TX No.%u queue stat: %d",
689 * Note: Reading hardware statistics of imissed registers will
692 ret = hns3_update_imissed_stats(hw, true);
694 hns3_err(hw, "clear imissed stats failed, ret = %d",
701 * Clear soft stats of rx error packet which will be dropped
704 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
705 rxq = eth_dev->data->rx_queues[i];
707 rxq->err_stats.pkt_len_errors = 0;
708 rxq->err_stats.l2_errors = 0;
713 * 'packets' in hns3_tx_basic_stats and hns3_rx_basic_stats come
714 * from hw->tqp_stats. And clearing tqp stats is like clearing
717 hns3_tqp_stats_clear(hw);
718 hns3_tqp_basic_stats_clear(eth_dev);
724 hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
726 struct hns3_adapter *hns = dev->data->dev_private;
727 struct hns3_hw *hw = &hns->hw;
728 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
731 ret = hns3_query_update_mac_stats(dev);
733 hns3_err(hw, "Clear Mac stats fail : %d", ret);
737 memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
742 /* This function calculates the number of xstats based on the current config */
744 hns3_xstats_calc_num(struct rte_eth_dev *dev)
746 #define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
747 HNS3_NUM_RXQ_DFX_XSTATS + \
748 HNS3_NUM_RX_QUEUE_STATS + \
749 HNS3_NUM_RXQ_BASIC_STATS)
750 #define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
751 HNS3_NUM_TX_QUEUE_STATS + \
752 HNS3_NUM_TXQ_BASIC_STATS)
754 struct hns3_adapter *hns = dev->data->dev_private;
755 uint16_t nb_rx_q = dev->data->nb_rx_queues;
756 uint16_t nb_tx_q = dev->data->nb_tx_queues;
757 int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
758 int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
761 return rx_comm_stats_num + tx_comm_stats_num +
762 HNS3_NUM_RESET_XSTATS;
764 return rx_comm_stats_num + tx_comm_stats_num +
769 hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
772 struct hns3_adapter *hns = dev->data->dev_private;
773 struct hns3_hw *hw = &hns->hw;
777 /* Get rx queue stats */
778 for (j = 0; j < dev->data->nb_rx_queues; j++) {
779 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
780 reg_offset = hns3_get_tqp_reg_offset(j);
781 xstats[*count].value = hns3_read_dev(hw,
782 reg_offset + hns3_rx_queue_strings[i].offset);
783 xstats[*count].id = *count;
788 /* Get tx queue stats */
789 for (j = 0; j < dev->data->nb_tx_queues; j++) {
790 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
791 reg_offset = hns3_get_tqp_reg_offset(j);
792 xstats[*count].value = hns3_read_dev(hw,
793 reg_offset + hns3_tx_queue_strings[i].offset);
794 xstats[*count].id = *count;
801 hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
803 struct hns3_pf *pf = &hns->pf;
807 for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
808 if (strcmp(hns3_error_int_stats_strings[i].name, err) == 0) {
809 addr = (char *)&pf->abn_int_stats +
810 hns3_error_int_stats_strings[i].offset;
811 *(uint64_t *)addr += 1;
818 hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
821 struct hns3_rx_dfx_stats *dfx_stats;
822 struct hns3_rx_queue *rxq;
826 for (i = 0; i < dev->data->nb_rx_queues; i++) {
827 rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
831 dfx_stats = &rxq->dfx_stats;
832 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
833 val = (char *)dfx_stats +
834 hns3_rxq_dfx_stats_strings[j].offset;
835 xstats[*count].value = *(uint64_t *)val;
836 xstats[*count].id = *count;
843 hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
846 struct hns3_tx_dfx_stats *dfx_stats;
847 struct hns3_tx_queue *txq;
851 for (i = 0; i < dev->data->nb_tx_queues; i++) {
852 txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
856 dfx_stats = &txq->dfx_stats;
857 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
858 val = (char *)dfx_stats +
859 hns3_txq_dfx_stats_strings[j].offset;
860 xstats[*count].value = *(uint64_t *)val;
861 xstats[*count].id = *count;
/* Gather both Rx and Tx dfx per-queue counters. */
static void
hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       int *count)
{
	hns3_rxq_dfx_stats_get(dev, xstats, count);
	hns3_txq_dfx_stats_get(dev, xstats, count);
}
876 hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
879 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
880 struct hns3_tqp_stats *stats = &hw->tqp_stats;
881 struct hns3_rx_basic_stats *rxq_stats;
882 struct hns3_rx_queue *rxq;
886 for (i = 0; i < dev->data->nb_rx_queues; i++) {
887 rxq = dev->data->rx_queues[i];
891 rxq_stats = &rxq->basic_stats;
892 rxq_stats->errors = rxq->err_stats.l2_errors +
893 rxq->err_stats.pkt_len_errors;
895 * If HW statistics are reset by stats_reset, but a lot of
896 * residual packets exist in the hardware queue and these
897 * packets are error packets, flip overflow may occurred.
898 * So return 0 in this case.
901 stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
902 stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
903 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
904 val = (char *)rxq_stats +
905 hns3_rxq_basic_stats_strings[j].offset;
906 xstats[*count].value = *(uint64_t *)val;
907 xstats[*count].id = *count;
914 hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
917 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
918 struct hns3_tqp_stats *stats = &hw->tqp_stats;
919 struct hns3_tx_basic_stats *txq_stats;
920 struct hns3_tx_queue *txq;
924 for (i = 0; i < dev->data->nb_tx_queues; i++) {
925 txq = dev->data->tx_queues[i];
929 txq_stats = &txq->basic_stats;
930 txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
932 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
933 val = (char *)txq_stats +
934 hns3_txq_basic_stats_strings[j].offset;
935 xstats[*count].value = *(uint64_t *)val;
936 xstats[*count].id = *count;
943 hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
946 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949 /* Update tqp stats by read register */
950 ret = hns3_update_tqp_stats(hw);
952 hns3_err(hw, "Update tqp stats fail, ret = %d.", ret);
956 hns3_rxq_basic_stats_get(dev, xstats, count);
957 hns3_txq_basic_stats_get(dev, xstats, count);
963 * The function is only called by hns3_dev_xstats_reset to clear
964 * basic stats of per-queue. TQP stats are all cleared in hns3_stats_reset
965 * which is called before this function.
968 * Pointer to Ethernet device.
971 hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev)
973 struct hns3_tx_queue *txq;
974 struct hns3_rx_queue *rxq;
977 for (i = 0; i < dev->data->nb_rx_queues; i++) {
978 rxq = dev->data->rx_queues[i];
980 memset(&rxq->basic_stats, 0,
981 sizeof(struct hns3_rx_basic_stats));
984 for (i = 0; i < dev->data->nb_tx_queues; i++) {
985 txq = dev->data->tx_queues[i];
987 memset(&txq->basic_stats, 0,
988 sizeof(struct hns3_tx_basic_stats));
993 * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
995 * Pointer to Ethernet device.
997 * A pointer to a table of structure of type *rte_eth_xstat*
998 * to be filled with device statistics ids and values.
999 * This parameter can be set to NULL if n is 0.
1001 * The size of the xstats array (number of elements).
1003 * 0 on fail, count(The size of the statistics elements) on success.
1006 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1009 struct hns3_adapter *hns = dev->data->dev_private;
1010 struct hns3_pf *pf = &hns->pf;
1011 struct hns3_hw *hw = &hns->hw;
1012 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
1013 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
1014 struct hns3_reset_stats *reset_stats = &hw->reset.stats;
1015 struct hns3_rx_bd_errors_stats *rx_err_stats;
1016 struct hns3_rx_queue *rxq;
1025 count = hns3_xstats_calc_num(dev);
1031 ret = hns3_tqp_basic_stats_get(dev, xstats, &count);
1036 /* Update Mac stats */
1037 ret = hns3_query_update_mac_stats(dev);
1039 hns3_err(hw, "Update Mac stats fail : %d", ret);
1043 /* Get MAC stats from hw->hw_xstats.mac_stats struct */
1044 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1045 addr = (char *)mac_stats + hns3_mac_strings[i].offset;
1046 xstats[count].value = *(uint64_t *)addr;
1047 xstats[count].id = count;
1051 ret = hns3_update_imissed_stats(hw, false);
1053 hns3_err(hw, "update imissed stats failed, ret = %d",
1058 for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
1059 addr = (char *)imissed_stats +
1060 hns3_imissed_stats_strings[i].offset;
1061 xstats[count].value = *(uint64_t *)addr;
1062 xstats[count].id = count;
1066 for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
1067 addr = (char *)&pf->abn_int_stats +
1068 hns3_error_int_stats_strings[i].offset;
1069 xstats[count].value = *(uint64_t *)addr;
1070 xstats[count].id = count;
1075 /* Get the reset stat */
1076 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1077 addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
1078 xstats[count].value = *(uint64_t *)addr;
1079 xstats[count].id = count;
1083 /* Get the Rx BD errors stats */
1084 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1085 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1086 rxq = dev->data->rx_queues[j];
1088 rx_err_stats = &rxq->err_stats;
1089 addr = (char *)rx_err_stats +
1090 hns3_rx_bd_error_strings[i].offset;
1091 xstats[count].value = *(uint64_t *)addr;
1092 xstats[count].id = count;
1098 hns3_tqp_dfx_stats_get(dev, xstats, &count);
1099 hns3_queue_stats_get(dev, xstats, &count);
1105 hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
1106 struct rte_eth_xstat_name *xstats_names,
1111 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1112 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
1113 snprintf(xstats_names[*count].name,
1114 sizeof(xstats_names[*count].name),
1116 hns3_rxq_basic_stats_strings[j].name);
1120 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1121 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
1122 snprintf(xstats_names[*count].name,
1123 sizeof(xstats_names[*count].name),
1125 hns3_txq_basic_stats_strings[j].name);
1132 hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
1133 struct rte_eth_xstat_name *xstats_names,
1138 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1139 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
1140 snprintf(xstats_names[*count].name,
1141 sizeof(xstats_names[*count].name),
1143 hns3_rxq_dfx_stats_strings[j].name);
1148 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1149 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
1150 snprintf(xstats_names[*count].name,
1151 sizeof(xstats_names[*count].name),
1153 hns3_txq_dfx_stats_strings[j].name);
/*
 * Retrieve names of extended statistics of an Ethernet device.
 *
 * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
 * by array index:
 *  xstats_names[i].name => xstats[i].value
 *
 * And the array index is same with id field of 'struct rte_eth_xstat':
 *  xstats[i].id == i
 *
 * This assumption makes key-value pair matching less flexible but simpler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param xstats_names
 *   An rte_eth_xstat_name array of at least *size* elements to
 *   be filled. If set to NULL, the function returns the required number
 *   of elements.
 * @param size
 *   The size of the xstats_names array (number of elements).
 * @return
 *   - A positive value lower or equal to size: success. The return value
 *     is the number of entries filled in the stats table.
 */
/*
 * Fill xstats_names[] with every extended-stat name this driver exposes,
 * in exactly the same order hns3_dev_xstats_get() emits values, so that
 * xstats_names[i] describes xstats[i] (see the block comment above).
 * NOTE(review): several lines are outside this chunk — the early
 * "return cnt_stats" for a NULL array, the PF/VF branch that presumably
 * uses 'hns', closing braces, and count increments. Confirm against the
 * full file.
 */
1184 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
1185 struct rte_eth_xstat_name *xstats_names,
1186 __rte_unused unsigned int size)
1188 struct hns3_adapter *hns = dev->data->dev_private;
1189 int cnt_stats = hns3_xstats_calc_num(dev);
/* NULL array: the caller only wants the required entry count. */
1193 if (xstats_names == NULL)
/* Per-queue basic stats names come first, matching the value side. */
1196 hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
1198 /* Note: size limited checked in rte_eth_xstats_get_names() */
1200 /* Get MAC name from hw->hw_xstats.mac_stats struct */
1201 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1202 snprintf(xstats_names[count].name,
1203 sizeof(xstats_names[count].name),
1204 "%s", hns3_mac_strings[i].name);
/* Names of the imissed (HW-dropped packets) counters. */
1208 for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
1209 snprintf(xstats_names[count].name,
1210 sizeof(xstats_names[count].name),
1211 "%s", hns3_imissed_stats_strings[i].name);
/* Names of the error-interrupt counters. */
1215 for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
1216 snprintf(xstats_names[count].name,
1217 sizeof(xstats_names[count].name),
1218 "%s", hns3_error_int_stats_strings[i].name);
/* Names of the reset-event counters. */
1222 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1223 snprintf(xstats_names[count].name,
1224 sizeof(xstats_names[count].name),
1225 "%s", hns3_reset_stats_strings[i].name);
/* Per-Rx-queue BD error counter names. */
1229 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1230 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1231 snprintf(xstats_names[count].name,
1232 sizeof(xstats_names[count].name),
1234 hns3_rx_bd_error_strings[i].name);
/* DFX per-queue names, then the remaining per-queue counter names. */
1239 hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
1241 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1242 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
1243 snprintf(xstats_names[count].name,
1244 sizeof(xstats_names[count].name),
1245 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
1250 for (j = 0; j < dev->data->nb_tx_queues; j++) {
1251 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
1252 snprintf(xstats_names[count].name,
1253 sizeof(xstats_names[count].name),
1254 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
1263 * Retrieve extended statistics of an Ethernet device.
1266 * Pointer to Ethernet device.
1268 * A pointer to an ids array passed by application. This tells which
1269 * statistics values function should retrieve. This parameter
1270 * can be set to NULL if size is 0. In this case function will retrieve
1271 * all available statistics.
1273 * A pointer to a table to be filled with device statistics values.
1275 * The size of the ids array (number of elements).
1277 * - A positive value lower or equal to size: success. The return value
1278 * is the number of entries filled in the stats table.
1279 * - A positive value higher than size: error, the given statistics table
1280 * is too small. The return value corresponds to the size that should
1281 * be given to succeed. The entries in the table are not valid and
1282 * shall not be used by the caller.
/*
 * rte_eth_dev op: retrieve selected extended-stat values by id.
 * Takes a full snapshot of all xstats into a temporary array, then copies
 * out either every value (ids == NULL) or only the requested ids, after
 * validating each id against cnt_stats.
 * NOTE(review): the return statements, the memcpy length arguments and
 * the loop-variable declarations fall on lines outside this chunk —
 * confirm against the full file.
 */
1286 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1287 uint64_t *values, uint32_t size)
1289 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1290 struct hns3_adapter *hns = dev->data->dev_private;
1291 struct rte_eth_xstat *values_copy;
1292 struct hns3_hw *hw = &hns->hw;
1293 uint32_t count_value;
/* No ids and no output buffer: caller only wants the total count. */
1297 if (ids == NULL && values == NULL)
/* NOTE(review): this size check appears to sit inside an ids == NULL
 * branch whose opening line is not visible here — confirm. */
1301 if (size < cnt_stats)
/* Scratch snapshot of every stat so ids[] can index into it. */
1304 len = cnt_stats * sizeof(struct rte_eth_xstat);
1305 values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1306 if (values_copy == NULL) {
1307 hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
1308 "to store statistics values", len);
1312 count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
/* Snapshot must be complete; otherwise bail out and free the copy. */
1313 if (count_value != cnt_stats) {
1314 rte_free(values_copy);
/* ids == NULL with a buffer: copy out every value in id order. */
1318 if (ids == NULL && values != NULL) {
1319 for (i = 0; i < cnt_stats; i++)
1320 memcpy(&values[i], &values_copy[i].value,
1323 rte_free(values_copy);
/* Otherwise copy only the requested ids, rejecting out-of-range ones. */
1327 for (i = 0; i < size; i++) {
1328 if (ids[i] >= cnt_stats) {
1329 hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1330 "should < %u", i, ids[i], cnt_stats);
1331 rte_free(values_copy);
1334 memcpy(&values[i], &values_copy[ids[i]].value,
1338 rte_free(values_copy);
1343 * Retrieve names of extended statistics of an Ethernet device.
1346 * Pointer to Ethernet device.
1347 * @param xstats_names
1348 * An rte_eth_xstat_name array of at least *size* elements to
1349 * be filled. If set to NULL, the function returns the required number
1352 * IDs array given by app to retrieve specific statistics
1354 * The size of the xstats_names array (number of elements).
1356 * - A positive value lower or equal to size: success. The return value
1357 * is the number of entries filled in the stats table.
1358 * - A positive value higher than size: error, the given statistics table
1359 * is too small. The return value corresponds to the size that should
1360 * be given to succeed. The entries in the table are not valid and
1361 * shall not be used by the caller.
/*
 * rte_eth_dev op: retrieve selected extended-stat names by id.
 * With no ids it falls through to hns3_dev_xstats_get_names(); otherwise
 * it builds a full temporary name table and copies out the name at each
 * requested id, validating ids against cnt_stats.
 * NOTE(review): return statements and loop-variable declarations fall on
 * lines outside this chunk — confirm against the full file.
 */
1364 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1365 struct rte_eth_xstat_name *xstats_names,
1366 const uint64_t *ids, uint32_t size)
1368 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1369 struct hns3_adapter *hns = dev->data->dev_private;
1370 struct rte_eth_xstat_name *names_copy;
1371 struct hns3_hw *hw = &hns->hw;
/* NULL array: caller only wants the total count. */
1375 if (xstats_names == NULL)
/* NOTE(review): these two statements appear to sit inside an
 * ids == NULL branch whose opening line is not visible here — confirm. */
1379 if (size < cnt_stats)
1382 return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
/* Build the complete name table so ids[] can index into it. */
1385 len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1386 names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1387 if (names_copy == NULL) {
1388 hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
1389 "to store statistics names", len);
/* Result deliberately ignored: cnt_stats entries always fit here. */
1393 (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1395 for (i = 0; i < size; i++) {
/* Reject out-of-range ids before indexing names_copy. */
1396 if (ids[i] >= cnt_stats) {
1397 hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1398 "should < %u", i, ids[i], cnt_stats);
1399 rte_free(names_copy);
1402 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1403 "%s", names_copy[ids[i]].name);
1406 rte_free(names_copy);
/*
 * Zero the software DFX (debug) counters of every Rx and Tx queue.
 * NOTE(review): a NULL-queue guard likely sits on the missing lines
 * between loading each queue pointer and the memset — confirm against
 * the full file.
 */
1411 hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1413 struct hns3_rx_queue *rxq;
1414 struct hns3_tx_queue *txq;
1417 /* Clear Rx dfx stats */
1418 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1419 rxq = dev->data->rx_queues[i];
1421 memset(&rxq->dfx_stats, 0,
1422 sizeof(struct hns3_rx_dfx_stats));
1425 /* Clear Tx dfx stats */
1426 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1427 txq = dev->data->tx_queues[i];
1429 memset(&txq->dfx_stats, 0,
1430 sizeof(struct hns3_tx_dfx_stats));
/*
 * rte_eth_dev op: reset all extended statistics.
 * Clears the TQP/basic stats (hns3_stats_reset), the software DFX stats,
 * the reset-event counters, the MAC counters (via hns3_mac_stats_reset;
 * the HW registers are cleared on read) and the PF error-interrupt stats.
 * NOTE(review): error-return checks on 'ret' and any VF early-return sit
 * on lines outside this chunk — confirm against the full file.
 */
1435 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1437 struct hns3_adapter *hns = dev->data->dev_private;
1438 struct hns3_pf *pf = &hns->pf;
1441 /* Clear tqp stats */
1442 ret = hns3_stats_reset(dev);
1446 hns3_tqp_dfx_stats_clear(dev);
1448 /* Clear reset stats */
1449 memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1454 /* HW registers are cleared on read */
1455 ret = hns3_mac_stats_reset(dev);
1459 /* Clear error stats */
1460 memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats));
/*
 * Allocate the per-queue Rx/Tx packet-count arrays (one uint64_t per TQP,
 * hw->tqps_num entries each). rte_zmalloc zero-fills, so counters start
 * at 0. If the second allocation fails, the first is rolled back so the
 * struct is never left half-initialized.
 * NOTE(review): the return statements fall on lines outside this chunk —
 * presumably 0 on success and a negative errno on failure; confirm.
 */
1466 hns3_tqp_stats_init(struct hns3_hw *hw)
1468 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1470 tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1471 sizeof(uint64_t) * hw->tqps_num, 0);
1472 if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1473 hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1477 tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1478 sizeof(uint64_t) * hw->tqps_num, 0);
1479 if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1480 hns3_err(hw, "failed to allocate tx_ring pkt_num.");
/* Roll back the Rx allocation so no half-initialized state remains. */
1481 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1482 tqp_stats->rcb_rx_ring_pktnum = NULL;
/*
 * Free the per-queue packet-count arrays allocated by
 * hns3_tqp_stats_init() and NULL the pointers so a repeated uninit is
 * harmless (rte_free(NULL) is a no-op).
 */
1490 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1492 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1494 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1495 tqp_stats->rcb_rx_ring_pktnum = NULL;
1496 rte_free(tqp_stats->rcb_tx_ring_pktnum);
1497 tqp_stats->rcb_tx_ring_pktnum = NULL;
/*
 * Zero the software TQP counters: the running Rx/Tx totals and every
 * per-queue slot in the packet-count arrays (hw->tqps_num entries each).
 */
1501 hns3_tqp_stats_clear(struct hns3_hw *hw)
1503 struct hns3_tqp_stats *stats = &hw->tqp_stats;
1505 stats->rcb_rx_ring_pktnum_rcd = 0;
1506 stats->rcb_tx_ring_pktnum_rcd = 0;
1507 memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1508 memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);