1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
5 #include <rte_ethdev.h>
7 #include <rte_malloc.h>
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
14 /* The statistics of the per-rxq basic stats */
15 static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
17 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
19 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
21 HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
24 /* The statistics of the per-txq basic stats */
25 static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
27 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
29 HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
33 static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
34 {"mac_tx_mac_pause_num",
35 HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
36 {"mac_rx_mac_pause_num",
37 HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
38 {"mac_tx_control_pkt_num",
39 HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
40 {"mac_rx_control_pkt_num",
41 HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
42 {"mac_tx_pfc_pkt_num",
43 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
44 {"mac_tx_pfc_pri0_pkt_num",
45 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
46 {"mac_tx_pfc_pri1_pkt_num",
47 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
48 {"mac_tx_pfc_pri2_pkt_num",
49 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
50 {"mac_tx_pfc_pri3_pkt_num",
51 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
52 {"mac_tx_pfc_pri4_pkt_num",
53 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
54 {"mac_tx_pfc_pri5_pkt_num",
55 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
56 {"mac_tx_pfc_pri6_pkt_num",
57 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
58 {"mac_tx_pfc_pri7_pkt_num",
59 HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
60 {"mac_rx_pfc_pkt_num",
61 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
62 {"mac_rx_pfc_pri0_pkt_num",
63 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
64 {"mac_rx_pfc_pri1_pkt_num",
65 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
66 {"mac_rx_pfc_pri2_pkt_num",
67 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
68 {"mac_rx_pfc_pri3_pkt_num",
69 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
70 {"mac_rx_pfc_pri4_pkt_num",
71 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
72 {"mac_rx_pfc_pri5_pkt_num",
73 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
74 {"mac_rx_pfc_pri6_pkt_num",
75 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
76 {"mac_rx_pfc_pri7_pkt_num",
77 HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
78 {"mac_tx_total_pkt_num",
79 HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
80 {"mac_tx_total_oct_num",
81 HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
82 {"mac_tx_good_pkt_num",
83 HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
84 {"mac_tx_bad_pkt_num",
85 HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
86 {"mac_tx_good_oct_num",
87 HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
88 {"mac_tx_bad_oct_num",
89 HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
90 {"mac_tx_uni_pkt_num",
91 HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
92 {"mac_tx_multi_pkt_num",
93 HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
94 {"mac_tx_broad_pkt_num",
95 HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
96 {"mac_tx_undersize_pkt_num",
97 HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
98 {"mac_tx_oversize_pkt_num",
99 HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
100 {"mac_tx_64_oct_pkt_num",
101 HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
102 {"mac_tx_65_127_oct_pkt_num",
103 HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
104 {"mac_tx_128_255_oct_pkt_num",
105 HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
106 {"mac_tx_256_511_oct_pkt_num",
107 HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
108 {"mac_tx_512_1023_oct_pkt_num",
109 HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
110 {"mac_tx_1024_1518_oct_pkt_num",
111 HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
112 {"mac_tx_1519_2047_oct_pkt_num",
113 HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
114 {"mac_tx_2048_4095_oct_pkt_num",
115 HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
116 {"mac_tx_4096_8191_oct_pkt_num",
117 HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
118 {"mac_tx_8192_9216_oct_pkt_num",
119 HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
120 {"mac_tx_9217_12287_oct_pkt_num",
121 HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
122 {"mac_tx_12288_16383_oct_pkt_num",
123 HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
124 {"mac_tx_1519_max_good_pkt_num",
125 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
126 {"mac_tx_1519_max_bad_pkt_num",
127 HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
128 {"mac_rx_total_pkt_num",
129 HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
130 {"mac_rx_total_oct_num",
131 HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
132 {"mac_rx_good_pkt_num",
133 HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
134 {"mac_rx_bad_pkt_num",
135 HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
136 {"mac_rx_good_oct_num",
137 HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
138 {"mac_rx_bad_oct_num",
139 HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
140 {"mac_rx_uni_pkt_num",
141 HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
142 {"mac_rx_multi_pkt_num",
143 HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
144 {"mac_rx_broad_pkt_num",
145 HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
146 {"mac_rx_undersize_pkt_num",
147 HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
148 {"mac_rx_oversize_pkt_num",
149 HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
150 {"mac_rx_64_oct_pkt_num",
151 HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
152 {"mac_rx_65_127_oct_pkt_num",
153 HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
154 {"mac_rx_128_255_oct_pkt_num",
155 HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
156 {"mac_rx_256_511_oct_pkt_num",
157 HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
158 {"mac_rx_512_1023_oct_pkt_num",
159 HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
160 {"mac_rx_1024_1518_oct_pkt_num",
161 HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
162 {"mac_rx_1519_2047_oct_pkt_num",
163 HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
164 {"mac_rx_2048_4095_oct_pkt_num",
165 HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
166 {"mac_rx_4096_8191_oct_pkt_num",
167 HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
168 {"mac_rx_8192_9216_oct_pkt_num",
169 HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
170 {"mac_rx_9217_12287_oct_pkt_num",
171 HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
172 {"mac_rx_12288_16383_oct_pkt_num",
173 HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
174 {"mac_rx_1519_max_good_pkt_num",
175 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
176 {"mac_rx_1519_max_bad_pkt_num",
177 HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
178 {"mac_tx_fragment_pkt_num",
179 HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
180 {"mac_tx_undermin_pkt_num",
181 HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
182 {"mac_tx_jabber_pkt_num",
183 HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
184 {"mac_tx_err_all_pkt_num",
185 HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
186 {"mac_tx_from_app_good_pkt_num",
187 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
188 {"mac_tx_from_app_bad_pkt_num",
189 HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
190 {"mac_rx_fragment_pkt_num",
191 HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
192 {"mac_rx_undermin_pkt_num",
193 HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
194 {"mac_rx_jabber_pkt_num",
195 HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
196 {"mac_rx_fcs_err_pkt_num",
197 HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
198 {"mac_rx_send_app_good_pkt_num",
199 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
200 {"mac_rx_send_app_bad_pkt_num",
201 HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
204 /* The statistic of reset */
205 static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
207 HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
209 HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
211 HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
213 HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
214 {"RESET_SUCCESS_CNT",
215 HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
217 HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
219 HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
222 /* The statistic of errors in Rx BD */
223 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
225 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
227 HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
230 /* The dfx statistic in Rx datapath */
231 static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
232 {"L3_CHECKSUM_ERRORS",
233 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
234 {"L4_CHECKSUM_ERRORS",
235 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
236 {"OL3_CHECKSUM_ERRORS",
237 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
238 {"OL4_CHECKSUM_ERRORS",
239 HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
242 /* The dfx statistic in Tx datapath */
243 static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
244 {"OVER_LENGTH_PKT_CNT",
245 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
246 {"EXCEED_LIMITED_BD_PKT_CNT",
247 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
248 {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
249 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
250 {"UNSUPPORTED_TUNNEL_PKT_CNT",
251 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
253 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
254 {"SHORT_PKT_PAD_FAIL_CNT",
255 HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
258 /* The statistic of rx queue */
259 static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
260 {"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
263 /* The statistic of tx queue */
264 static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
265 {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
268 /* The statistic of imissed packet */
269 static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
271 HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
/* Element counts of the name/offset tables above. */
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
	sizeof(hns3_mac_strings[0]))

#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
	sizeof(hns3_reset_stats_strings[0]))

#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
	sizeof(hns3_rx_bd_error_strings[0]))

#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
	sizeof(hns3_rxq_dfx_stats_strings[0]))

#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
	sizeof(hns3_txq_dfx_stats_strings[0]))

#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
	sizeof(hns3_rx_queue_strings[0]))

#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
	sizeof(hns3_tx_queue_strings[0]))

#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
	sizeof(hns3_rxq_basic_stats_strings[0]))

#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
	sizeof(hns3_txq_basic_stats_strings[0]))

#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
	sizeof(hns3_imissed_stats_strings[0]))

/* Number of xstats that do not scale with the queue count. */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + \
	HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS)

/* Forward declaration: definition is at the end of this file. */
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
310 * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
311 * This command is used before send 'query_mac_stat command', the descriptor
312 * number of 'query_mac_stat command' must match with reg_num in this command.
314 * Pointer to structure hns3_hw.
319 hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
321 uint64_t *data = (uint64_t *)(&hw->mac_stats);
322 struct hns3_cmd_desc *desc;
327 desc = rte_malloc("hns3_mac_desc",
328 desc_num * sizeof(struct hns3_cmd_desc), 0);
330 hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
334 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
335 ret = hns3_cmd_send(hw, desc, desc_num);
337 hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
342 for (i = 0; i < desc_num; i++) {
343 /* For special opcode 0034, only the first desc has the head */
345 desc_data = (uint64_t *)(&desc[i].data[0]);
346 n = HNS3_RD_FIRST_STATS_NUM;
348 desc_data = (uint64_t *)(&desc[i]);
349 n = HNS3_RD_OTHER_STATS_NUM;
352 for (k = 0; k < n; k++) {
353 *data += rte_le_to_cpu_64(*desc_data);
364 * Query Mac stat reg num command ,opcode id: 0x0033.
365 * This command is used before send 'query_mac_stat command', the descriptor
366 * number of 'query_mac_stat command' must match with reg_num in this command.
368 * Pointer to structure rte_eth_stats.
373 hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
375 struct hns3_adapter *hns = dev->data->dev_private;
376 struct hns3_hw *hw = &hns->hw;
377 struct hns3_cmd_desc desc;
382 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
383 ret = hns3_cmd_send(hw, &desc, 1);
388 * The num of MAC statistics registers that are provided by IMP in this
391 desc_data = (uint32_t *)(&desc.data[0]);
392 reg_num = rte_le_to_cpu_32(*desc_data);
395 * The descriptor number of 'query_additional_mac_stat command' is
396 * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)';
397 * This value is 83 in this version
399 *desc_num = 1 + ((reg_num - 3) >> 2) +
400 (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
406 hns3_query_update_mac_stats(struct rte_eth_dev *dev)
408 struct hns3_adapter *hns = dev->data->dev_private;
409 struct hns3_hw *hw = &hns->hw;
413 ret = hns3_mac_query_reg_num(dev, &desc_num);
415 ret = hns3_update_mac_stats(hw, desc_num);
417 hns3_err(hw, "Query mac reg num fail : %d", ret);
422 hns3_update_rpu_drop_stats(struct hns3_hw *hw)
424 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
425 struct hns3_query_rpu_cmd *req;
426 struct hns3_cmd_desc desc;
431 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
432 req = (struct hns3_query_rpu_cmd *)desc.data;
435 * tc_num is 0, means rpu stats of all TC channels will be
439 req->tc_queue_num = rte_cpu_to_le_32(tc_num);
440 ret = hns3_cmd_send(hw, &desc, 1);
442 hns3_err(hw, "failed to query RPU stats: %d", ret);
446 cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
447 stats->rpu_rx_drop_cnt += cnt;
453 hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
457 ret = hns3_update_rpu_drop_stats(hw);
462 memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
468 * Query tqp tx queue statistics ,opcode id: 0x0B03.
469 * Query tqp rx queue statistics ,opcode id: 0x0B13.
470 * Get all statistics of a port.
472 * Pointer to Ethernet device.
474 * Pointer to structure rte_eth_stats.
479 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
481 struct hns3_adapter *hns = eth_dev->data->dev_private;
482 struct hns3_hw *hw = &hns->hw;
483 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
484 struct hns3_tqp_stats *stats = &hw->tqp_stats;
485 struct hns3_rx_queue *rxq;
486 struct hns3_tx_queue *txq;
492 /* Update imissed stats */
493 ret = hns3_update_imissed_stats(hw, false);
495 hns3_err(hw, "update imissed stats failed, ret = %d",
500 rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt;
503 /* Reads all the stats of a rxq in a loop to keep them synchronized */
504 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
505 rxq = eth_dev->data->rx_queues[i];
509 cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
511 * Read hardware and software in adjacent positions to minumize
512 * the timing variance.
514 rte_stats->ierrors += rxq->err_stats.l2_errors +
515 rxq->err_stats.pkt_len_errors;
516 stats->rcb_rx_ring_pktnum_rcd += cnt;
517 stats->rcb_rx_ring_pktnum[i] += cnt;
518 rte_stats->ibytes += rxq->basic_stats.bytes;
521 /* Reads all the stats of a txq in a loop to keep them synchronized */
522 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
523 txq = eth_dev->data->tx_queues[i];
527 cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
528 stats->rcb_tx_ring_pktnum_rcd += cnt;
529 stats->rcb_tx_ring_pktnum[i] += cnt;
530 rte_stats->obytes += txq->basic_stats.bytes;
533 rte_stats->oerrors = 0;
535 * If HW statistics are reset by stats_reset, but a lot of residual
536 * packets exist in the hardware queue and these packets are error
537 * packets, flip overflow may occurred. So return 0 in this case.
539 rte_stats->ipackets =
540 stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
541 stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
542 rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
544 rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
550 hns3_stats_reset(struct rte_eth_dev *eth_dev)
552 struct hns3_adapter *hns = eth_dev->data->dev_private;
553 struct hns3_hw *hw = &hns->hw;
554 struct hns3_rx_queue *rxq;
555 struct hns3_tx_queue *txq;
561 * Note: Reading hardware statistics of imissed registers will
564 ret = hns3_update_imissed_stats(hw, true);
566 hns3_err(hw, "clear imissed stats failed, ret = %d",
572 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
573 rxq = eth_dev->data->rx_queues[i];
577 rxq->err_stats.pkt_len_errors = 0;
578 rxq->err_stats.l2_errors = 0;
581 /* Clear all the stats of a rxq in a loop to keep them synchronized */
582 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
583 rxq = eth_dev->data->rx_queues[i];
587 memset(&rxq->basic_stats, 0,
588 sizeof(struct hns3_rx_basic_stats));
590 /* This register is read-clear */
591 (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
592 rxq->err_stats.pkt_len_errors = 0;
593 rxq->err_stats.l2_errors = 0;
596 /* Clear all the stats of a txq in a loop to keep them synchronized */
597 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
598 txq = eth_dev->data->tx_queues[i];
602 memset(&txq->basic_stats, 0,
603 sizeof(struct hns3_tx_basic_stats));
605 /* This register is read-clear */
606 (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
609 hns3_tqp_stats_clear(hw);
615 hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
617 struct hns3_adapter *hns = dev->data->dev_private;
618 struct hns3_hw *hw = &hns->hw;
619 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
622 ret = hns3_query_update_mac_stats(dev);
624 hns3_err(hw, "Clear Mac stats fail : %d", ret);
628 memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
633 /* This function calculates the number of xstats based on the current config */
635 hns3_xstats_calc_num(struct rte_eth_dev *dev)
637 #define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
638 HNS3_NUM_RXQ_DFX_XSTATS + \
639 HNS3_NUM_RX_QUEUE_STATS + \
640 HNS3_NUM_RXQ_BASIC_STATS)
641 #define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
642 HNS3_NUM_TX_QUEUE_STATS + \
643 HNS3_NUM_TXQ_BASIC_STATS)
645 struct hns3_adapter *hns = dev->data->dev_private;
646 uint16_t nb_rx_q = dev->data->nb_rx_queues;
647 uint16_t nb_tx_q = dev->data->nb_tx_queues;
648 int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
649 int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
652 return rx_comm_stats_num + tx_comm_stats_num +
653 HNS3_NUM_RESET_XSTATS;
655 return rx_comm_stats_num + tx_comm_stats_num +
660 hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
663 struct hns3_adapter *hns = dev->data->dev_private;
664 struct hns3_hw *hw = &hns->hw;
668 /* Get rx queue stats */
669 for (j = 0; j < dev->data->nb_rx_queues; j++) {
670 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
671 reg_offset = hns3_get_tqp_reg_offset(j);
672 xstats[*count].value = hns3_read_dev(hw,
673 reg_offset + hns3_rx_queue_strings[i].offset);
674 xstats[*count].id = *count;
679 /* Get tx queue stats */
680 for (j = 0; j < dev->data->nb_tx_queues; j++) {
681 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
682 reg_offset = hns3_get_tqp_reg_offset(j);
683 xstats[*count].value = hns3_read_dev(hw,
684 reg_offset + hns3_tx_queue_strings[i].offset);
685 xstats[*count].id = *count;
692 hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
695 struct hns3_rx_dfx_stats *dfx_stats;
696 struct hns3_rx_queue *rxq;
700 for (i = 0; i < dev->data->nb_rx_queues; i++) {
701 rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
705 dfx_stats = &rxq->dfx_stats;
706 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
707 val = (char *)dfx_stats +
708 hns3_rxq_dfx_stats_strings[j].offset;
709 xstats[*count].value = *(uint64_t *)val;
710 xstats[*count].id = *count;
717 hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
720 struct hns3_tx_dfx_stats *dfx_stats;
721 struct hns3_tx_queue *txq;
725 for (i = 0; i < dev->data->nb_tx_queues; i++) {
726 txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
730 dfx_stats = &txq->dfx_stats;
731 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
732 val = (char *)dfx_stats +
733 hns3_txq_dfx_stats_strings[j].offset;
734 xstats[*count].value = *(uint64_t *)val;
735 xstats[*count].id = *count;
/* Append all Rx then Tx dfx counters to 'xstats'. */
static void
hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       int *count)
{
	hns3_rxq_dfx_stats_get(dev, xstats, count);
	hns3_txq_dfx_stats_get(dev, xstats, count);
}
750 hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
753 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
754 struct hns3_tqp_stats *stats = &hw->tqp_stats;
755 struct hns3_rx_basic_stats *rxq_stats;
756 struct hns3_rx_queue *rxq;
761 for (i = 0; i < dev->data->nb_rx_queues; i++) {
762 rxq = dev->data->rx_queues[i];
766 cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
768 * Read hardware and software in adjacent positions to minimize
769 * the time difference.
771 rxq_stats = &rxq->basic_stats;
772 rxq_stats->errors = rxq->err_stats.l2_errors +
773 rxq->err_stats.pkt_len_errors;
774 stats->rcb_rx_ring_pktnum_rcd += cnt;
775 stats->rcb_rx_ring_pktnum[i] += cnt;
778 * If HW statistics are reset by stats_reset, but a lot of
779 * residual packets exist in the hardware queue and these
780 * packets are error packets, flip overflow may occurred.
781 * So return 0 in this case.
784 stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
785 stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
786 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
787 val = (char *)rxq_stats +
788 hns3_rxq_basic_stats_strings[j].offset;
789 xstats[*count].value = *(uint64_t *)val;
790 xstats[*count].id = *count;
797 hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
800 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
801 struct hns3_tqp_stats *stats = &hw->tqp_stats;
802 struct hns3_tx_basic_stats *txq_stats;
803 struct hns3_tx_queue *txq;
808 for (i = 0; i < dev->data->nb_tx_queues; i++) {
809 txq = dev->data->tx_queues[i];
813 cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
814 stats->rcb_tx_ring_pktnum_rcd += cnt;
815 stats->rcb_tx_ring_pktnum[i] += cnt;
817 txq_stats = &txq->basic_stats;
818 txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
820 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
821 val = (char *)txq_stats +
822 hns3_txq_basic_stats_strings[j].offset;
823 xstats[*count].value = *(uint64_t *)val;
824 xstats[*count].id = *count;
/* Append all Rx then Tx per-queue basic stats to 'xstats'. */
static void
hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	hns3_rxq_basic_stats_get(dev, xstats, count);
	hns3_txq_basic_stats_get(dev, xstats, count);
}
839 * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
841 * Pointer to Ethernet device.
843 * A pointer to a table of structure of type *rte_eth_xstat*
844 * to be filled with device statistics ids and values.
845 * This parameter can be set to NULL if n is 0.
847 * The size of the xstats array (number of elements).
849 * 0 on fail, count(The size of the statistics elements) on success.
852 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
855 struct hns3_adapter *hns = dev->data->dev_private;
856 struct hns3_hw *hw = &hns->hw;
857 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
858 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
859 struct hns3_reset_stats *reset_stats = &hw->reset.stats;
860 struct hns3_rx_bd_errors_stats *rx_err_stats;
861 struct hns3_rx_queue *rxq;
870 count = hns3_xstats_calc_num(dev);
876 hns3_tqp_basic_stats_get(dev, xstats, &count);
879 /* Update Mac stats */
880 ret = hns3_query_update_mac_stats(dev);
882 hns3_err(hw, "Update Mac stats fail : %d", ret);
886 /* Get MAC stats from hw->hw_xstats.mac_stats struct */
887 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
888 addr = (char *)mac_stats + hns3_mac_strings[i].offset;
889 xstats[count].value = *(uint64_t *)addr;
890 xstats[count].id = count;
894 ret = hns3_update_imissed_stats(hw, false);
896 hns3_err(hw, "update imissed stats failed, ret = %d",
901 for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
902 addr = (char *)imissed_stats +
903 hns3_imissed_stats_strings[i].offset;
904 xstats[count].value = *(uint64_t *)addr;
905 xstats[count].id = count;
911 /* Get the reset stat */
912 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
913 addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
914 xstats[count].value = *(uint64_t *)addr;
915 xstats[count].id = count;
919 /* Get the Rx BD errors stats */
920 for (j = 0; j < dev->data->nb_rx_queues; j++) {
921 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
922 rxq = dev->data->rx_queues[j];
924 rx_err_stats = &rxq->err_stats;
925 addr = (char *)rx_err_stats +
926 hns3_rx_bd_error_strings[i].offset;
927 xstats[count].value = *(uint64_t *)addr;
928 xstats[count].id = count;
934 hns3_tqp_dfx_stats_get(dev, xstats, &count);
935 hns3_queue_stats_get(dev, xstats, &count);
941 hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
942 struct rte_eth_xstat_name *xstats_names,
947 for (i = 0; i < dev->data->nb_rx_queues; i++) {
948 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
949 snprintf(xstats_names[*count].name,
950 sizeof(xstats_names[*count].name),
952 hns3_rxq_basic_stats_strings[j].name);
956 for (i = 0; i < dev->data->nb_tx_queues; i++) {
957 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
958 snprintf(xstats_names[*count].name,
959 sizeof(xstats_names[*count].name),
961 hns3_txq_basic_stats_strings[j].name);
968 hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
969 struct rte_eth_xstat_name *xstats_names,
974 for (i = 0; i < dev->data->nb_rx_queues; i++) {
975 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
976 snprintf(xstats_names[*count].name,
977 sizeof(xstats_names[*count].name),
979 hns3_rxq_dfx_stats_strings[j].name);
984 for (i = 0; i < dev->data->nb_tx_queues; i++) {
985 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
986 snprintf(xstats_names[*count].name,
987 sizeof(xstats_names[*count].name),
989 hns3_txq_dfx_stats_strings[j].name);
996 * Retrieve names of extended statistics of an Ethernet device.
998 * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
1000 * xstats_names[i].name => xstats[i].value
1002 * And the array index is same with id field of 'struct rte_eth_xstat':
1005 * This assumption makes key-value pair matching less flexible but simpler.
1008 * Pointer to Ethernet device.
1009 * @param xstats_names
1010 * An rte_eth_xstat_name array of at least *size* elements to
1011 * be filled. If set to NULL, the function returns the required number
1014 * The size of the xstats_names array (number of elements).
1016 * - A positive value lower or equal to size: success. The return value
1017 * is the number of entries filled in the stats table.
1020 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
1021 struct rte_eth_xstat_name *xstats_names,
1022 __rte_unused unsigned int size)
1024 struct hns3_adapter *hns = dev->data->dev_private;
1025 int cnt_stats = hns3_xstats_calc_num(dev);
1029 if (xstats_names == NULL)
1032 hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
1034 /* Note: size limited checked in rte_eth_xstats_get_names() */
1036 /* Get MAC name from hw->hw_xstats.mac_stats struct */
1037 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1038 snprintf(xstats_names[count].name,
1039 sizeof(xstats_names[count].name),
1040 "%s", hns3_mac_strings[i].name);
1044 for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) {
1045 snprintf(xstats_names[count].name,
1046 sizeof(xstats_names[count].name),
1047 "%s", hns3_imissed_stats_strings[i].name);
1051 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1052 snprintf(xstats_names[count].name,
1053 sizeof(xstats_names[count].name),
1054 "%s", hns3_reset_stats_strings[i].name);
1058 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1059 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1060 snprintf(xstats_names[count].name,
1061 sizeof(xstats_names[count].name),
1063 hns3_rx_bd_error_strings[i].name);
1068 hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
1070 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1071 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
1072 snprintf(xstats_names[count].name,
1073 sizeof(xstats_names[count].name),
1074 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
1079 for (j = 0; j < dev->data->nb_tx_queues; j++) {
1080 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
1081 snprintf(xstats_names[count].name,
1082 sizeof(xstats_names[count].name),
1083 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
1092 * Retrieve extended statistics of an Ethernet device.
1095 * Pointer to Ethernet device.
1097 * A pointer to an ids array passed by application. This tells which
1098 * statistics values function should retrieve. This parameter
1099 * can be set to NULL if size is 0. In this case function will retrieve
1100 * all avalible statistics.
1102 * A pointer to a table to be filled with device statistics values.
1104 * The size of the ids array (number of elements).
1106 * - A positive value lower or equal to size: success. The return value
1107 * is the number of entries filled in the stats table.
1108 * - A positive value higher than size: error, the given statistics table
1109 * is too small. The return value corresponds to the size that should
1110 * be given to succeed. The entries in the table are not valid and
1111 * shall not be used by the caller.
1115 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1116 uint64_t *values, uint32_t size)
1118 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1119 struct hns3_adapter *hns = dev->data->dev_private;
1120 struct rte_eth_xstat *values_copy;
1121 struct hns3_hw *hw = &hns->hw;
1122 uint32_t count_value;
1126 if (ids == NULL && values == NULL)
1130 if (size < cnt_stats)
1133 len = cnt_stats * sizeof(struct rte_eth_xstat);
1134 values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1135 if (values_copy == NULL) {
1136 hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
1137 "to store statistics values", len);
1141 count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
1142 if (count_value != cnt_stats) {
1143 rte_free(values_copy);
1147 if (ids == NULL && values != NULL) {
1148 for (i = 0; i < cnt_stats; i++)
1149 memcpy(&values[i], &values_copy[i].value,
1152 rte_free(values_copy);
1156 for (i = 0; i < size; i++) {
1157 if (ids[i] >= cnt_stats) {
1158 hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1159 "should < %u", i, ids[i], cnt_stats);
1160 rte_free(values_copy);
1163 memcpy(&values[i], &values_copy[ids[i]].value,
1167 rte_free(values_copy);
1172 * Retrieve names of extended statistics of an Ethernet device.
1175 * Pointer to Ethernet device.
1176 * @param xstats_names
1177 * An rte_eth_xstat_name array of at least *size* elements to
1178 * be filled. If set to NULL, the function returns the required number
1181 * IDs array given by app to retrieve specific statistics
1183 * The size of the xstats_names array (number of elements).
1185 * - A positive value lower or equal to size: success. The return value
1186 * is the number of entries filled in the stats table.
1187 * - A positive value higher than size: error, the given statistics table
1188 * is too small. The return value corresponds to the size that should
1189 * be given to succeed. The entries in the table are not valid and
1190 * shall not be used by the caller.
1193 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1194 struct rte_eth_xstat_name *xstats_names,
1195 const uint64_t *ids, uint32_t size)
1197 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1198 struct hns3_adapter *hns = dev->data->dev_private;
1199 struct rte_eth_xstat_name *names_copy;
1200 struct hns3_hw *hw = &hns->hw;
1204 if (xstats_names == NULL)
1208 if (size < cnt_stats)
1211 return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
1214 len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1215 names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1216 if (names_copy == NULL) {
1217 hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
1218 "to store statistics names", len);
1222 (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1224 for (i = 0; i < size; i++) {
1225 if (ids[i] >= cnt_stats) {
1226 hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1227 "should < %u", i, ids[i], cnt_stats);
1228 rte_free(names_copy);
1231 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1232 "%s", names_copy[ids[i]].name);
1235 rte_free(names_copy);
1240 hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1242 struct hns3_rx_queue *rxq;
1243 struct hns3_tx_queue *txq;
1246 /* Clear Rx dfx stats */
1247 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1248 rxq = dev->data->rx_queues[i];
1250 memset(&rxq->dfx_stats, 0,
1251 sizeof(struct hns3_rx_dfx_stats));
1254 /* Clear Tx dfx stats */
1255 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1256 txq = dev->data->tx_queues[i];
1258 memset(&txq->dfx_stats, 0,
1259 sizeof(struct hns3_tx_dfx_stats));
1264 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1266 struct hns3_adapter *hns = dev->data->dev_private;
1269 /* Clear tqp stats */
1270 ret = hns3_stats_reset(dev);
1274 hns3_tqp_dfx_stats_clear(dev);
1276 /* Clear reset stats */
1277 memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1282 /* HW registers are cleared on read */
1283 ret = hns3_mac_stats_reset(dev);
1291 hns3_tqp_stats_init(struct hns3_hw *hw)
1293 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1295 tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1296 sizeof(uint64_t) * hw->tqps_num, 0);
1297 if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1298 hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1302 tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1303 sizeof(uint64_t) * hw->tqps_num, 0);
1304 if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1305 hns3_err(hw, "failed to allocate tx_ring pkt_num.");
1306 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1307 tqp_stats->rcb_rx_ring_pktnum = NULL;
1315 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1317 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1319 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1320 tqp_stats->rcb_rx_ring_pktnum = NULL;
1321 rte_free(tqp_stats->rcb_tx_ring_pktnum);
1322 tqp_stats->rcb_tx_ring_pktnum = NULL;
1326 hns3_tqp_stats_clear(struct hns3_hw *hw)
1328 struct hns3_tqp_stats *stats = &hw->tqp_stats;
1330 stats->rcb_rx_ring_pktnum_rcd = 0;
1331 stats->rcb_tx_ring_pktnum_rcd = 0;
1332 memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1333 memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);