/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <errno.h>

#include "cnxk_ethdev.h"
7 #define CNXK_NB_RXQ_STATS 5
8 #define CNXK_NB_TXQ_STATS 4
11 cnxk_nix_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
13 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
14 struct roc_nix *nix = &dev->nix;
15 struct roc_nix_stats nix_stats;
18 rc = roc_nix_stats_get(nix, &nix_stats);
22 stats->opackets = nix_stats.tx_ucast;
23 stats->opackets += nix_stats.tx_mcast;
24 stats->opackets += nix_stats.tx_bcast;
25 stats->oerrors = nix_stats.tx_drop;
26 stats->obytes = nix_stats.tx_octs;
28 stats->ipackets = nix_stats.rx_ucast;
29 stats->ipackets += nix_stats.rx_mcast;
30 stats->ipackets += nix_stats.rx_bcast;
31 stats->imissed = nix_stats.rx_drop;
32 stats->ibytes = nix_stats.rx_octs;
33 stats->ierrors = nix_stats.rx_err;
35 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
36 struct roc_nix_stats_queue qstats;
39 if (dev->txq_stat_map[i] & (1U << 31)) {
40 qidx = dev->txq_stat_map[i] & 0xFFFF;
41 rc = roc_nix_stats_queue_get(nix, qidx, 0, &qstats);
44 stats->q_opackets[i] = qstats.tx_pkts;
45 stats->q_obytes[i] = qstats.tx_octs;
46 stats->q_errors[i] = qstats.tx_drop_pkts;
49 if (dev->rxq_stat_map[i] & (1U << 31)) {
50 qidx = dev->rxq_stat_map[i] & 0xFFFF;
51 rc = roc_nix_stats_queue_get(nix, qidx, 1, &qstats);
54 stats->q_ipackets[i] = qstats.rx_pkts;
55 stats->q_ibytes[i] = qstats.rx_octs;
56 stats->q_errors[i] += qstats.rx_drop_pkts;
64 cnxk_nix_stats_reset(struct rte_eth_dev *eth_dev)
66 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
68 return roc_nix_stats_reset(&dev->nix);
72 cnxk_nix_queue_stats_mapping(struct rte_eth_dev *eth_dev, uint16_t queue_id,
73 uint8_t stat_idx, uint8_t is_rx)
75 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
78 if (queue_id >= dev->nb_rxq)
80 dev->rxq_stat_map[stat_idx] = ((1U << 31) | queue_id);
82 if (queue_id >= dev->nb_txq)
84 dev->txq_stat_map[stat_idx] = ((1U << 31) | queue_id);
91 cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
94 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
95 struct roc_nix_xstat roc_xstats[n];
96 struct roc_nix *nix = &dev->nix;
97 int roc_size, q, idx = 0, size;
99 roc_size = roc_nix_xstats_get(nix, roc_xstats, n);
104 /* Per Queue statistics also returned as part of xstats */
105 size = roc_size + (dev->nb_rxq * CNXK_NB_RXQ_STATS) +
106 (dev->nb_txq * CNXK_NB_TXQ_STATS);
108 /* If requested array do not have space then return with count */
109 if (size > (int)n || xstats == NULL)
112 for (idx = 0; idx < roc_size; idx++) {
113 xstats[idx].id = roc_xstats[idx].id;
114 xstats[idx].value = roc_xstats[idx].value;
116 for (q = 0; q < dev->nb_rxq; q++) {
117 struct roc_nix_stats_queue qstats;
120 rc = roc_nix_stats_queue_get(nix, q, 1, &qstats);
124 xstats[idx].id = idx;
125 xstats[idx].value = qstats.rx_pkts;
127 xstats[idx].id = idx;
128 xstats[idx].value = qstats.rx_octs;
130 xstats[idx].id = idx;
131 xstats[idx].value = qstats.rx_drop_pkts;
133 xstats[idx].id = idx;
134 xstats[idx].value = qstats.rx_drop_octs;
136 xstats[idx].id = idx;
137 xstats[idx].value = qstats.rx_error_pkts;
140 for (q = 0; q < dev->nb_txq; q++) {
141 struct roc_nix_stats_queue qstats;
144 rc = roc_nix_stats_queue_get(nix, q, 0, &qstats);
148 xstats[idx].id = idx;
149 xstats[idx].value = qstats.tx_pkts;
151 xstats[idx].id = idx;
152 xstats[idx].value = qstats.tx_octs;
154 xstats[idx].id = idx;
155 xstats[idx].value = qstats.tx_drop_pkts;
157 xstats[idx].id = idx;
158 xstats[idx].value = qstats.tx_drop_octs;
166 cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
167 struct rte_eth_xstat_name *xstats_names,
170 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
171 struct roc_nix_xstat_name roc_xstats_name[limit];
172 struct roc_nix *nix = &dev->nix;
173 int roc_size, size, i, q;
175 roc_size = roc_nix_num_xstats_get(nix);
176 /* Per Queue statistics also returned as part of xstats */
177 size = roc_size + (dev->nb_rxq * CNXK_NB_RXQ_STATS) +
178 (dev->nb_txq * CNXK_NB_TXQ_STATS);
180 if (xstats_names == NULL)
183 if ((int)limit < size && xstats_names != NULL)
186 roc_size = roc_nix_xstats_names_get(nix, roc_xstats_name, limit);
188 for (i = 0; i < roc_size; i++)
189 rte_strscpy(xstats_names[i].name, roc_xstats_name[i].name,
190 sizeof(xstats_names[i].name));
192 for (q = 0; q < dev->nb_rxq; q++) {
193 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
196 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
199 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
200 "rxq_%d_drop_pkts", q);
202 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
203 "rxq_%d_drop_octs", q);
205 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
206 "rxq_%d_err_pkts", q);
210 for (q = 0; q < dev->nb_txq; q++) {
211 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
214 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
217 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
218 "txq_%d_drop_pkts", q);
220 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
221 "txq_%d_drop_octs", q);
229 cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
231 struct rte_eth_xstat_name *xstats_names,
234 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
235 uint32_t nix_cnt = roc_nix_num_xstats_get(&dev->nix);
236 uint32_t stat_cnt = nix_cnt + (dev->nb_rxq * CNXK_NB_RXQ_STATS) +
237 (dev->nb_txq * CNXK_NB_TXQ_STATS);
238 struct rte_eth_xstat_name xnames[stat_cnt];
241 if (limit < stat_cnt && ids == NULL)
244 if (limit > stat_cnt)
247 if (xstats_names == NULL)
250 cnxk_nix_xstats_get_names(eth_dev, xnames, stat_cnt);
252 for (i = 0; i < limit; i++) {
253 if (ids[i] >= stat_cnt)
256 rte_strscpy(xstats_names[i].name, xnames[ids[i]].name,
257 sizeof(xstats_names[i].name));
264 cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
265 uint64_t *values, unsigned int n)
267 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
268 uint32_t nix_cnt = roc_nix_num_xstats_get(&dev->nix);
269 uint32_t stat_cnt = nix_cnt + (dev->nb_rxq * CNXK_NB_RXQ_STATS) +
270 (dev->nb_txq * CNXK_NB_TXQ_STATS);
271 struct rte_eth_xstat xstats[stat_cnt];
274 if (n < stat_cnt && ids == NULL)
283 cnxk_nix_xstats_get(eth_dev, xstats, stat_cnt);
285 for (i = 0; i < n; i++) {
286 if (ids[i] >= stat_cnt)
288 values[i] = xstats[ids[i]].value;
295 cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev)
297 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
298 struct roc_nix *nix = &dev->nix;
301 rc = roc_nix_stats_reset(nix);
305 /* Reset Rx Queues */
306 for (i = 0; i < dev->nb_rxq; i++) {
307 rc = roc_nix_stats_queue_reset(nix, i, 1);
312 /* Reset Tx Queues */
313 for (i = 0; i < dev->nb_txq; i++) {
314 rc = roc_nix_stats_queue_reset(nix, i, 0);