/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include "otx2_ethdev.h"
struct otx2_nix_xstats_name {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};
static const struct otx2_nix_xstats_name nix_tx_xstats[] = {
	{"tx_ucast", NIX_STAT_LF_TX_TX_UCAST},
	{"tx_bcast", NIX_STAT_LF_TX_TX_BCAST},
	{"tx_mcast", NIX_STAT_LF_TX_TX_MCAST},
	{"tx_drop", NIX_STAT_LF_TX_TX_DROP},
	{"tx_octs", NIX_STAT_LF_TX_TX_OCTS},
};
static const struct otx2_nix_xstats_name nix_rx_xstats[] = {
	{"rx_octs", NIX_STAT_LF_RX_RX_OCTS},
	{"rx_ucast", NIX_STAT_LF_RX_RX_UCAST},
	{"rx_bcast", NIX_STAT_LF_RX_RX_BCAST},
	{"rx_mcast", NIX_STAT_LF_RX_RX_MCAST},
	{"rx_drop", NIX_STAT_LF_RX_RX_DROP},
	{"rx_drop_octs", NIX_STAT_LF_RX_RX_DROP_OCTS},
	{"rx_fcs", NIX_STAT_LF_RX_RX_FCS},
	{"rx_err", NIX_STAT_LF_RX_RX_ERR},
	{"rx_drp_bcast", NIX_STAT_LF_RX_RX_DRP_BCAST},
	{"rx_drp_mcast", NIX_STAT_LF_RX_RX_DRP_MCAST},
	{"rx_drp_l3bcast", NIX_STAT_LF_RX_RX_DRP_L3BCAST},
	{"rx_drp_l3mcast", NIX_STAT_LF_RX_RX_DRP_L3MCAST},
};
static const struct otx2_nix_xstats_name nix_q_xstats[] = {
	{"rq_op_re_pkts", NIX_LF_RQ_OP_RE_PKTS},
};
#define OTX2_NIX_NUM_RX_XSTATS RTE_DIM(nix_rx_xstats)
#define OTX2_NIX_NUM_TX_XSTATS RTE_DIM(nix_tx_xstats)
#define OTX2_NIX_NUM_QUEUE_XSTATS RTE_DIM(nix_q_xstats)
#define OTX2_NIX_NUM_XSTATS_REG (OTX2_NIX_NUM_RX_XSTATS + \
		OTX2_NIX_NUM_TX_XSTATS + OTX2_NIX_NUM_QUEUE_XSTATS)
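
/* Fill rte_eth_stats from the NIX LF statistics registers: aggregate Rx/Tx
 * packet, byte, drop and error counters, plus per-queue counters for the
 * queues registered via otx2_nix_queue_stats_mapping().
 */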
int
otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_stats *stats)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t reg, val;
	uint32_t qidx, i;
	int64_t *addr;

	stats->opackets = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_UCAST));
	stats->opackets += otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_MCAST));
	stats->opackets += otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_BCAST));
	stats->oerrors = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_DROP));
	stats->obytes = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_OCTS));

	stats->ipackets = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_UCAST));
	stats->ipackets += otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_MCAST));
	stats->ipackets += otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_BCAST));
	stats->imissed = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_DROP));
	stats->ibytes = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_OCTS));
	stats->ierrors = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_ERR));

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		if (dev->txmap[i] & (1U << 31)) {
			qidx = dev->txmap[i] & 0xFFFF;
			reg = (((uint64_t)qidx) << 32);
			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			/* Discard the reading if the operation errored */
			if (val & OP_ERR)
				val = 0;
			stats->q_opackets[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_obytes[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_DROP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_errors[i] = val;
		}
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		if (dev->rxmap[i] & (1U << 31)) {
			qidx = dev->rxmap[i] & 0xFFFF;
			reg = (((uint64_t)qidx) << 32);
			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_ipackets[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_OCTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_ibytes[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_DROP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_errors[i] += val;
		}
	}

	return 0;
}
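
/* Clear the NIX LF hardware statistics counters via a mailbox request to
 * the admin function (AF).
 */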
int
otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL)
		return -ENOMEM;

	return otx2_mbox_process(mbox);
}
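
/* Record which Rx/Tx queue feeds a given per-queue stats counter: bit 31
 * marks the mapping as valid and the low bits carry the queue id.
 */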
int
otx2_nix_queue_stats_mapping(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			     uint8_t stat_idx, uint8_t is_rx)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	if (is_rx)
		dev->rxmap[stat_idx] = ((1U << 31) | queue_id);
	else
		dev->txmap[stat_idx] = ((1U << 31) | queue_id);

	return 0;
}
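
/* Retrieve extended stats: the per-register Tx and Rx counters followed by
 * the receive error packet count summed over all Rx queues.
 */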
int
otx2_nix_xstats_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	unsigned int i, count = 0;
	uint64_t reg, val;

	if (n < OTX2_NIX_NUM_XSTATS_REG)
		return OTX2_NIX_NUM_XSTATS_REG;

	if (xstats == NULL)
		return 0;

	for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) {
		xstats[count].value = otx2_read64(dev->base +
				NIX_LF_TX_STATX(nix_tx_xstats[i].offset));
		xstats[count].id = count;
		count++;
	}

	for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) {
		xstats[count].value = otx2_read64(dev->base +
				NIX_LF_RX_STATX(nix_rx_xstats[i].offset));
		xstats[count].id = count;
		count++;
	}

	/* Accumulate the queue-level receive error packets into one xstat */
	xstats[count].value = 0;
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		reg = (((uint64_t)i) << 32);
		val = otx2_atomic64_add_nosync(reg, (int64_t *)(dev->base +
						nix_q_xstats[0].offset));
		if (val & OP_ERR)
			val = 0;
		xstats[count].value += val;
	}
	xstats[count].id = count;
	count++;

	return count;
}
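
/* Report the names of all extended stats; the order must match the order in
 * which otx2_nix_xstats_get() fills the values.
 */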
int
otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
			  struct rte_eth_xstat_name *xstats_names,
			  unsigned int limit)
{
	unsigned int i, count = 0;

	RTE_SET_USED(eth_dev);

	if (limit < OTX2_NIX_NUM_XSTATS_REG && xstats_names != NULL)
		return -ENOMEM;

	/* Names are filled only when a buffer is supplied */
	if (xstats_names) {
		for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", nix_tx_xstats[i].name);
			count++;
		}

		for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", nix_rx_xstats[i].name);
			count++;
		}

		for (i = 0; i < OTX2_NIX_NUM_QUEUE_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", nix_q_xstats[i].name);
			count++;
		}
	}

	return OTX2_NIX_NUM_XSTATS_REG;
}
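
/* Resolve a caller-supplied list of xstat ids to their names. */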
int
otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
				struct rte_eth_xstat_name *xstats_names,
				const uint64_t *ids, unsigned int limit)
{
	struct rte_eth_xstat_name xstats_names_copy[OTX2_NIX_NUM_XSTATS_REG];
	uint16_t i;

	if (limit < OTX2_NIX_NUM_XSTATS_REG && ids == NULL)
		return OTX2_NIX_NUM_XSTATS_REG;

	if (limit > OTX2_NIX_NUM_XSTATS_REG)
		return -EINVAL;

	if (xstats_names == NULL)
		return -ENOMEM;

	otx2_nix_xstats_get_names(eth_dev, xstats_names_copy, limit);

	for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) {
		if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) {
			otx2_err("Invalid id value");
			return -EINVAL;
		}
		strncpy(xstats_names[i].name, xstats_names_copy[ids[i]].name,
			sizeof(xstats_names[i].name));
	}

	return limit;
}
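
/* Fetch values for a caller-supplied list of xstat ids. */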
int
otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
			  uint64_t *values, unsigned int n)
{
	struct rte_eth_xstat xstats[OTX2_NIX_NUM_XSTATS_REG];
	uint16_t i;

	if (n < OTX2_NIX_NUM_XSTATS_REG && ids == NULL)
		return OTX2_NIX_NUM_XSTATS_REG;

	if (n > OTX2_NIX_NUM_XSTATS_REG)
		return -EINVAL;

	if (values == NULL)
		return -ENOMEM;

	otx2_nix_xstats_get(eth_dev, xstats, n);

	for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) {
		if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) {
			otx2_err("Invalid id value");
			return -EINVAL;
		}
		values[i] = xstats[ids[i]].value;
	}

	return n;
}
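
/* Reset the per-queue counters kept in the RQ/SQ hardware contexts: each
 * context is read back, its counter fields are zeroed and the context is
 * written again with a mask selecting only those fields.
 */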
static int
nix_queue_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint32_t i;
	int rc;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = i;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			otx2_err("Failed to read rq context");
			return rc;
		}
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = i;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		otx2_mbox_memcpy(&aq->rq, &rsp->rq, sizeof(rsp->rq));
		otx2_mbox_memset(&aq->rq_mask, 0, sizeof(aq->rq_mask));
		aq->rq.octs = 0;
		aq->rq.pkts = 0;
		aq->rq.drop_octs = 0;
		aq->rq.drop_pkts = 0;
		aq->rq.re_pkts = 0;
		aq->rq_mask.octs = ~(aq->rq_mask.octs);
		aq->rq_mask.pkts = ~(aq->rq_mask.pkts);
		aq->rq_mask.drop_octs = ~(aq->rq_mask.drop_octs);
		aq->rq_mask.drop_pkts = ~(aq->rq_mask.drop_pkts);
		aq->rq_mask.re_pkts = ~(aq->rq_mask.re_pkts);
		rc = otx2_mbox_process(mbox);
		if (rc) {
			otx2_err("Failed to write rq context");
			return rc;
		}
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = i;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_READ;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc) {
			otx2_err("Failed to read sq context");
			return rc;
		}
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = i;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		otx2_mbox_memcpy(&aq->sq, &rsp->sq, sizeof(rsp->sq));
		otx2_mbox_memset(&aq->sq_mask, 0, sizeof(aq->sq_mask));
		aq->sq.octs = 0;
		aq->sq.pkts = 0;
		aq->sq.drop_octs = 0;
		aq->sq.drop_pkts = 0;
		aq->sq_mask.octs = ~(aq->sq_mask.octs);
		aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
		aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
		aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
		rc = otx2_mbox_process(mbox);
		if (rc) {
			otx2_err("Failed to write sq context");
			return rc;
		}
	}

	return 0;
}
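
/* Reset both the NIX LF statistics registers and the queue context stats. */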
int
otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	int ret;

	if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL)
		return -ENOMEM;

	ret = otx2_mbox_process(mbox);
	if (ret)
		return ret;

	/* Reset queue stats */
	return nix_queue_stats_reset(eth_dev);
}