/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
8 #include "roc_nix_xstats.h"
11 #define NIX_RX_STATS(val) plt_read64(nix->base + NIX_LF_RX_STATX(val))
12 #define NIX_TX_STATS(val) plt_read64(nix->base + NIX_LF_TX_STATX(val))
15 roc_nix_num_xstats_get(struct roc_nix *roc_nix)
17 if (roc_nix_is_vf_or_sdp(roc_nix))
18 return CNXK_NIX_NUM_XSTATS_REG;
19 else if (roc_model_is_cn9k())
20 return CNXK_NIX_NUM_XSTATS_CGX;
22 return CNXK_NIX_NUM_XSTATS_RPM;
26 roc_nix_stats_get(struct roc_nix *roc_nix, struct roc_nix_stats *stats)
28 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
33 stats->rx_octs = NIX_RX_STATS(NIX_STAT_LF_RX_RX_OCTS);
34 stats->rx_ucast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_UCAST);
35 stats->rx_bcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_BCAST);
36 stats->rx_mcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_MCAST);
37 stats->rx_drop = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP);
38 stats->rx_drop_octs = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DROP_OCTS);
39 stats->rx_fcs = NIX_RX_STATS(NIX_STAT_LF_RX_RX_FCS);
40 stats->rx_err = NIX_RX_STATS(NIX_STAT_LF_RX_RX_ERR);
41 stats->rx_drop_bcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_BCAST);
42 stats->rx_drop_mcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_MCAST);
43 stats->rx_drop_l3_bcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3BCAST);
44 stats->rx_drop_l3_mcast = NIX_RX_STATS(NIX_STAT_LF_RX_RX_DRP_L3MCAST);
46 stats->tx_ucast = NIX_TX_STATS(NIX_STAT_LF_TX_TX_UCAST);
47 stats->tx_bcast = NIX_TX_STATS(NIX_STAT_LF_TX_TX_BCAST);
48 stats->tx_mcast = NIX_TX_STATS(NIX_STAT_LF_TX_TX_MCAST);
49 stats->tx_drop = NIX_TX_STATS(NIX_STAT_LF_TX_TX_DROP);
50 stats->tx_octs = NIX_TX_STATS(NIX_STAT_LF_TX_TX_OCTS);
55 roc_nix_stats_reset(struct roc_nix *roc_nix)
57 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
58 struct mbox *mbox = (&nix->dev)->mbox;
60 if (mbox_alloc_msg_nix_stats_rst(mbox) == NULL)
63 return mbox_process(mbox);
67 queue_is_valid(struct nix *nix, uint16_t qid, bool is_rx)
72 nb_queues = nix->nb_rx_queues;
74 nb_queues = nix->nb_tx_queues;
77 return NIX_ERR_QUEUE_INVALID_RANGE;
83 qstat_read(struct nix *nix, uint16_t qid, uint32_t off)
88 addr = (int64_t *)(nix->base + off);
89 reg = (((uint64_t)qid) << 32);
90 val = roc_atomic64_add_nosync(reg, addr);
91 if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR))
97 nix_stat_rx_queue_get(struct nix *nix, uint16_t qid,
98 struct roc_nix_stats_queue *qstats)
100 qstats->rx_pkts = qstat_read(nix, qid, NIX_LF_RQ_OP_PKTS);
101 qstats->rx_octs = qstat_read(nix, qid, NIX_LF_RQ_OP_OCTS);
102 qstats->rx_drop_pkts = qstat_read(nix, qid, NIX_LF_RQ_OP_DROP_PKTS);
103 qstats->rx_drop_octs = qstat_read(nix, qid, NIX_LF_RQ_OP_DROP_OCTS);
104 qstats->rx_error_pkts = qstat_read(nix, qid, NIX_LF_RQ_OP_RE_PKTS);
108 nix_stat_tx_queue_get(struct nix *nix, uint16_t qid,
109 struct roc_nix_stats_queue *qstats)
111 qstats->tx_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_PKTS);
112 qstats->tx_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_OCTS);
113 qstats->tx_drop_pkts = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_PKTS);
114 qstats->tx_drop_octs = qstat_read(nix, qid, NIX_LF_SQ_OP_DROP_OCTS);
118 nix_stat_rx_queue_reset(struct nix *nix, uint16_t qid)
120 struct mbox *mbox = (&nix->dev)->mbox;
123 if (roc_model_is_cn9k()) {
124 struct nix_aq_enq_req *aq;
126 aq = mbox_alloc_msg_nix_aq_enq(mbox);
131 aq->ctype = NIX_AQ_CTYPE_RQ;
132 aq->op = NIX_AQ_INSTOP_WRITE;
136 aq->rq.drop_octs = 0;
137 aq->rq.drop_pkts = 0;
140 aq->rq_mask.octs = ~(aq->rq_mask.octs);
141 aq->rq_mask.pkts = ~(aq->rq_mask.pkts);
142 aq->rq_mask.drop_octs = ~(aq->rq_mask.drop_octs);
143 aq->rq_mask.drop_pkts = ~(aq->rq_mask.drop_pkts);
144 aq->rq_mask.re_pkts = ~(aq->rq_mask.re_pkts);
146 struct nix_cn10k_aq_enq_req *aq;
148 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
153 aq->ctype = NIX_AQ_CTYPE_RQ;
154 aq->op = NIX_AQ_INSTOP_WRITE;
158 aq->rq.drop_octs = 0;
159 aq->rq.drop_pkts = 0;
162 aq->rq_mask.octs = ~(aq->rq_mask.octs);
163 aq->rq_mask.pkts = ~(aq->rq_mask.pkts);
164 aq->rq_mask.drop_octs = ~(aq->rq_mask.drop_octs);
165 aq->rq_mask.drop_pkts = ~(aq->rq_mask.drop_pkts);
166 aq->rq_mask.re_pkts = ~(aq->rq_mask.re_pkts);
169 rc = mbox_process(mbox);
170 return rc ? NIX_ERR_AQ_WRITE_FAILED : 0;
174 nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
176 struct mbox *mbox = (&nix->dev)->mbox;
179 if (roc_model_is_cn9k()) {
180 struct nix_aq_enq_req *aq;
182 aq = mbox_alloc_msg_nix_aq_enq(mbox);
187 aq->ctype = NIX_AQ_CTYPE_SQ;
188 aq->op = NIX_AQ_INSTOP_WRITE;
191 aq->sq.drop_octs = 0;
192 aq->sq.drop_pkts = 0;
194 aq->sq_mask.octs = ~(aq->sq_mask.octs);
195 aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
196 aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
197 aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
199 struct nix_cn10k_aq_enq_req *aq;
201 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
206 aq->ctype = NIX_AQ_CTYPE_SQ;
207 aq->op = NIX_AQ_INSTOP_WRITE;
210 aq->sq.drop_octs = 0;
211 aq->sq.drop_pkts = 0;
213 aq->sq_mask.octs = ~(aq->sq_mask.octs);
214 aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
215 aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
216 aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
219 rc = mbox_process(mbox);
220 return rc ? NIX_ERR_AQ_WRITE_FAILED : 0;
224 roc_nix_stats_queue_get(struct roc_nix *roc_nix, uint16_t qid, bool is_rx,
225 struct roc_nix_stats_queue *qstats)
227 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
231 return NIX_ERR_PARAM;
233 rc = queue_is_valid(nix, qid, is_rx);
238 nix_stat_rx_queue_get(nix, qid, qstats);
240 nix_stat_tx_queue_get(nix, qid, qstats);
247 roc_nix_stats_queue_reset(struct roc_nix *roc_nix, uint16_t qid, bool is_rx)
249 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
252 rc = queue_is_valid(nix, qid, is_rx);
257 rc = nix_stat_rx_queue_reset(nix, qid);
259 rc = nix_stat_tx_queue_reset(nix, qid);
266 roc_nix_xstats_get(struct roc_nix *roc_nix, struct roc_nix_xstat *xstats,
269 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
270 struct mbox *mbox = (&nix->dev)->mbox;
271 struct cgx_stats_rsp *cgx_resp;
272 struct rpm_stats_rsp *rpm_resp;
273 uint64_t i, count = 0;
278 xstat_cnt = roc_nix_num_xstats_get(roc_nix);
285 memset(xstats, 0, (xstat_cnt * sizeof(*xstats)));
286 for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS; i++) {
287 xstats[count].value = NIX_TX_STATS(nix_tx_xstats[i].offset);
288 xstats[count].id = count;
292 for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS; i++) {
293 xstats[count].value = NIX_RX_STATS(nix_rx_xstats[i].offset);
294 xstats[count].id = count;
298 for (i = 0; i < nix->nb_rx_queues; i++)
299 xstats[count].value +=
300 qstat_read(nix, i, nix_q_xstats[0].offset);
302 xstats[count].id = count;
305 if (roc_nix_is_vf_or_sdp(roc_nix))
308 if (roc_model_is_cn9k()) {
309 req = mbox_alloc_msg_cgx_stats(mbox);
313 req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
315 rc = mbox_process_msg(mbox, (void *)&cgx_resp);
319 for (i = 0; i < roc_nix_num_rx_xstats(); i++) {
320 xstats[count].value =
321 cgx_resp->rx_stats[nix_rx_xstats_cgx[i].offset];
322 xstats[count].id = count;
326 for (i = 0; i < roc_nix_num_tx_xstats(); i++) {
327 xstats[count].value =
328 cgx_resp->tx_stats[nix_tx_xstats_cgx[i].offset];
329 xstats[count].id = count;
333 req = mbox_alloc_msg_rpm_stats(mbox);
337 req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
339 rc = mbox_process_msg(mbox, (void *)&rpm_resp);
343 for (i = 0; i < roc_nix_num_rx_xstats(); i++) {
344 xstats[count].value =
345 rpm_resp->rx_stats[nix_rx_xstats_rpm[i].offset];
346 xstats[count].id = count;
350 for (i = 0; i < roc_nix_num_tx_xstats(); i++) {
351 xstats[count].value =
352 rpm_resp->tx_stats[nix_tx_xstats_rpm[i].offset];
353 xstats[count].id = count;
357 for (i = 0; i < CNXK_NIX_NUM_CN10K_RX_XSTATS; i++) {
358 xstats[count].value =
359 NIX_RX_STATS(nix_cn10k_rx_xstats[i].offset);
360 xstats[count].id = count;
369 roc_nix_xstats_names_get(struct roc_nix *roc_nix,
370 struct roc_nix_xstat_name *xstats_names,
373 uint64_t i, count = 0;
376 xstat_cnt = roc_nix_num_xstats_get(roc_nix);
377 if (limit < xstat_cnt && xstats_names != NULL)
381 for (i = 0; i < CNXK_NIX_NUM_TX_XSTATS; i++) {
382 snprintf(xstats_names[count].name,
383 sizeof(xstats_names[count].name), "%s",
384 nix_tx_xstats[i].name);
388 for (i = 0; i < CNXK_NIX_NUM_RX_XSTATS; i++) {
389 snprintf(xstats_names[count].name,
390 sizeof(xstats_names[count].name), "%s",
391 nix_rx_xstats[i].name);
394 for (i = 0; i < CNXK_NIX_NUM_QUEUE_XSTATS; i++) {
395 snprintf(xstats_names[count].name,
396 sizeof(xstats_names[count].name), "%s",
397 nix_q_xstats[i].name);
401 if (roc_nix_is_vf_or_sdp(roc_nix))
404 if (roc_model_is_cn9k()) {
405 for (i = 0; i < roc_nix_num_rx_xstats(); i++) {
406 snprintf(xstats_names[count].name,
407 sizeof(xstats_names[count].name), "%s",
408 nix_rx_xstats_cgx[i].name);
412 for (i = 0; i < roc_nix_num_tx_xstats(); i++) {
413 snprintf(xstats_names[count].name,
414 sizeof(xstats_names[count].name), "%s",
415 nix_tx_xstats_cgx[i].name);
419 for (i = 0; i < roc_nix_num_rx_xstats(); i++) {
420 snprintf(xstats_names[count].name,
421 sizeof(xstats_names[count].name), "%s",
422 nix_rx_xstats_rpm[i].name);
426 for (i = 0; i < roc_nix_num_tx_xstats(); i++) {
427 snprintf(xstats_names[count].name,
428 sizeof(xstats_names[count].name), "%s",
429 nix_tx_xstats_rpm[i].name);
433 for (i = 0; i < CNXK_NIX_NUM_CN10K_RX_XSTATS; i++) {
434 snprintf(xstats_names[count].name,
435 sizeof(xstats_names[count].name), "%s",
436 nix_cn10k_rx_xstats[i].name);