dpdk.git: drivers/net/octeontx2/otx2_stats.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include "otx2_ethdev.h"

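/*
 * Retrieve the generic device statistics from the NIX LF hardware
 * counters. Device-wide counters are read from the NIX_LF_TX_STATX /
 * NIX_LF_RX_STATX register blocks; per-queue counters are fetched
 * through the NIX LF atomic operation registers further below.
 */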
int
otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_stats *stats)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t reg, val;
	uint32_t qidx, i;
	int64_t *addr;

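	/* Device-wide TX counters: opackets is the sum of the unicast,
	 * multicast and broadcast packet counters; TX drops are reported
	 * as oerrors.
	 */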
	stats->opackets = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_UCAST));
	stats->opackets += otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_MCAST));
	stats->opackets += otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_BCAST));
	stats->oerrors = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_DROP));
	stats->obytes = otx2_read64(dev->base +
			NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_OCTS));

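	/* Device-wide RX counters: ipackets is the sum of the unicast,
	 * multicast and broadcast packet counters; RX drops map to
	 * imissed and RX errors to ierrors.
	 */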
	stats->ipackets = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_UCAST));
	stats->ipackets += otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_MCAST));
	stats->ipackets += otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_BCAST));
	stats->imissed = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_DROP));
	stats->ibytes = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_OCTS));
	stats->ierrors = otx2_read64(dev->base +
			NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_ERR));

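	/* Per-queue TX counters for queues mapped to a stats index
	 * (bit 31 of the txmap entry set). The SQ counter is selected by
	 * placing the queue index in bits 63:32 of the operand passed to
	 * the atomic operation on the NIX_LF_SQ_OP_* register; a returned
	 * value with OP_ERR set indicates the read failed and is treated
	 * as zero.
	 */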
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		if (dev->txmap[i] & (1U << 31)) {
			qidx = dev->txmap[i] & 0xFFFF;
			reg = (((uint64_t)qidx) << 32);

			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_opackets[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_obytes[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_DROP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_errors[i] = val;
		}
	}

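	/* Per-queue RX counters, read the same way through the
	 * NIX_LF_RQ_OP_* registers. RQ drops are accumulated into
	 * q_errors on top of the SQ drops recorded above.
	 */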
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		if (dev->rxmap[i] & (1U << 31)) {
			qidx = dev->rxmap[i] & 0xFFFF;
			reg = (((uint64_t)qidx) << 32);

			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_ipackets[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_OCTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_ibytes[i] = val;

			addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_DROP_PKTS);
			val = otx2_atomic64_add_nosync(reg, addr);
			if (val & OP_ERR)
				val = 0;
			stats->q_errors[i] += val;
		}
	}

	return 0;
}

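/*
 * Reset the hardware statistics by issuing a NIX stats reset request
 * over the device mailbox.
 */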
void
otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;

	otx2_mbox_alloc_msg_nix_stats_rst(mbox);
	otx2_mbox_process(mbox);
}

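/*
 * Record the queue to stats-counter mapping requested through the
 * ethdev queue stats mapping operation. Bit 31 of the map entry marks
 * it valid and the low bits hold the queue id; the mapping is consumed
 * by otx2_nix_dev_stats_get() above.
 */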
int
otx2_nix_queue_stats_mapping(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			     uint8_t stat_idx, uint8_t is_rx)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	if (is_rx)
		dev->rxmap[stat_idx] = ((1U << 31) | queue_id);
	else
		dev->txmap[stat_idx] = ((1U << 31) | queue_id);

	return 0;
}