/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #ifndef __OTX2_EVDEV_STATS_H__
6 #define __OTX2_EVDEV_STATS_H__
8 #include "otx2_evdev.h"
/* Descriptor for one SSO extended statistic: the user-visible name plus
 * per-group snapshot storage used to emulate xstats reset in software.
 * NOTE(review): the fields holding the stat's register offset, mask and
 * shift appear to be elided from this view of the file -- they are
 * referenced later as xstat->offset / xstat->mask / xstat->shift;
 * confirm against the full source.
 */
struct otx2_sso_xstats_name {
	const char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
	/* Value captured at the last xstats reset, one slot per VHGRP;
	 * the get path subtracts this so counters appear reset. */
	uint64_t reset_snap[OTX2_SSO_MAX_VHGRP];
/* Per-HWS (workslot/event port) xstats table: maps each stat name to a
 * field offset inside struct sso_hws_stats returned over the AF mbox.
 * NOTE(review): each initializer looks truncated in this view (trailing
 * mask/shift values and closing braces elided) -- confirm against the
 * full source before editing.
 */
static struct otx2_sso_xstats_name sso_hws_xstats[] = {
	{"last_grp_serviced", offsetof(struct sso_hws_stats, arbitration),
	{"affinity_arbitration_credits",
	 offsetof(struct sso_hws_stats, arbitration),
/* Per-group (event queue) xstats table: maps each stat name to a field
 * offset inside struct sso_grp_stats, with a mask (and an elided shift)
 * used to extract sub-fields of a 64-bit register.
 * NOTE(review): initializers look truncated in this view (trailing
 * fields/closing braces elided) -- confirm against the full source.
 */
static struct otx2_sso_xstats_name sso_grp_xstats[] = {
	{"wrk_sched", offsetof(struct sso_grp_stats, ws_pc), ~0x0, 0,
	{"xaq_dram", offsetof(struct sso_grp_stats, ext_pc), ~0x0,
	{"add_wrk", offsetof(struct sso_grp_stats, wa_pc), ~0x0, 0,
	{"tag_switch_req", offsetof(struct sso_grp_stats, ts_pc), ~0x0, 0,
	{"desched_req", offsetof(struct sso_grp_stats, ds_pc), ~0x0, 0,
	{"desched_wrk", offsetof(struct sso_grp_stats, dq_pc), ~0x0, 0,
	/* aw_status sub-fields: masks 0x3 / 0x3F select narrow bit ranges
	 * (after the elided shift) rather than the whole register. */
	{"xaq_cached", offsetof(struct sso_grp_stats, aw_status), 0x3,
	{"work_inflight", offsetof(struct sso_grp_stats, aw_status), 0x3F,
	{"inuse_pages", offsetof(struct sso_grp_stats, page_cnt),
/* Counts derived from the tables above; the global xstat id space is
 * laid out as [0, NUM_HWS) for port stats followed by
 * [NUM_HWS, NUM_XSTATS) for queue/group stats. */
#define OTX2_SSO_NUM_HWS_XSTATS RTE_DIM(sso_hws_xstats)
#define OTX2_SSO_NUM_GRP_XSTATS RTE_DIM(sso_grp_xstats)
#define OTX2_SSO_NUM_XSTATS (OTX2_SSO_NUM_HWS_XSTATS + OTX2_SSO_NUM_GRP_XSTATS)
/*
 * Read extended stats for the requested mode (device/port/queue).
 *
 * Fetches raw counters from the AF over mbox, extracts each requested
 * stat via its offset/shift/mask descriptor, and subtracts the reset
 * snapshot so counters appear reset from the application's viewpoint.
 *
 * NOTE(review): several lines are elided from this view of the file
 * (function return type, `switch (mode)` header, range-check error
 * returns, `break`s and closing braces) -- the comments below describe
 * only what the visible lines establish.
 */
otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
		    enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		    const unsigned int ids[], uint64_t values[], unsigned int n)
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_sso_xstats_name *xstats;
	struct otx2_sso_xstats_name *xstat;
	struct otx2_mbox *mbox = dev->mbox;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	case RTE_EVENT_DEV_XSTATS_DEVICE:
	case RTE_EVENT_DEV_XSTATS_PORT:
		/* Port ids are validated against the configured port count. */
		if (queue_port_id >= (signed int)dev->nb_event_ports)
		xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_hws_xstats;
		/* Request this workslot's counters from the AF via mbox. */
		req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->hws = queue_port_id;
		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)dev->nb_event_queues)
		xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
		/* Queue stat ids follow the HWS ids in the global id space,
		 * so index into sso_grp_xstats relative to this offset. */
		start_offset = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_grp_xstats;
		req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->grp = queue_port_id;
		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
		otx2_err("Invalid mode received");
	for (i = 0; i < n && i < xstats_mode_count; i++) {
		xstat = &xstats[ids[i] - start_offset];
		/* Pull the 64-bit field out of the raw mbox response, then
		 * isolate the stat's bit range via its shift and mask. */
		value = *(uint64_t *)((char *)req_rsp + xstat->offset);
		value = (value >> xstat->shift) & xstat->mask;
		/* Subtract the snapshot taken at the last xstats reset so
		 * the counter appears reset to the caller. */
		values[i] -= xstat->reset_snap[queue_port_id];
/*
 * Reset extended stats for the requested mode (device/port/queue).
 *
 * The SSO counters cannot be cleared in hardware here; instead the
 * current value of each selected stat is captured into
 * xstat->reset_snap[] and later subtracted in otx2_sso_xstats_get().
 *
 * NOTE(review): as with the get path, the switch header, error returns
 * and closing braces are elided from this view of the file.
 */
otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
		      enum rte_event_dev_xstats_mode mode,
		      int16_t queue_port_id, const uint32_t ids[], uint32_t n)
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_sso_xstats_name *xstats;
	struct otx2_sso_xstats_name *xstat;
	struct otx2_mbox *mbox = dev->mbox;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	case RTE_EVENT_DEV_XSTATS_DEVICE:
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)dev->nb_event_ports)
		xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_hws_xstats;
		req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->hws = queue_port_id;
		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)dev->nb_event_queues)
		xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
		start_offset = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_grp_xstats;
		req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->grp = queue_port_id;
		/* NOTE(review): this cast is (void *) while every other call
		 * to otx2_mbox_process_msg() in this file uses
		 * (void **)&req_rsp -- likely a typo; confirm and align. */
		rc = otx2_mbox_process_msg(mbox, (void *)&req_rsp);
		otx2_err("Invalid mode received");
	for (i = 0; i < n && i < xstats_mode_count; i++) {
		xstat = &xstats[ids[i] - start_offset];
		value = *(uint64_t *)((char *)req_rsp + xstat->offset);
		value = (value >> xstat->shift) & xstat->mask;
		/* Record the current value as the new reset baseline.
		 * NOTE(review): queue_port_id is int16_t here; indexing
		 * reset_snap[] assumes the (elided) mode/range checks
		 * reject negative ids -- confirm. */
		xstat->reset_snap[queue_port_id] = value;
/*
 * Return the xstat names (and ids) available for the requested mode.
 *
 * Builds a scratch table holding all HWS names followed by all group
 * names, then copies out the slice that corresponds to the mode. If
 * the caller's buffers are too small (or NULL), only the count is
 * returned so the caller can size its arrays.
 *
 * NOTE(review): the function's tail (id fill-in, return, closing brace)
 * runs past the end of this view of the file; switch header/breaks are
 * also elided.
 */
otx2_sso_xstats_get_names(const struct rte_eventdev *event_dev,
			  enum rte_event_dev_xstats_mode mode,
			  uint8_t queue_port_id,
			  struct rte_event_dev_xstats_name *xstats_names,
			  unsigned int *ids, unsigned int size)
	struct rte_event_dev_xstats_name xstats_names_copy[OTX2_SSO_NUM_XSTATS];
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	unsigned int xidx = 0;
	/* HWS names occupy the first OTX2_SSO_NUM_HWS_XSTATS slots... */
	for (i = 0; i < OTX2_SSO_NUM_HWS_XSTATS; i++) {
		snprintf(xstats_names_copy[i].name,
			 sizeof(xstats_names_copy[i].name), "%s",
			 sso_hws_xstats[i].name);
	/* ...followed by the group (queue) names. */
	for (; i < OTX2_SSO_NUM_XSTATS; i++) {
		snprintf(xstats_names_copy[i].name,
			 sizeof(xstats_names_copy[i].name), "%s",
			 sso_grp_xstats[i - OTX2_SSO_NUM_HWS_XSTATS].name);
	case RTE_EVENT_DEV_XSTATS_DEVICE:
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)dev->nb_event_ports)
		xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)dev->nb_event_queues)
		xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
		start_offset = OTX2_SSO_NUM_HWS_XSTATS;
		otx2_err("Invalid mode received");
	/* Count-only query: not enough room, or caller passed NULL arrays. */
	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;
	for (i = 0; i < xstats_mode_count; i++) {
		xidx = i + start_offset;
		/* strncpy is safe here: src and dst name fields are the same
		 * size and the source was NUL-terminated by snprintf above. */
		strncpy(xstats_names[i].name, xstats_names_copy[xidx].name,
			sizeof(xstats_names[i].name));