/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Ericsson AB
 */
#include "dsw_evdev.h"

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_debug.h>
/* The high bits in the xstats id are used to store an additional
 * parameter (beyond the queue or port id already in the xstats
 * interface).
 */
#define DSW_XSTATS_ID_PARAM_BITS (8)
#define DSW_XSTATS_ID_STAT_BITS \
	(sizeof(unsigned int)*CHAR_BIT - DSW_XSTATS_ID_PARAM_BITS)
#define DSW_XSTATS_ID_STAT_MASK ((1 << DSW_XSTATS_ID_STAT_BITS) - 1)

/* Extracts the parameter (e.g., a queue id) from the high bits. */
#define DSW_XSTATS_ID_GET_PARAM(id) \
	((id)>>DSW_XSTATS_ID_STAT_BITS)

/* Extracts the statistic table index from the low bits. */
#define DSW_XSTATS_ID_GET_STAT(id) \
	((id) & DSW_XSTATS_ID_STAT_MASK)

/* Combines a statistic index and a parameter into an xstats id.
 * 'id' is parenthesized so a compound caller expression binds
 * correctly against the '|' operator.
 */
#define DSW_XSTATS_ID_CREATE(id, param_value) \
	(((param_value) << DSW_XSTATS_ID_STAT_BITS) | (id))
/* Getter for a device-level statistic's current value. */
typedef uint64_t (*dsw_xstats_dev_get_value_fn)(struct dsw_evdev *dsw);

/* A device-level xstat: its name plus the function retrieving it. */
struct dsw_xstat_dev {
	const char *name;
	dsw_xstats_dev_get_value_fn get_value_fn;
};
/* Getter for a per-port (and optionally per-queue) statistic. */
typedef uint64_t (*dsw_xstats_port_get_value_fn)(struct dsw_evdev *dsw,
						 uint8_t port_id,
						 uint8_t queue_id);

/* A port-level xstat: printf-style name format (consuming the port
 * id, plus the queue id when per_queue is set), its getter, and
 * whether one instance exists per configured queue.
 */
struct dsw_xstats_port {
	const char *name_fmt;
	dsw_xstats_port_get_value_fn get_value_fn;
	bool per_queue;
};
49 dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
51 return rte_atomic32_read(&dsw->credits_on_loan);
54 static struct dsw_xstat_dev dsw_dev_xstats[] = {
55 { "dev_credits_on_loan", dsw_xstats_dev_credits_on_loan }
/* Generates a getter returning the per-port counter '_variable'.
 * The queue id parameter is accepted (to fit the common getter
 * signature) but ignored.
 */
#define DSW_GEN_PORT_ACCESS_FN(_variable)			\
	static uint64_t						\
	dsw_xstats_port_get_ ## _variable(struct dsw_evdev *dsw,\
					  uint8_t port_id,	\
					  uint8_t queue_id __rte_unused)\
	{							\
		return dsw->ports[port_id]._variable;		\
	}
67 DSW_GEN_PORT_ACCESS_FN(new_enqueued)
68 DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
69 DSW_GEN_PORT_ACCESS_FN(release_enqueued)
72 dsw_xstats_port_get_queue_enqueued(struct dsw_evdev *dsw, uint8_t port_id,
75 return dsw->ports[port_id].queue_enqueued[queue_id];
78 DSW_GEN_PORT_ACCESS_FN(dequeued)
81 dsw_xstats_port_get_queue_dequeued(struct dsw_evdev *dsw, uint8_t port_id,
84 return dsw->ports[port_id].queue_dequeued[queue_id];
87 DSW_GEN_PORT_ACCESS_FN(emigrations)
88 DSW_GEN_PORT_ACCESS_FN(immigrations)
91 dsw_xstats_port_get_migration_latency(struct dsw_evdev *dsw, uint8_t port_id,
92 uint8_t queue_id __rte_unused)
94 uint64_t total_latency = dsw->ports[port_id].emigration_latency;
95 uint64_t num_emigrations = dsw->ports[port_id].emigrations;
97 return num_emigrations > 0 ? total_latency / num_emigrations : 0;
101 dsw_xstats_port_get_event_proc_latency(struct dsw_evdev *dsw, uint8_t port_id,
102 uint8_t queue_id __rte_unused)
104 uint64_t total_busy_cycles =
105 dsw->ports[port_id].total_busy_cycles;
107 dsw->ports[port_id].dequeued;
109 return dequeued > 0 ? total_busy_cycles / dequeued : 0;
113 dsw_xstats_port_get_busy_cycles(struct dsw_evdev *dsw, uint8_t port_id,
114 uint8_t queue_id __rte_unused)
116 return dsw->ports[port_id].total_busy_cycles;
119 DSW_GEN_PORT_ACCESS_FN(inflight_credits)
121 DSW_GEN_PORT_ACCESS_FN(pending_releases)
124 dsw_xstats_port_get_load(struct dsw_evdev *dsw, uint8_t port_id,
125 uint8_t queue_id __rte_unused)
129 load = rte_atomic16_read(&dsw->ports[port_id].load);
131 return DSW_LOAD_TO_PERCENT(load);
134 DSW_GEN_PORT_ACCESS_FN(last_bg)
136 static struct dsw_xstats_port dsw_port_xstats[] = {
137 { "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued,
139 { "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued,
141 { "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued,
143 { "port_%u_queue_%u_enqueued", dsw_xstats_port_get_queue_enqueued,
145 { "port_%u_dequeued", dsw_xstats_port_get_dequeued,
147 { "port_%u_queue_%u_dequeued", dsw_xstats_port_get_queue_dequeued,
149 { "port_%u_emigrations", dsw_xstats_port_get_emigrations,
151 { "port_%u_migration_latency", dsw_xstats_port_get_migration_latency,
153 { "port_%u_immigrations", dsw_xstats_port_get_immigrations,
155 { "port_%u_event_proc_latency", dsw_xstats_port_get_event_proc_latency,
157 { "port_%u_busy_cycles", dsw_xstats_port_get_busy_cycles,
159 { "port_%u_inflight_credits", dsw_xstats_port_get_inflight_credits,
161 { "port_%u_pending_releases", dsw_xstats_port_get_pending_releases,
163 { "port_%u_load", dsw_xstats_port_get_load,
165 { "port_%u_last_bg", dsw_xstats_port_get_last_bg,
/* Callback invoked once per statistic by the foreach helpers below. */
typedef void (*dsw_xstats_foreach_fn)(const char *xstats_name,
				      enum rte_event_dev_xstats_mode mode,
				      uint8_t queue_port_id,
				      unsigned int xstats_id,
				      void *data);
176 dsw_xstats_dev_foreach(dsw_xstats_foreach_fn fn, void *fn_data)
180 for (i = 0; i < RTE_DIM(dsw_dev_xstats); i++)
181 fn(dsw_dev_xstats[i].name, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
186 dsw_xstats_port_foreach(struct dsw_evdev *dsw, uint8_t port_id,
187 dsw_xstats_foreach_fn fn, void *fn_data)
190 unsigned int stat_idx;
192 for (stat_idx = 0, queue_id = 0;
193 stat_idx < RTE_DIM(dsw_port_xstats);) {
194 struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
195 char xstats_name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
196 unsigned int xstats_id;
198 if (xstat->per_queue) {
199 xstats_id = DSW_XSTATS_ID_CREATE(stat_idx, queue_id);
200 snprintf(xstats_name, sizeof(xstats_name),
201 dsw_port_xstats[stat_idx].name_fmt, port_id,
205 xstats_id = stat_idx;
206 snprintf(xstats_name, sizeof(xstats_name),
207 dsw_port_xstats[stat_idx].name_fmt, port_id);
210 fn(xstats_name, RTE_EVENT_DEV_XSTATS_PORT, port_id,
213 if (!(xstat->per_queue && queue_id < dsw->num_queues)) {
/* Accumulator for collecting xstats names and ids into the
 * caller-supplied arrays of dsw_xstats_get_names().
 */
struct store_ctx {
	struct rte_event_dev_xstats_name *names;
	unsigned int *ids;
	unsigned int count;
	unsigned int capacity;
};
228 dsw_xstats_store_stat(const char *xstats_name,
229 enum rte_event_dev_xstats_mode mode,
230 uint8_t queue_port_id, unsigned int xstats_id,
233 struct store_ctx *ctx = data;
236 RTE_SET_USED(queue_port_id);
238 if (ctx->count < ctx->capacity) {
239 strcpy(ctx->names[ctx->count].name, xstats_name);
240 ctx->ids[ctx->count] = xstats_id;
247 dsw_xstats_get_names(const struct rte_eventdev *dev,
248 enum rte_event_dev_xstats_mode mode,
249 uint8_t queue_port_id,
250 struct rte_event_dev_xstats_name *xstats_names,
251 unsigned int *ids, unsigned int capacity)
253 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
255 struct store_ctx ctx = {
256 .names = xstats_names,
262 case RTE_EVENT_DEV_XSTATS_DEVICE:
263 dsw_xstats_dev_foreach(dsw_xstats_store_stat, &ctx);
265 case RTE_EVENT_DEV_XSTATS_PORT:
266 dsw_xstats_port_foreach(dsw, queue_port_id,
267 dsw_xstats_store_stat, &ctx);
269 case RTE_EVENT_DEV_XSTATS_QUEUE:
278 dsw_xstats_dev_get(const struct rte_eventdev *dev,
279 const unsigned int ids[], uint64_t values[], unsigned int n)
281 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
284 for (i = 0; i < n; i++) {
285 unsigned int id = ids[i];
286 struct dsw_xstat_dev *xstat = &dsw_dev_xstats[id];
287 values[i] = xstat->get_value_fn(dsw);
293 dsw_xstats_port_get(const struct rte_eventdev *dev, uint8_t port_id,
294 const unsigned int ids[], uint64_t values[], unsigned int n)
296 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
299 for (i = 0; i < n; i++) {
300 unsigned int id = ids[i];
301 unsigned int stat_idx = DSW_XSTATS_ID_GET_STAT(id);
302 struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
303 uint8_t queue_id = 0;
305 if (xstat->per_queue)
306 queue_id = DSW_XSTATS_ID_GET_PARAM(id);
308 values[i] = xstat->get_value_fn(dsw, port_id, queue_id);
314 dsw_xstats_get(const struct rte_eventdev *dev,
315 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
316 const unsigned int ids[], uint64_t values[], unsigned int n)
319 case RTE_EVENT_DEV_XSTATS_DEVICE:
320 return dsw_xstats_dev_get(dev, ids, values, n);
321 case RTE_EVENT_DEV_XSTATS_PORT:
322 return dsw_xstats_port_get(dev, queue_port_id, ids, values, n);
323 case RTE_EVENT_DEV_XSTATS_QUEUE:
/* State for looking up a single statistic by name. */
struct find_ctx {
	const struct rte_eventdev *dev;
	const char *name;
	unsigned int *id;
	uint64_t value;
	bool found;
};
340 dsw_xstats_find_stat(const char *xstats_name,
341 enum rte_event_dev_xstats_mode mode,
342 uint8_t queue_port_id, unsigned int xstats_id,
345 struct find_ctx *ctx = data;
347 if (strcmp(ctx->name, xstats_name) == 0) {
349 *ctx->id = xstats_id;
350 dsw_xstats_get(ctx->dev, mode, queue_port_id, &xstats_id,
356 dsw_xstats_get_by_name(const struct rte_eventdev *dev, const char *name,
359 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
362 struct find_ctx ctx = {
369 dsw_xstats_dev_foreach(dsw_xstats_find_stat, &ctx);
371 for (port_id = 0; port_id < dsw->num_ports; port_id++)
372 dsw_xstats_port_foreach(dsw, port_id, dsw_xstats_find_stat,