/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Ericsson AB
 */
#include "dsw_evdev.h"

#include <limits.h>
#include <stdbool.h>
#include <string.h>

#include <rte_debug.h>
/* The high bits in the xstats id is used to store an additional
 * parameter (beyond the queue or port id already in the xstats
 * interface).
 */
#define DSW_XSTATS_ID_PARAM_BITS (8)
#define DSW_XSTATS_ID_STAT_BITS					\
	(sizeof(unsigned int)*CHAR_BIT - DSW_XSTATS_ID_PARAM_BITS)
#define DSW_XSTATS_ID_STAT_MASK ((1 << DSW_XSTATS_ID_STAT_BITS) - 1)

/* Extract the extra parameter (e.g., the queue id) from an xstats id. */
#define DSW_XSTATS_ID_GET_PARAM(id)		\
	((id)>>DSW_XSTATS_ID_STAT_BITS)

/* Extract the statistic index from an xstats id. */
#define DSW_XSTATS_ID_GET_STAT(id)		\
	((id) & DSW_XSTATS_ID_STAT_MASK)

/* Combine a statistic index and a parameter into an xstats id. The
 * "id" argument is parenthesized to avoid operator-precedence
 * surprises when the caller passes a compound expression.
 */
#define DSW_XSTATS_ID_CREATE(id, param_value)			\
	(((param_value) << DSW_XSTATS_ID_STAT_BITS) | (id))
/* Accessor for a device-level statistic. */
typedef
uint64_t (*dsw_xstats_dev_get_value_fn)(struct dsw_evdev *dsw);

/* Device-level xstat descriptor: user-visible name plus accessor. */
struct dsw_xstat_dev {
	const char *name;
	dsw_xstats_dev_get_value_fn get_value_fn;
};

/* Accessor for a per-port (and, optionally, per-queue) statistic. */
typedef
uint64_t (*dsw_xstats_port_get_value_fn)(struct dsw_evdev *dsw,
					 uint8_t port_id, uint8_t queue_id);

/* Per-port xstat descriptor. name_fmt is a printf-style format
 * taking the port id and, when per_queue is set, the queue id as
 * well.
 */
struct dsw_xstats_port {
	const char *name_fmt;
	dsw_xstats_port_get_value_fn get_value_fn;
	bool per_queue;
};
49 dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
51 return rte_atomic32_read(&dsw->credits_on_loan);
54 static struct dsw_xstat_dev dsw_dev_xstats[] = {
55 { "dev_credits_on_loan", dsw_xstats_dev_credits_on_loan }
/* Generates a trivial per-port accessor function returning the
 * dsw_port field named by _variable; the queue id is ignored.
 */
#define DSW_GEN_PORT_ACCESS_FN(_variable)			\
	static uint64_t						\
	dsw_xstats_port_get_ ## _variable(struct dsw_evdev *dsw, \
					  uint8_t port_id,	\
					  uint8_t queue_id __rte_unused) \
	{							\
		return dsw->ports[port_id]._variable;		\
	}
67 DSW_GEN_PORT_ACCESS_FN(new_enqueued)
68 DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
69 DSW_GEN_PORT_ACCESS_FN(release_enqueued)
72 dsw_xstats_port_get_queue_enqueued(struct dsw_evdev *dsw, uint8_t port_id,
75 return dsw->ports[port_id].queue_enqueued[queue_id];
78 DSW_GEN_PORT_ACCESS_FN(dequeued)
81 dsw_xstats_port_get_queue_dequeued(struct dsw_evdev *dsw, uint8_t port_id,
84 return dsw->ports[port_id].queue_dequeued[queue_id];
87 DSW_GEN_PORT_ACCESS_FN(migrations)
90 dsw_xstats_port_get_migration_latency(struct dsw_evdev *dsw, uint8_t port_id,
91 uint8_t queue_id __rte_unused)
93 uint64_t total_latency = dsw->ports[port_id].migration_latency;
94 uint64_t num_migrations = dsw->ports[port_id].migrations;
96 return num_migrations > 0 ? total_latency / num_migrations : 0;
100 dsw_xstats_port_get_event_proc_latency(struct dsw_evdev *dsw, uint8_t port_id,
101 uint8_t queue_id __rte_unused)
103 uint64_t total_busy_cycles =
104 dsw->ports[port_id].total_busy_cycles;
106 dsw->ports[port_id].dequeued;
108 return dequeued > 0 ? total_busy_cycles / dequeued : 0;
111 DSW_GEN_PORT_ACCESS_FN(inflight_credits)
114 dsw_xstats_port_get_load(struct dsw_evdev *dsw, uint8_t port_id,
115 uint8_t queue_id __rte_unused)
119 load = rte_atomic16_read(&dsw->ports[port_id].load);
121 return DSW_LOAD_TO_PERCENT(load);
124 DSW_GEN_PORT_ACCESS_FN(last_bg)
126 static struct dsw_xstats_port dsw_port_xstats[] = {
127 { "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued,
129 { "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued,
131 { "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued,
133 { "port_%u_queue_%u_enqueued", dsw_xstats_port_get_queue_enqueued,
135 { "port_%u_dequeued", dsw_xstats_port_get_dequeued,
137 { "port_%u_queue_%u_dequeued", dsw_xstats_port_get_queue_dequeued,
139 { "port_%u_migrations", dsw_xstats_port_get_migrations,
141 { "port_%u_migration_latency", dsw_xstats_port_get_migration_latency,
143 { "port_%u_event_proc_latency", dsw_xstats_port_get_event_proc_latency,
145 { "port_%u_inflight_credits", dsw_xstats_port_get_inflight_credits,
147 { "port_%u_load", dsw_xstats_port_get_load,
149 { "port_%u_last_bg", dsw_xstats_port_get_last_bg,
154 void (*dsw_xstats_foreach_fn)(const char *xstats_name,
155 enum rte_event_dev_xstats_mode mode,
156 uint8_t queue_port_id, unsigned int xstats_id,
160 dsw_xstats_dev_foreach(dsw_xstats_foreach_fn fn, void *fn_data)
164 for (i = 0; i < RTE_DIM(dsw_dev_xstats); i++)
165 fn(dsw_dev_xstats[i].name, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
170 dsw_xstats_port_foreach(struct dsw_evdev *dsw, uint8_t port_id,
171 dsw_xstats_foreach_fn fn, void *fn_data)
174 unsigned int stat_idx;
176 for (stat_idx = 0, queue_id = 0;
177 stat_idx < RTE_DIM(dsw_port_xstats);) {
178 struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
179 char xstats_name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
180 unsigned int xstats_id;
182 if (xstat->per_queue) {
183 xstats_id = DSW_XSTATS_ID_CREATE(stat_idx, queue_id);
184 snprintf(xstats_name, sizeof(xstats_name),
185 dsw_port_xstats[stat_idx].name_fmt, port_id,
189 xstats_id = stat_idx;
190 snprintf(xstats_name, sizeof(xstats_name),
191 dsw_port_xstats[stat_idx].name_fmt, port_id);
194 fn(xstats_name, RTE_EVENT_DEV_XSTATS_PORT, port_id,
197 if (!(xstat->per_queue && queue_id < dsw->num_queues)) {
/* Context for dsw_xstats_store_stat: output arrays, how many slots
 * they hold (capacity), and how many stats were encountered (count;
 * may exceed capacity, in which case the excess is not stored).
 */
struct store_ctx {
	struct rte_event_dev_xstats_name *names;
	unsigned int *ids;
	unsigned int count;
	unsigned int capacity;
};
212 dsw_xstats_store_stat(const char *xstats_name,
213 enum rte_event_dev_xstats_mode mode,
214 uint8_t queue_port_id, unsigned int xstats_id,
217 struct store_ctx *ctx = data;
220 RTE_SET_USED(queue_port_id);
222 if (ctx->count < ctx->capacity) {
223 strcpy(ctx->names[ctx->count].name, xstats_name);
224 ctx->ids[ctx->count] = xstats_id;
231 dsw_xstats_get_names(const struct rte_eventdev *dev,
232 enum rte_event_dev_xstats_mode mode,
233 uint8_t queue_port_id,
234 struct rte_event_dev_xstats_name *xstats_names,
235 unsigned int *ids, unsigned int capacity)
237 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
239 struct store_ctx ctx = {
240 .names = xstats_names,
246 case RTE_EVENT_DEV_XSTATS_DEVICE:
247 dsw_xstats_dev_foreach(dsw_xstats_store_stat, &ctx);
249 case RTE_EVENT_DEV_XSTATS_PORT:
250 dsw_xstats_port_foreach(dsw, queue_port_id,
251 dsw_xstats_store_stat, &ctx);
253 case RTE_EVENT_DEV_XSTATS_QUEUE:
262 dsw_xstats_dev_get(const struct rte_eventdev *dev,
263 const unsigned int ids[], uint64_t values[], unsigned int n)
265 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
268 for (i = 0; i < n; i++) {
269 unsigned int id = ids[i];
270 struct dsw_xstat_dev *xstat = &dsw_dev_xstats[id];
271 values[i] = xstat->get_value_fn(dsw);
277 dsw_xstats_port_get(const struct rte_eventdev *dev, uint8_t port_id,
278 const unsigned int ids[], uint64_t values[], unsigned int n)
280 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
283 for (i = 0; i < n; i++) {
284 unsigned int id = ids[i];
285 unsigned int stat_idx = DSW_XSTATS_ID_GET_STAT(id);
286 struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
287 uint8_t queue_id = 0;
289 if (xstat->per_queue)
290 queue_id = DSW_XSTATS_ID_GET_PARAM(id);
292 values[i] = xstat->get_value_fn(dsw, port_id, queue_id);
298 dsw_xstats_get(const struct rte_eventdev *dev,
299 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
300 const unsigned int ids[], uint64_t values[], unsigned int n)
303 case RTE_EVENT_DEV_XSTATS_DEVICE:
304 return dsw_xstats_dev_get(dev, ids, values, n);
305 case RTE_EVENT_DEV_XSTATS_PORT:
306 return dsw_xstats_port_get(dev, queue_port_id, ids, values, n);
307 case RTE_EVENT_DEV_XSTATS_QUEUE:
317 const struct rte_eventdev *dev;
324 dsw_xstats_find_stat(const char *xstats_name,
325 enum rte_event_dev_xstats_mode mode,
326 uint8_t queue_port_id, unsigned int xstats_id,
329 struct find_ctx *ctx = data;
331 if (strcmp(ctx->name, xstats_name) == 0) {
333 *ctx->id = xstats_id;
334 dsw_xstats_get(ctx->dev, mode, queue_port_id, &xstats_id,
340 dsw_xstats_get_by_name(const struct rte_eventdev *dev, const char *name,
343 struct dsw_evdev *dsw = dsw_pmd_priv(dev);
346 struct find_ctx ctx = {
353 dsw_xstats_dev_foreach(dsw_xstats_find_stat, &ctx);
355 for (port_id = 0; port_id < dsw->num_ports; port_id++)
356 dsw_xstats_port_foreach(dsw, port_id, dsw_xstats_find_stat,