/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_event_ring.h>

#include "sw_evdev.h"
#include "iq_ring.h"
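
/*
 * Extended statistics (xstats) for the software eventdev PMD: sw_xstats_init()
 * builds a table of sw_xstats_entry descriptors, and the get/get_names/reset
 * callbacks below walk that table on behalf of the eventdev xstats API.
 */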

enum xstats_type {
	rx, tx, dropped, inflight, calls, credits, /* common stats */
	no_iq_enq, no_cq_enq, /* device instance specific */
	rx_used, rx_free, tx_used, tx_free, pkt_cycles, /* port specific */
	poll_return, /* for zero-count and used also for port bucket loop */
	iq_used, iq_size, /* qid specific */
	pinned, /* qid port mapping specific */
	pkts, /* note: qid-to-port pkts */
};

typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
		uint16_t obj_idx, /* port or queue id */
		enum xstats_type stat, int extra_arg);

struct sw_xstats_entry {
	struct rte_event_dev_xstats_name name;
	xstats_fn fn;
	uint16_t obj_idx;
	enum xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint8_t reset_allowed; /* when set, this value can be reset */
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
};
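
/* Resets are emulated rather than applied to the underlying counters: the get
 * paths below report fn() - reset_value, and a reset simply moves reset_value
 * up to the counter's current raw value.
 */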

static uint64_t
get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
		enum xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx: return sw->stats.rx_pkts;
	case tx: return sw->stats.tx_pkts;
	case dropped: return sw->stats.rx_dropped;
	case calls: return sw->sched_called;
	case no_iq_enq: return sw->sched_no_iq_enqueues;
	case no_cq_enq: return sw->sched_no_cq_enqueues;
	default: return -1;
	}
}

static uint64_t
get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case rx: return p->stats.rx_pkts;
	case tx: return p->stats.tx_pkts;
	case dropped: return p->stats.rx_dropped;
	case inflight: return p->inflights;
	case pkt_cycles: return p->avg_pkt_ticks;
	case calls: return p->total_polls;
	case credits: return p->inflight_credits;
	case poll_return: return p->zero_polls;
	case rx_used: return rte_event_ring_count(p->rx_worker_ring);
	case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
	case tx_used: return rte_event_ring_count(p->cq_worker_ring);
	case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
	default: return -1;
	}
}

static uint64_t
get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case poll_return: return p->poll_buckets[extra_arg];
	default: return -1;
	}
}

static uint64_t
get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];

	switch (type) {
	case rx: return qid->stats.rx_pkts;
	case tx: return qid->stats.tx_pkts;
	case dropped: return qid->stats.rx_dropped;
	case inflight: {
		uint64_t infl = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			infl += qid->fids[i].pcount;
		return infl;
	}
	case iq_size: return RTE_DIM(qid->iq[0]->ring);
	default: return -1;
	}
}

static uint64_t
get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	const int iq_idx = extra_arg;

	switch (type) {
	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
	default: return -1;
	}
}

static uint64_t
get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	uint16_t port = extra_arg;

	switch (type) {
	case pinned: {
		uint64_t pin = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			if (qid->fids[i].cq == port)
				pin++;
		return pin;
	}
	case pkts:
		return qid->to_port[port];
	default: return -1;
	}
}

int
sw_xstats_init(struct sw_evdev *sw)
{
	/*
	 * Define the stats names and types. Used to build up the device
	 * xstats array. There are multiple sets of stats:
	 *   - device-level
	 *   - per-port
	 *   - per-port-dequeue-burst-sizes
	 *   - per-qid
	 *   - per-iq
	 *   - per-qid-per-port
	 *
	 * For each of these sets, we have parallel arrays: one for the names,
	 * one for the stat type parameter to be passed in the fn call to get
	 * that stat, and a third that marks whether the stat may be reset.
	 * All these arrays must be kept in sync.
	 */
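	/* The names exposed to the application are built from these arrays,
	 * e.g. "dev_rx", "port_0_rx" or "qid_0_iq_size" (see the snprintf()
	 * format strings in the loops further down).
	 */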
	static const char * const dev_stats[] = { "rx", "tx", "drop",
		"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
	};
	static const enum xstats_type dev_types[] = { rx, tx, dropped,
		calls, no_iq_enq, no_cq_enq,
	};
	/* all device stats are allowed to be reset */

	static const char * const port_stats[] = {"rx", "tx", "drop",
		"inflight", "avg_pkt_cycles", "credits",
		"rx_ring_used", "rx_ring_free",
		"cq_ring_used", "cq_ring_free",
		"dequeue_calls", "dequeues_returning_0",
	};
	static const enum xstats_type port_types[] = { rx, tx, dropped,
		inflight, pkt_cycles, credits,
		rx_used, rx_free, tx_used, tx_free,
		calls, poll_return,
	};
	static const uint8_t port_reset_allowed[] = {1, 1, 1,
		0, 1, 0,
		0, 0, 0, 0,
		1, 1,
	};

	static const char * const port_bucket_stats[] = {
		"dequeues_returning" };
	static const enum xstats_type port_bucket_types[] = { poll_return };
	/* all bucket dequeues are allowed to be reset, handled in loop below */

	static const char * const qid_stats[] = {"rx", "tx", "drop",
		"inflight", "iq_size"
	};
	static const enum xstats_type qid_types[] = { rx, tx, dropped,
		inflight, iq_size
	};
	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
		0, 0
	};

	static const char * const qid_iq_stats[] = { "used" };
	static const enum xstats_type qid_iq_types[] = { iq_used };

	static const char * const qid_port_stats[] = { "pinned_flows",
		"packets"
	};
	static const enum xstats_type qid_port_types[] = { pinned, pkts };
	static const uint8_t qid_port_reset_allowed[] = {0, 1};

	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
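	/* (For example, { "rx" "tx", "drop" } would silently collapse into the
	 * two entries "rxtx" and "drop", leaving the name array shorter than
	 * its matching type array.)
	 */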
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
			RTE_DIM(port_bucket_types));

	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	const uint32_t cons_bkt_shift =
		(MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
	const unsigned int count = RTE_DIM(dev_stats) +
			sw->port_count * RTE_DIM(port_stats) +
			sw->port_count * RTE_DIM(port_bucket_stats) *
				(cons_bkt_shift + 1) +
			sw->qid_count * RTE_DIM(qid_stats) +
			sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
			sw->qid_count * sw->port_count *
				RTE_DIM(qid_port_stats);
	unsigned int i, port, qid, iq, bkt, stat = 0;
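
	/* Worked example of the worst-case count above, assuming typical sw
	 * PMD defaults of MAX_SW_CONS_Q_DEPTH = 128, SW_DEQ_STAT_BUCKET_SHIFT
	 * = 3 and SW_IQS_MAX = 4: with 2 ports and 1 qid, cons_bkt_shift = 16
	 * and count = 6 + 2*12 + 2*1*17 + 1*5 + 1*4*1 + 1*2*2 = 77 entries.
	 */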

	sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
			sw->data->socket_id);
	if (sw->xstats == NULL)
		return -ENOMEM;

#define sname sw->xstats[stat].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
		sw->xstats[stat] = (struct sw_xstats_entry){
			.fn = get_dev_stat,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = 1,
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	sw->xstats_count_mode_dev = stat;

	for (port = 0; port < sw->port_count; port++) {
		sw->xstats_offset_for_port[port] = stat;

		uint32_t count_offset = stat;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_port_stat,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
					port, port_stats[i]);
		}
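
		/* Dequeue bursts are also accounted into per-size buckets:
		 * bucket b covers burst sizes (b << SW_DEQ_STAT_BUCKET_SHIFT)
		 * + 1 through (b + 1) << SW_DEQ_STAT_BUCKET_SHIFT, as encoded
		 * in the stat name below.
		 */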
		for (bkt = 0; bkt < (rte_event_ring_get_capacity(
				sw->ports[port].cq_worker_ring) >>
					SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_port_bucket_stat,
					.obj_idx = port,
					.stat = port_bucket_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_PORT,
					.extra_arg = bkt,
					.reset_allowed = 1,
				};
				snprintf(sname, sizeof(sname),
					"port_%u_%s_%u-%u",
					port, port_bucket_stats[i],
					(bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
					(bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
				stat++;
			}
		}

		sw->xstats_count_per_port[port] = stat - count_offset;
	}

	sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;

	for (qid = 0; qid < sw->qid_count; qid++) {
		uint32_t count_offset = stat;
		sw->xstats_offset_for_qid[qid] = stat;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_qid_stat,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
					qid, qid_stats[i]);
		}
		for (iq = 0; iq < SW_IQS_MAX; iq++)
			for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_iq_stat,
					.obj_idx = qid,
					.stat = qid_iq_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = iq,
					.reset_allowed = 0,
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_iq_%u_%s",
						qid, iq,
						qid_iq_stats[i]);
			}

		for (port = 0; port < sw->port_count; port++)
			for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_port_stat,
					.obj_idx = qid,
					.stat = qid_port_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = port,
					.reset_allowed =
						qid_port_reset_allowed[i],
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_port_%u_%s",
						qid, port,
						qid_port_stats[i]);
			}

		sw->xstats_count_per_qid[qid] = stat - count_offset;
	}

	sw->xstats_count_mode_queue = stat -
		(sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
#undef sname

	sw->xstats_count = stat;

	return stat;
}

int
sw_xstats_uninit(struct sw_evdev *sw)
{
	rte_free(sw->xstats);
	sw->xstats_count = 0;
	return 0;
}

int
sw_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			break;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		start_offset = sw->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			break;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		start_offset = sw->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
		return -EINVAL;
	}

	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;

	for (i = 0; i < sw->xstats_count && xidx < size; i++) {
		if (sw->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != sw->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = sw->xstats[i].name;
		ids[xidx] = start_offset + xidx;
		xidx++;
	}

	return xidx;
}

static int
sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset,
		const uint32_t ret_if_n_lt_nstats)
{
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
		return -EINVAL;
	}

	/* This function can either check the number of stats and return that
	 * count when n is too small (xstats_get()-style behaviour), or ignore
	 * n (single-stat reset()-style behaviour).
	 */
	if (ret_if_n_lt_nstats && xstats_mode_count > n)
		return xstats_mode_count;

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		/* bounds-check the id before dereferencing the entry */
		if (ids[i] >= sw->xstats_count)
			continue;
		struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
		if (xs->mode != mode)
			continue;
		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != xs->obj_idx)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;
		if (values)
			values[xidx] = val;
		if (xs->reset_allowed && reset)
			xs->reset_value += val; /* new baseline = raw value */
		xidx++;
	}

	return xidx;
}

int
sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const uint32_t reset = 0;
	const uint32_t ret_n_lt_stats = 0;
	return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
			reset, ret_n_lt_stats);
}
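
/* A minimal usage sketch from the application side (illustrative only, not
 * part of this driver); dev_id here stands for the application's eventdev id.
 * The callbacks above are reached through the public eventdev xstats API:
 *
 *	struct rte_event_dev_xstats_name names[16];
 *	unsigned int ids[16];
 *	uint64_t vals[16];
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_PORT, 0, names, ids, 16);
 *	if (n > 0 && n <= 16)
 *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT,
 *				0, ids, vals, n);
 */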

uint64_t
sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;

	for (i = 0; i < sw->xstats_count; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (strncmp(xs->name.name, name,
				RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;
			return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-1;
	return (uint64_t)-1;
}
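
/* This is the handler behind rte_event_dev_xstats_by_name_get(); on a miss it
 * reports an id of UINT32_MAX and a value of UINT64_MAX.
 */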

static void
sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
{
	uint32_t i;
	for (i = start; i < start + num; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (!xs->reset_allowed)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;
		xs->reset_value += val; /* new baseline = raw value */
	}
}

static int
sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;

	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
			sw->xstats_count_per_qid[queue_id]);

	return 0;
}

static int
sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	int offset = sw->xstats_offset_for_port[port_id];
	int nb_stat = sw->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, offset, nb_stat);
	return 0;
}

static int
sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
{
	uint32_t i;

	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];
			if (id >= sw->xstats_count_mode_dev)
				return -EINVAL;
			sw_xstats_reset_range(sw, id, 1);
		}
	} else {
		for (i = 0; i < sw->xstats_count_mode_dev; i++)
			sw_xstats_reset_range(sw, i, 1);
	}

	return 0;
}

int
sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i, err;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		sw_xstats_reset_dev(sw, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->port_count; i++) {
				err = sw_xstats_reset_port(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->port_count)
			sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->qid_count; i++) {
				err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->qid_count)
			sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
		break;
	}

	return 0;
}
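
/* This callback backs rte_event_dev_xstats_reset(). As the comment above
 * notes, a queue_port_id of -1 applies the reset to every port or queue; only
 * entries with reset_allowed set are affected, and the reset is emulated by
 * advancing reset_value to the counter's current raw value.
 */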