/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <inttypes.h>

#include <rte_malloc.h>
#include <rte_eventdev.h>

#include "dlb2_priv.h"
#include "dlb2_inline_fns.h"
#include "pf/base/dlb2_regs.h"
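/*
 * This file implements the eventdev xstats callbacks for the DLB2 PMD.
 * Stats exist at three scopes: device-wide, per-port, and per-queue. They
 * are exposed under the names "dev_<stat>", "port_<n>_<stat>" and
 * "qid_<n>_<stat>" respectively, and are read through the standard eventdev
 * API. A minimal application-side sketch (the dev_id variable and the stat
 * name are illustrative):
 *
 *	unsigned int id;
 *	uint64_t val = rte_event_dev_xstats_by_name_get(dev_id,
 *							"dev_rx_ok", &id);
 */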
enum dlb2_xstats_type {
	/* common to device and port */
	rx_ok, /**< Receive an event */
	rx_drop, /**< Error bit set in received QE */
	rx_interrupt_wait, /**< Wait on an interrupt */
	rx_umonitor_umwait, /**< Block using umwait */
	tx_ok, /**< Transmit an event */
	total_polls, /**< Call dequeue_burst */
	zero_polls, /**< Call dequeue_burst and return 0 */
	tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
	tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
	tx_nospc_hw_credits, /**< Insufficient h/w credits */
	tx_nospc_inflight_max, /**< Reach the new_event_threshold */
	tx_nospc_new_event_limit, /**< Insufficient s/w credits */
	tx_nospc_inflight_credits, /**< Port has too few s/w credits */
	/* device specific */
	nb_events_limit,
	inflight_events,
	ldb_pool_size,
	dir_pool_size,
	pool_size,
	/* port specific */
	tx_new, /**< Send an OP_NEW event */
	tx_fwd, /**< Send an OP_FORWARD event */
	tx_rel, /**< Send an OP_RELEASE event */
	tx_implicit_rel, /**< Issue an implicit event release */
	tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
	tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
	tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
	tx_sched_directed, /**< Send a directed event */
	tx_invalid, /**< Send an event with an invalid op */
	outstanding_releases, /**< # of releases a port owes */
	max_outstanding_releases, /**< max # of releases a port can owe */
	rx_sched_ordered, /**< Dequeue an ordered event */
	rx_sched_unordered, /**< Dequeue an unordered event */
	rx_sched_atomic, /**< Dequeue an atomic event */
	rx_sched_directed, /**< Dequeue a directed event */
	rx_sched_invalid, /**< Dequeued event sched type is invalid */
	/* common to port and queue */
	is_configured, /**< Port is configured */
	is_load_balanced, /**< Port is LDB */
	hw_id, /**< Hardware ID */
	/* queue specific */
	num_links, /**< Number of ports linked */
	sched_type, /**< Queue sched type */
	enq_ok, /**< # events enqueued to the queue */
	current_depth, /**< Current queue depth */
	depth_threshold, /**< Programmed depth threshold */
	depth_le50_threshold,
	/**< Depth LE to 50% of the configured hardware threshold */
	depth_gt50_le75_threshold,
	/**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
	depth_gt75_le100_threshold,
	/**< Depth GT 75%, but LE to the configured hardware threshold */
	depth_gt100_threshold,
	/**< Depth GT 100% of the configured hw threshold */
};
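/*
 * One enum spans all three stat scopes; each accessor below (get_dev_stat,
 * get_port_stat, get_queue_stat) handles only the values valid for its
 * scope and returns -1 for anything else.
 */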
typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
		uint16_t obj_idx, /* port or queue id */
		enum dlb2_xstats_type stat, int extra_arg);
enum dlb2_xstats_fn_type {
	DLB2_XSTATS_FN_DEV,
	DLB2_XSTATS_FN_PORT,
	DLB2_XSTATS_FN_QUEUE
};
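/*
 * The fn_id stored in each xstats entry records which accessor services it;
 * it is mapped back to get_dev_stat/get_port_stat/get_queue_stat whenever
 * the entry is read or reset.
 */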
struct dlb2_xstats_entry {
	struct rte_event_dev_xstats_name name;
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
	enum dlb2_xstats_fn_type fn_id;
	enum dlb2_xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint16_t obj_idx;
	uint8_t reset_allowed; /* when set, this value can be reset */
};
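/*
 * Resets are emulated: counters are never cleared. Instead, the value at
 * reset time is stored in reset_value and subtracted from every subsequent
 * read, e.g. a counter that read 100 when reset and later reads 250 is
 * reported as 150.
 */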
/* Some device stats are simply a summation of the corresponding port values */
static uint64_t
dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
			     int which_stat)
{
	int i;
	uint64_t val = 0;

	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
		struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];

		if (!port->setup_done)
			continue;

		switch (which_stat) {
		case rx_ok:
			val += port->stats.traffic.rx_ok;
			break;
		case rx_drop:
			val += port->stats.traffic.rx_drop;
			break;
		case rx_interrupt_wait:
			val += port->stats.traffic.rx_interrupt_wait;
			break;
		case rx_umonitor_umwait:
			val += port->stats.traffic.rx_umonitor_umwait;
			break;
		case tx_ok:
			val += port->stats.traffic.tx_ok;
			break;
		case total_polls:
			val += port->stats.traffic.total_polls;
			break;
		case zero_polls:
			val += port->stats.traffic.zero_polls;
			break;
		case tx_nospc_ldb_hw_credits:
			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
			break;
		case tx_nospc_dir_hw_credits:
			val += port->stats.traffic.tx_nospc_dir_hw_credits;
			break;
		case tx_nospc_hw_credits:
			val += port->stats.traffic.tx_nospc_hw_credits;
			break;
		case tx_nospc_inflight_max:
			val += port->stats.traffic.tx_nospc_inflight_max;
			break;
		case tx_nospc_new_event_limit:
			val += port->stats.traffic.tx_nospc_new_event_limit;
			break;
		case tx_nospc_inflight_credits:
			val += port->stats.traffic.tx_nospc_inflight_credits;
			break;
		default:
			return -1;
		}
	}

	return val;
}
static uint64_t
get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
	     enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx_ok:
	case rx_drop:
	case rx_interrupt_wait:
	case rx_umonitor_umwait:
	case tx_ok:
	case total_polls:
	case zero_polls:
	case tx_nospc_ldb_hw_credits:
	case tx_nospc_dir_hw_credits:
	case tx_nospc_hw_credits:
	case tx_nospc_inflight_max:
	case tx_nospc_new_event_limit:
	case tx_nospc_inflight_credits:
		return dlb2_device_traffic_stat_get(dlb2, type);
	case nb_events_limit:
		return dlb2->new_event_limit;
	case inflight_events:
		return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
	case ldb_pool_size:
		return dlb2->num_ldb_credits;
	case dir_pool_size:
		return dlb2->num_dir_credits;
	case pool_size:
		return dlb2->num_credits;
	default:
		return -1;
	}
}
static uint64_t
get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	      enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];

	switch (type) {
	case rx_ok: return ev_port->stats.traffic.rx_ok;

	case rx_drop: return ev_port->stats.traffic.rx_drop;

	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;

	case rx_umonitor_umwait:
		return ev_port->stats.traffic.rx_umonitor_umwait;

	case tx_ok: return ev_port->stats.traffic.tx_ok;

	case total_polls: return ev_port->stats.traffic.total_polls;

	case zero_polls: return ev_port->stats.traffic.zero_polls;

	case tx_nospc_ldb_hw_credits:
		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;

	case tx_nospc_dir_hw_credits:
		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;

	case tx_nospc_hw_credits:
		return ev_port->stats.traffic.tx_nospc_hw_credits;

	case tx_nospc_inflight_max:
		return ev_port->stats.traffic.tx_nospc_inflight_max;

	case tx_nospc_new_event_limit:
		return ev_port->stats.traffic.tx_nospc_new_event_limit;

	case tx_nospc_inflight_credits:
		return ev_port->stats.traffic.tx_nospc_inflight_credits;

	case is_configured: return ev_port->setup_done;

	case is_load_balanced: return !ev_port->qm_port.is_directed;

	case hw_id: return ev_port->qm_port.id;

	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];

	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];

	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];

	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;

	case tx_sched_ordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];

	case tx_sched_unordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];

	case tx_sched_atomic:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];

	case tx_sched_directed:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];

	case tx_invalid: return ev_port->stats.tx_invalid;

	case outstanding_releases: return ev_port->outstanding_releases;

	case max_outstanding_releases:
		return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	case rx_sched_ordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];

	case rx_sched_unordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];

	case rx_sched_atomic:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];

	case rx_sched_directed:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];

	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;

	default: return -1;
	}
}
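/*
 * Queue-scope counters (enq_ok and the depth-threshold buckets) are
 * accumulated per port, so the two helpers below sum the per-port values
 * across all ports to produce the per-queue totals.
 */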
static uint64_t
dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
{
	int port = 0;
	uint64_t tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];

	return tally;
}
static uint64_t
dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
{
	int port = 0;
	uint64_t enq_ok_tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;

	return enq_ok_tally;
}
static uint64_t
get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	       enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_queue *ev_queue =
		&dlb2->ev_queues[obj_idx];

	switch (type) {
	case is_configured: return ev_queue->setup_done;

	case is_load_balanced: return !ev_queue->qm_queue.is_directed;

	case hw_id: return ev_queue->qm_queue.id;

	case num_links: return ev_queue->num_links;

	case sched_type: return ev_queue->qm_queue.sched_type;

	case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);

	case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);

	case depth_threshold: return ev_queue->depth_threshold;

	case depth_le50_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_LE50);

	case depth_gt50_le75_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT50_LE75);

	case depth_gt75_le100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT75_LE100);

	case depth_gt100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT100);

	default: return -1;
	}
}
int
dlb2_xstats_init(struct dlb2_eventdev *dlb2)
{
	/*
	 * define the stats names and types. Used to build up the device
	 * xstats array.
	 * There are multiple sets of stats:
	 *   - device-level,
	 *   - per-port,
	 *   - per-qid,
	 *
	 * For each of these sets, we have three parallel arrays, one for the
	 * names, the other for the stat type parameter to be passed in the fn
	 * call to get that stat. The third array allows resetting or not.
	 * All these arrays must be kept in sync.
	 */
	static const char * const dev_stats[] = {
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"nb_events_limit",
		"inflight_events",
		"ldb_pool_size",
		"dir_pool_size",
		"pool_size",
	};
	static const enum dlb2_xstats_type dev_types[] = {
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		nb_events_limit,
		inflight_events,
		ldb_pool_size,
		dir_pool_size,
		pool_size,
	};
	/* Note: generated device stats are not allowed to be reset. */
	static const uint8_t dev_reset_allowed[] = {
		0, /* rx_ok */
		0, /* rx_drop */
		0, /* rx_interrupt_wait */
		0, /* rx_umonitor_umwait */
		0, /* tx_ok */
		0, /* total_polls */
		0, /* zero_polls */
		0, /* tx_nospc_ldb_hw_credits */
		0, /* tx_nospc_dir_hw_credits */
		0, /* tx_nospc_hw_credits */
		0, /* tx_nospc_inflight_max */
		0, /* tx_nospc_new_event_limit */
		0, /* tx_nospc_inflight_credits */
		0, /* nb_events_limit */
		0, /* inflight_events */
		0, /* ldb_pool_size */
		0, /* dir_pool_size */
		0  /* pool_size */
	};
	static const char * const port_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"tx_new",
		"tx_fwd",
		"tx_rel",
		"tx_implicit_rel",
		"tx_sched_ordered",
		"tx_sched_unordered",
		"tx_sched_atomic",
		"tx_sched_directed",
		"tx_invalid",
		"outstanding_releases",
		"max_outstanding_releases",
		"rx_sched_ordered",
		"rx_sched_unordered",
		"rx_sched_atomic",
		"rx_sched_directed",
		"rx_sched_invalid",
	};
	static const enum dlb2_xstats_type port_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		tx_new,
		tx_fwd,
		tx_rel,
		tx_implicit_rel,
		tx_sched_ordered,
		tx_sched_unordered,
		tx_sched_atomic,
		tx_sched_directed,
		tx_invalid,
		outstanding_releases,
		max_outstanding_releases,
		rx_sched_ordered,
		rx_sched_unordered,
		rx_sched_atomic,
		rx_sched_directed,
		rx_sched_invalid,
	};
	static const uint8_t port_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		1, /* rx_ok */
		1, /* rx_drop */
		1, /* rx_interrupt_wait */
		1, /* rx_umonitor_umwait */
		1, /* tx_ok */
		1, /* total_polls */
		1, /* zero_polls */
		1, /* tx_nospc_ldb_hw_credits */
		1, /* tx_nospc_dir_hw_credits */
		1, /* tx_nospc_hw_credits */
		1, /* tx_nospc_inflight_max */
		1, /* tx_nospc_new_event_limit */
		1, /* tx_nospc_inflight_credits */
		1, /* tx_new */
		1, /* tx_fwd */
		1, /* tx_rel */
		1, /* tx_implicit_rel */
		1, /* tx_sched_ordered */
		1, /* tx_sched_unordered */
		1, /* tx_sched_atomic */
		1, /* tx_sched_directed */
		1, /* tx_invalid */
		0, /* outstanding_releases */
		0, /* max_outstanding_releases */
		1, /* rx_sched_ordered */
		1, /* rx_sched_unordered */
		1, /* rx_sched_atomic */
		1, /* rx_sched_directed */
		1  /* rx_sched_invalid */
	};
	/* QID specific stats */
	static const char * const qid_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"num_links",
		"sched_type",
		"enq_ok",
		"current_depth",
		"depth_threshold",
		"depth_le50_threshold",
		"depth_gt50_le75_threshold",
		"depth_gt75_le100_threshold",
		"depth_gt100_threshold",
	};
	static const enum dlb2_xstats_type qid_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		num_links,
		sched_type,
		enq_ok,
		current_depth,
		depth_threshold,
		depth_le50_threshold,
		depth_gt50_le75_threshold,
		depth_gt75_le100_threshold,
		depth_gt100_threshold,
	};
	static const uint8_t qid_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		0, /* num_links */
		0, /* sched_type */
		1, /* enq_ok */
		0, /* current_depth */
		0, /* depth_threshold */
		1, /* depth_le50_threshold */
		1, /* depth_gt50_le75_threshold */
		1, /* depth_gt75_le100_threshold */
		1, /* depth_gt100_threshold */
	};
	/* ---- end of stat definitions ---- */
	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));

	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
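	/*
	 * For example, a missed comma in
	 *	"rx_ok"
	 *	"rx_drop",
	 * concatenates the two literals into the single string "rx_okrx_drop",
	 * silently shortening the names array by one entry; the checks above
	 * turn that mistake into a build failure.
	 */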
	const unsigned int count = RTE_DIM(dev_stats) +
		DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
		DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
	unsigned int i, port, qid, stat_id = 0;

	dlb2->xstats = rte_zmalloc_socket(NULL,
			sizeof(dlb2->xstats[0]) * count, 0,
			dlb2->qm_instance.info.socket_id);
	if (dlb2->xstats == NULL)
		return -1;
#define sname dlb2->xstats[stat_id].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
		dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
			.fn_id = DLB2_XSTATS_FN_DEV,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = dev_reset_allowed[i],
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	dlb2->xstats_count_mode_dev = stat_id;
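	/*
	 * Stat IDs are allocated contiguously: the device-scope stats occupy
	 * IDs [0, xstats_count_mode_dev), followed by one block of port stats
	 * per port, then one block of queue stats per queue. The offset and
	 * count arrays recorded below let the lookup and reset paths map a
	 * port or queue back to its slice of the xstats array.
	 */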
	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
		dlb2->xstats_offset_for_port[port] = stat_id;

		uint32_t count_offset = stat_id;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_PORT,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
				 port, port_stats[i]);
		}

		dlb2->xstats_count_per_port[port] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;
	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
		uint32_t count_offset = stat_id;

		dlb2->xstats_offset_for_qid[qid] = stat_id;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_QUEUE,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
				 qid, qid_stats[i]);
		}

		dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_queue = stat_id -
		(dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);
#undef sname

	dlb2->xstats_count = stat_id;

	return 0;
}
void
dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
{
	rte_free(dlb2->xstats);
	dlb2->xstats_count = 0;
}
int
dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
			break;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		start_offset = dlb2->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
		    (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
			break;
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		return -EINVAL;
	}

	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
		return xstats_mode_count;

	for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
		if (dlb2->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != dlb2->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = dlb2->xstats[i].name;
		ids[xidx] = start_offset + xidx;
		xidx++;
	}

	return xidx;
}
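/*
 * Per the eventdev xstats API contract, if the supplied arrays are too small
 * (or NULL), the function above returns the number of entries the mode would
 * produce, letting the caller size its buffers and retry.
 */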
static int
dlb2_xstats_update(struct dlb2_eventdev *dlb2,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset)
{
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
			goto invalid_value;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
#if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
			goto invalid_value;
#endif
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		break;
	default:
		goto invalid_value;
	}

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[ids[i]];
		dlb2_xstats_fn fn;

		if (ids[i] >= dlb2->xstats_count || xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != xs->obj_idx)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			goto invalid_value;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;

		if (values)
			values[xidx] = val;

		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;

invalid_value:
	return -EINVAL;
}
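/*
 * dlb2_xstats_update() serves both the read and the reset paths: the reset
 * helpers below call it with values == NULL and reset == 1, which folds the
 * current reading into each entry's reset_value rather than reporting it.
 */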
int
dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	const uint32_t reset = 0;

	return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
				  reset);
}
uint64_t
dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
				 const char *name, unsigned int *id)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	dlb2_xstats_fn fn;

	for (i = 0; i < dlb2->xstats_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (strncmp(xs->name.name, name,
			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;

			switch (xs->fn_id) {
			case DLB2_XSTATS_FN_DEV:
				fn = get_dev_stat;
				break;
			case DLB2_XSTATS_FN_PORT:
				fn = get_port_stat;
				break;
			case DLB2_XSTATS_FN_QUEUE:
				fn = get_queue_stat;
				break;
			default:
				DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
					     xs->fn_id);
				return (uint64_t)-1;
			}

			return fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;
		}
	}

	if (id != NULL)
		*id = (uint32_t)-1;

	return (uint64_t)-1;
}
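/*
 * Reset a contiguous range of xstats by snapshotting their current raw
 * values into reset_value; entries with reset_allowed clear (configuration
 * values such as hw_id) are left untouched.
 */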
static void
dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
			uint32_t num)
{
	uint32_t i;
	dlb2_xstats_fn fn;

	for (i = start; i < start + num; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (!xs->reset_allowed)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			return;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
		xs->reset_value = val;
	}
}
static int
dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
			const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	dlb2_xstats_reset_range(dlb2,
				dlb2->xstats_offset_for_qid[queue_id],
				dlb2->xstats_count_per_qid[queue_id]);

	return 0;
}
static int
dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
		       const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	int offset = dlb2->xstats_offset_for_port[port_id];
	int nb_stat = dlb2->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	dlb2_xstats_reset_range(dlb2, offset, nb_stat);

	return 0;
}
static int
dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
		      uint32_t nb_ids)
{
	uint32_t i;

	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];

			if (id >= dlb2->xstats_count_mode_dev)
				return -EINVAL;
			dlb2_xstats_reset_range(dlb2, id, 1);
		}
	} else {
		for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
			dlb2_xstats_reset_range(dlb2, i, 1);
	}

	return 0;
}
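/*
 * Since every entry in dev_reset_allowed[] above is zero, a device-scope
 * reset is currently a no-op (the range helper skips each entry); only
 * port- and queue-scope counters are resettable.
 */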
int
dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[],
			   uint32_t nb_ids)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	uint32_t i;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
			return -EINVAL;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0;
			     i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
				if (dlb2_xstats_reset_port(dlb2, i,
							   ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
			if (dlb2_xstats_reset_port(dlb2, queue_port_id,
						   ids, nb_ids))
				return -EINVAL;
		}
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0;
			     i < DLB2_MAX_NUM_QUEUES(dlb2->version); i++) {
				if (dlb2_xstats_reset_queue(dlb2, i,
							    ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
			if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
						    ids, nb_ids))
				return -EINVAL;
		}
		break;
	}

	return 0;
}
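/*
 * In the reset entry point above, a queue_port_id of -1 selects all ports
 * (or queues), so an application can reset every resettable port counter
 * with, e.g. (dev_id illustrative):
 *
 *	rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_PORT,
 *				   -1, NULL, 0);
 */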
void
dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct dlb2_eventdev *dlb2;
	struct dlb2_hw_dev *handle;
	int i;

	dlb2 = dlb2_pmd_priv(dev);

	if (dlb2 == NULL) {
		fprintf(f, "DLB2 Event device cannot be dumped!\n");
		return;
	}

	if (!dlb2->configured)
		fprintf(f, "DLB2 Event device is not configured\n");

	handle = &dlb2->qm_instance;
	fprintf(f, "================\n");
	fprintf(f, "DLB2 Device Dump\n");
	fprintf(f, "================\n");

	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
		dlb2->umwait_allowed ? "yes" : "no");

	/* Generic top level device information */

	fprintf(f, "device is configured and run state = ");
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		fprintf(f, "STOPPED\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
		fprintf(f, "STOPPING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
		fprintf(f, "STARTING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
		fprintf(f, "STARTED\n");
	else
		fprintf(f, "UNEXPECTED\n");
	fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
		handle->domain_id, handle->info.socket_id, dlb2->event_dev);

	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
		dlb2->num_dir_ports, dlb2->num_dir_queues);

	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
		dlb2->num_ldb_ports, dlb2->num_ldb_queues);

	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
		handle->cfg.resources.num_atomic_inflights,
		handle->cfg.resources.num_hist_list_entries);

	fprintf(f, "results from most recent hw resource query:\n");

	fprintf(f, "\tnum_sched_domains = %u\n",
		dlb2->hw_rsrc_query_results.num_sched_domains);
	fprintf(f, "\tnum_ldb_queues = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_queues);
	fprintf(f, "\tnum_ldb_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_ports);
	fprintf(f, "\tnum_dir_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_ports);
	fprintf(f, "\tnum_atomic_inflights = %u\n",
		dlb2->hw_rsrc_query_results.num_atomic_inflights);
	fprintf(f, "\tnum_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.num_hist_list_entries);
	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);
	fprintf(f, "\tnum_ldb_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_credits);
	fprintf(f, "\tnum_dir_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_credits);
	fprintf(f, "\tnum_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_credits);
	/* Port level information */

	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
		int j;

		if (!p->enq_configured)
			fprintf(f, "Port_%d is not configured\n", i);

		fprintf(f, "Port_%d\n", i);
		fprintf(f, "=======\n");

		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
			p->id, p->setup_done);

		fprintf(f, "\tconfig state=%d, port state=%d\n",
			p->qm_port.config_state, p->qm_port.state);

		fprintf(f, "\tport is %s\n",
			p->qm_port.is_directed ? "directed" : "load balanced");

		fprintf(f, "\toutstanding releases=%u\n",
			p->outstanding_releases);

		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
			p->inflight_max, p->inflight_credits);

		fprintf(f, "\tcredit update quanta=%u, implicit release=%u\n",
			p->credit_update_quanta, p->implicit_release);
		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			if (p->link[j].valid)
				fprintf(f, "id=%u prio=%u ",
					p->link[j].queue_id,
					p->link[j].priority);
		}
		fprintf(f, "\n");

		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);

		fprintf(f, "\tcached_ldb_credits=%u\n",
			p->qm_port.cached_ldb_credits);

		fprintf(f, "\tldb_credits = %u\n",
			p->qm_port.ldb_credits);

		fprintf(f, "\tcached_dir_credits = %u\n",
			p->qm_port.cached_dir_credits);

		fprintf(f, "\tdir_credits = %u\n",
			p->qm_port.dir_credits);

		fprintf(f, "\tcached_credits = %u\n",
			p->qm_port.cached_credits);

		fprintf(f, "\tcredits = %u\n",
			p->qm_port.credits);

		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
			p->qm_port.gen_bit,
			p->qm_port.cq_idx,
			p->qm_port.cq_depth);

		fprintf(f, "\tinterrupt armed=%d\n",
			p->qm_port.int_armed);
		fprintf(f, "\tPort statistics\n");

		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
			p->stats.traffic.rx_ok);
		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
			p->stats.traffic.rx_drop);
		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
			p->stats.traffic.rx_interrupt_wait);
		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
			p->stats.traffic.rx_umonitor_umwait);
		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
			p->stats.traffic.tx_ok);
		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
			p->stats.traffic.total_polls);
		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
			p->stats.traffic.zero_polls);
		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_ldb_hw_credits);
		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_dir_hw_credits);
		fprintf(f, "\t\ttx_nospc_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_hw_credits);
		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_max);
		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_new_event_limit);
		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_credits);
		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
			p->stats.tx_implicit_rel);
		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);
		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);
		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);
		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);
		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
			p->stats.tx_invalid);
		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);
		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);
		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);
		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);
		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
			p->stats.rx_sched_invalid);
	}
	/* Queue level information */

	for (i = 0; i < dlb2->num_queues; i++) {
		struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
		int j, k;

		if (!q->setup_done)
			fprintf(f, "Queue_%d is not configured\n", i);

		fprintf(f, "Queue_%d\n", i);
		fprintf(f, "========\n");

		fprintf(f, "\tevqueue_%u is set up\n", q->id);

		fprintf(f, "\tqueue is %s\n",
			q->qm_queue.is_directed ? "directed" : "load balanced");

		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);

		for (j = 0; j < dlb2->num_ports; j++) {
			struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];

			for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
				if (p->link[k].valid &&
				    p->link[k].queue_id == q->id)
					fprintf(f, "id=%u prio=%u ",
						p->id, p->link[k].priority);
			}
		}
		fprintf(f, "\n");

		fprintf(f, "\tcurrent depth: %u events\n",
			dlb2_get_queue_depth(dlb2, q));

		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
	}
}