1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
7 #include <rte_malloc.h>
8 #include <rte_eventdev.h>
10 #include "dlb2_priv.h"
11 #include "dlb2_inline_fns.h"
/*
 * Extended-stat identifiers shared by the device-, port- and queue-level
 * getter functions below. NOTE(review): this extract is missing lines (the
 * embedded original line numbers jump, e.g. 26 -> 33, 58 -> 60); at least the
 * device-only members (nb_events_limit, inflight_events, referenced by
 * get_dev_stat), depth_le50_threshold / depth_gt100_threshold (referenced by
 * get_queue_stat) and the closing brace are elided here.
 */
13 enum dlb2_xstats_type {
14 /* common to device and port */
15 rx_ok, /**< Receive an event */
16 rx_drop, /**< Error bit set in received QE */
17 rx_interrupt_wait, /**< Wait on an interrupt */
18 rx_umonitor_umwait, /**< Block using umwait */
19 tx_ok, /**< Transmit an event */
20 total_polls, /**< Call dequeue_burst */
21 zero_polls, /**< Call dequeue burst and return 0 */
22 tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
23 tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
24 tx_nospc_inflight_max, /**< Reach the new_event_threshold */
25 tx_nospc_new_event_limit, /**< Insufficient s/w credits */
26 tx_nospc_inflight_credits, /**< Port has too few s/w credits */
33 tx_new, /**< Send an OP_NEW event */
34 tx_fwd, /**< Send an OP_FORWARD event */
35 tx_rel, /**< Send an OP_RELEASE event */
36 tx_implicit_rel, /**< Issue an implicit event release */
37 tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
38 tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
39 tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
40 tx_sched_directed, /**< Send a directed event */
41 tx_invalid, /**< Send an event with an invalid op */
42 outstanding_releases, /**< # of releases a port owes */
43 max_outstanding_releases, /**< max # of releases a port can owe */
44 rx_sched_ordered, /**< Dequeue an ordered event */
45 rx_sched_unordered, /**< Dequeue an unordered event */
46 rx_sched_atomic, /**< Dequeue an atomic event */
47 rx_sched_directed, /**< Dequeue an directed event */
48 rx_sched_invalid, /**< Dequeue event sched type invalid */
49 /* common to port and queue */
50 is_configured, /**< Port is configured */
51 is_load_balanced, /**< Port is LDB */
52 hw_id, /**< Hardware ID */
54 num_links, /**< Number of ports linked */
55 sched_type, /**< Queue sched type */
56 enq_ok, /**< # events enqueued to the queue */
57 current_depth, /**< Current queue depth */
58 depth_threshold, /**< Programmed depth threshold */
60 /**< Depth LE to 50% of the configured hardware threshold */
61 depth_gt50_le75_threshold,
62 /**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
63 depth_gt75_le100_threshold,
64 /**< Depth GT 75%, but LE to the configured hardware threshold */
66 /**< Depth GT 100% of the configured hw threshold */
/*
 * Signature shared by the per-scope stat getters (get_dev_stat,
 * get_port_stat, get_queue_stat): obj_idx selects the port/queue (unused at
 * device scope), stat selects which counter, extra_arg is getter-specific.
 */
69 typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
70 uint16_t obj_idx, /* port or queue id */
71 enum dlb2_xstats_type stat, int extra_arg);
/*
 * Selector mapping an xstat entry to its getter. NOTE(review): members are
 * elided in this extract; DLB2_XSTATS_FN_DEV / _PORT / _QUEUE are referenced
 * by the switch statements later in the file.
 */
73 enum dlb2_xstats_fn_type {
/*
 * One registered extended stat. reset_value emulates resets: it is
 * subtracted from the live counter on read and bumped on reset.
 * NOTE(review): the closing brace and some fields (obj_idx and extra_arg are
 * referenced by dlb2_xstats_update) are elided in this extract.
 */
79 struct dlb2_xstats_entry {
80 struct rte_event_dev_xstats_name name;
81 uint64_t reset_value; /* an offset to be taken away to emulate resets */
82 enum dlb2_xstats_fn_type fn_id;
83 enum dlb2_xstats_type stat;
84 enum rte_event_dev_xstats_mode mode;
87 uint8_t reset_allowed; /* when set, this value can be reset */
90 /* Some device stats are simply a summation of the corresponding port values */
/*
 * Sum one traffic counter (selected by which_stat) across all ports that
 * have completed setup. NOTE(review): the return-type line, `break`
 * statements, default case and closing braces are elided in this extract.
 */
92 dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
98 for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
99 struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];
101 if (!port->setup_done)
104 switch (which_stat) {
106 val += port->stats.traffic.rx_ok;
109 val += port->stats.traffic.rx_drop;
111 case rx_interrupt_wait:
112 val += port->stats.traffic.rx_interrupt_wait;
114 case rx_umonitor_umwait:
115 val += port->stats.traffic.rx_umonitor_umwait;
118 val += port->stats.traffic.tx_ok;
121 val += port->stats.traffic.total_polls;
124 val += port->stats.traffic.zero_polls;
126 case tx_nospc_ldb_hw_credits:
127 val += port->stats.traffic.tx_nospc_ldb_hw_credits;
129 case tx_nospc_dir_hw_credits:
130 val += port->stats.traffic.tx_nospc_dir_hw_credits;
132 case tx_nospc_inflight_max:
133 val += port->stats.traffic.tx_nospc_inflight_max;
135 case tx_nospc_new_event_limit:
136 val += port->stats.traffic.tx_nospc_new_event_limit;
138 case tx_nospc_inflight_credits:
139 val += port->stats.traffic.tx_nospc_inflight_credits;
/*
 * Device-scope stat getter (dlb2_xstats_fn shape). Traffic counters are
 * summed over ports via dlb2_device_traffic_stat_get; the remaining cases
 * read device-level fields directly. NOTE(review): return-type line, switch
 * header, some case labels and the closing braces are elided in this extract.
 */
149 get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
150 enum dlb2_xstats_type type, int extra_arg __rte_unused)
155 case rx_interrupt_wait:
156 case rx_umonitor_umwait:
160 case tx_nospc_ldb_hw_credits:
161 case tx_nospc_dir_hw_credits:
162 case tx_nospc_inflight_max:
163 case tx_nospc_new_event_limit:
164 case tx_nospc_inflight_credits:
165 return dlb2_device_traffic_stat_get(dlb2, type);
166 case nb_events_limit:
167 return dlb2->new_event_limit;
168 case inflight_events:
/* Seq-cst atomic load: inflights is updated concurrently by datapath threads */
169 return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
171 return dlb2->num_ldb_credits;
173 return dlb2->num_dir_credits;
/*
 * Port-scope stat getter (dlb2_xstats_fn shape): reads the counter selected
 * by `type` from ev_ports[obj_idx]. NOTE(review): the return-type line,
 * switch header, default case and closing braces are elided in this extract.
 */
179 get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
180 enum dlb2_xstats_type type, int extra_arg __rte_unused)
182 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];
185 case rx_ok: return ev_port->stats.traffic.rx_ok;
187 case rx_drop: return ev_port->stats.traffic.rx_drop;
189 case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
191 case rx_umonitor_umwait:
192 return ev_port->stats.traffic.rx_umonitor_umwait;
194 case tx_ok: return ev_port->stats.traffic.tx_ok;
196 case total_polls: return ev_port->stats.traffic.total_polls;
198 case zero_polls: return ev_port->stats.traffic.zero_polls;
200 case tx_nospc_ldb_hw_credits:
201 return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
203 case tx_nospc_dir_hw_credits:
204 return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
206 case tx_nospc_inflight_max:
207 return ev_port->stats.traffic.tx_nospc_inflight_max;
209 case tx_nospc_new_event_limit:
210 return ev_port->stats.traffic.tx_nospc_new_event_limit;
212 case tx_nospc_inflight_credits:
213 return ev_port->stats.traffic.tx_nospc_inflight_credits;
215 case is_configured: return ev_port->setup_done;
217 case is_load_balanced: return !ev_port->qm_port.is_directed;
219 case hw_id: return ev_port->qm_port.id;
221 case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
223 case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
225 case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
227 case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
229 case tx_sched_ordered:
230 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];
232 case tx_sched_unordered:
233 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];
235 case tx_sched_atomic:
236 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];
238 case tx_sched_directed:
239 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];
241 case tx_invalid: return ev_port->stats.tx_invalid;
243 case outstanding_releases: return ev_port->outstanding_releases;
245 case max_outstanding_releases:
/* Compile-time constant: the max is the same for every LDB port */
246 return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
248 case rx_sched_ordered:
249 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];
251 case rx_sched_unordered:
252 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];
254 case rx_sched_atomic:
255 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];
257 case rx_sched_directed:
258 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];
260 case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
/*
 * Sum one per-queue depth-threshold bucket (`stat` indexes qid_depth[])
 * across all ports' per-queue counters. NOTE(review): return-type line,
 * tally declaration, return statement and braces are elided in this extract.
 */
267 dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
272 for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
273 tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];
/*
 * Sum the per-port enqueue-OK counters for queue `qid` across all ports.
 * NOTE(review): return-type line, return statement and braces are elided
 * in this extract.
 */
279 dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
282 uint64_t enq_ok_tally = 0;
284 for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
285 enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;
/*
 * Queue-scope stat getter (dlb2_xstats_fn shape): reads configuration
 * fields from ev_queues[obj_idx] directly and derives depth/enqueue stats
 * via the helper tallies above. NOTE(review): return-type line, switch
 * header, default case and closing braces are elided in this extract.
 */
291 get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
292 enum dlb2_xstats_type type, int extra_arg __rte_unused)
294 struct dlb2_eventdev_queue *ev_queue =
295 &dlb2->ev_queues[obj_idx];
298 case is_configured: return ev_queue->setup_done;
300 case is_load_balanced: return !ev_queue->qm_queue.is_directed;
302 case hw_id: return ev_queue->qm_queue.id;
304 case num_links: return ev_queue->num_links;
306 case sched_type: return ev_queue->qm_queue.sched_type;
308 case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);
310 case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);
312 case depth_threshold: return ev_queue->depth_threshold;
314 case depth_le50_threshold:
315 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
316 DLB2_QID_DEPTH_LE50);
318 case depth_gt50_le75_threshold:
319 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
320 DLB2_QID_DEPTH_GT50_LE75);
322 case depth_gt75_le100_threshold:
323 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
324 DLB2_QID_DEPTH_GT75_LE100);
326 case depth_gt100_threshold:
327 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
328 DLB2_QID_DEPTH_GT100);
/*
 * Build the device's xstats table: three parallel name/type/reset-allowed
 * array sets (device, port, queue) are expanded into one rte_zmalloc'd
 * array of dlb2_xstats_entry, and the per-mode counts/offsets used by the
 * get/reset paths are recorded. NOTE(review): this extract is missing many
 * lines (return type, several string-table entries, closing braces, the
 * allocation-failure return value, the #undef of `sname`, and the final
 * return) — code below is reproduced verbatim.
 */
335 dlb2_xstats_init(struct dlb2_eventdev *dlb2)
338 * define the stats names and types. Used to build up the device
340 * There are multiple set of stats:
345 * For each of these sets, we have three parallel arrays, one for the
346 * names, the other for the stat type parameter to be passed in the fn
347 * call to get that stat. The third array allows resetting or not.
348 * All these arrays must be kept in sync
350 static const char * const dev_stats[] = {
354 "rx_umonitor_umwait",
358 "tx_nospc_ldb_hw_credits",
359 "tx_nospc_dir_hw_credits",
360 "tx_nospc_inflight_max",
361 "tx_nospc_new_event_limit",
362 "tx_nospc_inflight_credits",
368 static const enum dlb2_xstats_type dev_types[] = {
376 tx_nospc_ldb_hw_credits,
377 tx_nospc_dir_hw_credits,
378 tx_nospc_inflight_max,
379 tx_nospc_new_event_limit,
380 tx_nospc_inflight_credits,
386 /* Note: generated device stats are not allowed to be reset. */
387 static const uint8_t dev_reset_allowed[] = {
390 0, /* rx_interrupt_wait */
391 0, /* rx_umonitor_umwait */
395 0, /* tx_nospc_ldb_hw_credits */
396 0, /* tx_nospc_dir_hw_credits */
397 0, /* tx_nospc_inflight_max */
398 0, /* tx_nospc_new_event_limit */
399 0, /* tx_nospc_inflight_credits */
400 0, /* nb_events_limit */
401 0, /* inflight_events */
402 0, /* ldb_pool_size */
403 0, /* dir_pool_size */
405 static const char * const port_stats[] = {
412 "rx_umonitor_umwait",
416 "tx_nospc_ldb_hw_credits",
417 "tx_nospc_dir_hw_credits",
418 "tx_nospc_inflight_max",
419 "tx_nospc_new_event_limit",
420 "tx_nospc_inflight_credits",
426 "tx_sched_unordered",
430 "outstanding_releases",
431 "max_outstanding_releases",
433 "rx_sched_unordered",
438 static const enum dlb2_xstats_type port_types[] = {
449 tx_nospc_ldb_hw_credits,
450 tx_nospc_dir_hw_credits,
451 tx_nospc_inflight_max,
452 tx_nospc_new_event_limit,
453 tx_nospc_inflight_credits,
463 outstanding_releases,
464 max_outstanding_releases,
471 static const uint8_t port_reset_allowed[] = {
472 0, /* is_configured */
473 0, /* is_load_balanced */
477 1, /* rx_interrupt_wait */
478 1, /* rx_umonitor_umwait */
482 1, /* tx_nospc_ldb_hw_credits */
483 1, /* tx_nospc_dir_hw_credits */
484 1, /* tx_nospc_inflight_max */
485 1, /* tx_nospc_new_event_limit */
486 1, /* tx_nospc_inflight_credits */
490 1, /* tx_implicit_rel */
491 1, /* tx_sched_ordered */
492 1, /* tx_sched_unordered */
493 1, /* tx_sched_atomic */
494 1, /* tx_sched_directed */
496 0, /* outstanding_releases */
497 0, /* max_outstanding_releases */
498 1, /* rx_sched_ordered */
499 1, /* rx_sched_unordered */
500 1, /* rx_sched_atomic */
501 1, /* rx_sched_directed */
502 1 /* rx_sched_invalid */
505 /* QID specific stats */
506 static const char * const qid_stats[] = {
515 "depth_le50_threshold",
516 "depth_gt50_le75_threshold",
517 "depth_gt75_le100_threshold",
518 "depth_gt100_threshold",
520 static const enum dlb2_xstats_type qid_types[] = {
529 depth_le50_threshold,
530 depth_gt50_le75_threshold,
531 depth_gt75_le100_threshold,
532 depth_gt100_threshold,
534 static const uint8_t qid_reset_allowed[] = {
535 0, /* is_configured */
536 0, /* is_load_balanced */
541 0, /* current_depth */
542 0, /* depth_threshold */
543 1, /* depth_le50_threshold */
544 1, /* depth_gt50_le75_threshold */
545 1, /* depth_gt75_le100_threshold */
546 1, /* depth_gt100_threshold */
549 /* ---- end of stat definitions ---- */
551 /* check sizes, since a missed comma can lead to strings being
552 * joined by the compiler.
554 RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
555 RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
556 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
558 RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
559 RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
560 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
/* Total entries: dev stats once, port/qid stat sets per port/queue */
563 const unsigned int count = RTE_DIM(dev_stats) +
564 DLB2_MAX_NUM_PORTS * RTE_DIM(port_stats) +
565 DLB2_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
566 unsigned int i, port, qid, stat_id = 0;
568 dlb2->xstats = rte_zmalloc_socket(NULL,
569 sizeof(dlb2->xstats[0]) * count, 0,
570 dlb2->qm_instance.info.socket_id);
571 if (dlb2->xstats == NULL)
574 #define sname dlb2->xstats[stat_id].name.name
575 for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
576 dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
577 .fn_id = DLB2_XSTATS_FN_DEV,
578 .stat = dev_types[i],
579 .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
580 .reset_allowed = dev_reset_allowed[i],
582 snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
584 dlb2->xstats_count_mode_dev = stat_id;
586 for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) {
587 dlb2->xstats_offset_for_port[port] = stat_id;
589 uint32_t count_offset = stat_id;
591 for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
592 dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
593 .fn_id = DLB2_XSTATS_FN_PORT,
595 .stat = port_types[i],
596 .mode = RTE_EVENT_DEV_XSTATS_PORT,
597 .reset_allowed = port_reset_allowed[i],
599 snprintf(sname, sizeof(sname), "port_%u_%s",
600 port, port_stats[i]);
603 dlb2->xstats_count_per_port[port] = stat_id - count_offset;
606 dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;
608 for (qid = 0; qid < DLB2_MAX_NUM_QUEUES; qid++) {
609 uint32_t count_offset = stat_id;
611 dlb2->xstats_offset_for_qid[qid] = stat_id;
613 for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
614 dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
615 .fn_id = DLB2_XSTATS_FN_QUEUE,
617 .stat = qid_types[i],
618 .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
619 .reset_allowed = qid_reset_allowed[i],
621 snprintf(sname, sizeof(sname), "qid_%u_%s",
625 dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;
628 dlb2->xstats_count_mode_queue = stat_id -
629 (dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);
632 dlb2->xstats_count = stat_id;
/*
 * Free the xstats table built by dlb2_xstats_init and zero the count.
 * NOTE(review): return-type line and braces are elided in this extract.
 */
638 dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
640 rte_free(dlb2->xstats);
641 dlb2->xstats_count = 0;
/*
 * eventdev xstats_get_names op: fill `xstats_names`/`ids` for the requested
 * mode (and port/queue id), or — when size is too small or output pointers
 * are NULL — return the number of stats for that mode. NOTE(review):
 * return-type line, `break`/`return -EINVAL` lines, `continue` statements,
 * the final return and closing braces are elided in this extract.
 */
645 dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
646 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
647 struct rte_event_dev_xstats_name *xstats_names,
648 unsigned int *ids, unsigned int size)
650 const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
652 unsigned int xidx = 0;
653 uint32_t xstats_mode_count = 0;
654 uint32_t start_offset = 0;
657 case RTE_EVENT_DEV_XSTATS_DEVICE:
658 xstats_mode_count = dlb2->xstats_count_mode_dev;
660 case RTE_EVENT_DEV_XSTATS_PORT:
661 if (queue_port_id >= DLB2_MAX_NUM_PORTS)
663 xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
664 start_offset = dlb2->xstats_offset_for_port[queue_port_id];
666 case RTE_EVENT_DEV_XSTATS_QUEUE:
667 #if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
668 if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
671 xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
672 start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
678 if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
679 return xstats_mode_count;
681 for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
682 if (dlb2->xstats[i].mode != mode)
685 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
686 queue_port_id != dlb2->xstats[i].obj_idx)
689 xstats_names[xidx] = dlb2->xstats[i].name;
691 ids[xidx] = start_offset + xidx;
/*
 * Core get/reset worker shared by the xstats_get and reset paths: for each
 * requested id, dispatch to the dev/port/queue getter, subtract the entry's
 * reset_value, and (when `reset` is set and allowed) fold the current value
 * into reset_value to emulate a reset. NOTE(review): return-type line,
 * `break`s, the `fn = ...` assignments inside the fn_id switch, the
 * values[] store, the final return and closing braces are elided in this
 * extract.
 */
698 dlb2_xstats_update(struct dlb2_eventdev *dlb2,
699 enum rte_event_dev_xstats_mode mode,
700 uint8_t queue_port_id, const unsigned int ids[],
701 uint64_t values[], unsigned int n, const uint32_t reset)
704 unsigned int xidx = 0;
705 uint32_t xstats_mode_count = 0;
708 case RTE_EVENT_DEV_XSTATS_DEVICE:
709 xstats_mode_count = dlb2->xstats_count_mode_dev;
711 case RTE_EVENT_DEV_XSTATS_PORT:
712 if (queue_port_id >= DLB2_MAX_NUM_PORTS)
714 xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
716 case RTE_EVENT_DEV_XSTATS_QUEUE:
717 #if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
718 if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
721 xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
727 for (i = 0; i < n && xidx < xstats_mode_count; i++) {
728 struct dlb2_xstats_entry *xs = &dlb2->xstats[ids[i]];
731 if (ids[i] > dlb2->xstats_count || xs->mode != mode)
734 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
735 queue_port_id != xs->obj_idx)
739 case DLB2_XSTATS_FN_DEV:
742 case DLB2_XSTATS_FN_PORT:
745 case DLB2_XSTATS_FN_QUEUE:
749 DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
753 uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
754 xs->extra_arg) - xs->reset_value;
759 if (xs->reset_allowed && reset)
760 xs->reset_value += val;
/*
 * eventdev xstats_get op: thin wrapper over dlb2_xstats_update with
 * reset = 0 (read-only). NOTE(review): return-type line, the `reset`
 * argument continuation and closing brace are elided in this extract.
 */
772 dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
773 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
774 const unsigned int ids[], uint64_t values[], unsigned int n)
776 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
777 const uint32_t reset = 0;
779 return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
/*
 * eventdev xstats_get_by_name op: linear-search the xstats table for a
 * matching name, optionally store its index via `id`, and return the
 * reset-adjusted value. NOTE(review): return-type line, the `*id = i`
 * store, the `fn = ...` assignments, the not-found return and closing
 * braces are elided in this extract.
 */
784 dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
785 const char *name, unsigned int *id)
787 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
791 for (i = 0; i < dlb2->xstats_count; i++) {
792 struct dlb2_xstats_entry *xs = &dlb2->xstats[i];
794 if (strncmp(xs->name.name, name,
795 RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
800 case DLB2_XSTATS_FN_DEV:
803 case DLB2_XSTATS_FN_PORT:
806 case DLB2_XSTATS_FN_QUEUE:
810 DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
815 return fn(dlb2, xs->obj_idx, xs->stat,
816 xs->extra_arg) - xs->reset_value;
/*
 * Reset a contiguous range of xstats entries [start, start+num): snapshot
 * each resettable stat's current value into reset_value so subsequent reads
 * start from zero. NOTE(review): return-type line, parameter continuation,
 * `continue`, the `fn = ...` assignments and closing braces are elided in
 * this extract.
 */
825 dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
831 for (i = start; i < start + num; i++) {
832 struct dlb2_xstats_entry *xs = &dlb2->xstats[i];
834 if (!xs->reset_allowed)
838 case DLB2_XSTATS_FN_DEV:
841 case DLB2_XSTATS_FN_PORT:
844 case DLB2_XSTATS_FN_QUEUE:
848 DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
852 uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
853 xs->reset_value = val;
/*
 * Reset one queue's xstats: with explicit ids, delegate to
 * dlb2_xstats_update in reset mode and require all ids to be reset;
 * otherwise reset the queue's whole range. NOTE(review): return-type line,
 * the `if (ids)` guard, the success return and closing braces are elided
 * in this extract.
 */
858 dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
859 const uint32_t ids[], uint32_t nb_ids)
861 const uint32_t reset = 1;
864 uint32_t nb_reset = dlb2_xstats_update(dlb2,
865 RTE_EVENT_DEV_XSTATS_QUEUE,
866 queue_id, ids, NULL, nb_ids,
868 return nb_reset == nb_ids ? 0 : -EINVAL;
872 dlb2_xstats_reset_range(dlb2,
873 dlb2->xstats_offset_for_qid[queue_id],
874 dlb2->xstats_count_per_qid[queue_id]);
/*
 * Reset one port's xstats; mirrors dlb2_xstats_reset_queue but over the
 * port offset/count tables. NOTE(review): return-type line, the `if (ids)`
 * guard, the update-call argument continuation, the success return and
 * closing braces are elided in this extract.
 */
880 dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
881 const uint32_t ids[], uint32_t nb_ids)
883 const uint32_t reset = 1;
884 int offset = dlb2->xstats_offset_for_port[port_id];
885 int nb_stat = dlb2->xstats_count_per_port[port_id];
888 uint32_t nb_reset = dlb2_xstats_update(dlb2,
889 RTE_EVENT_DEV_XSTATS_PORT, port_id,
892 return nb_reset == nb_ids ? 0 : -EINVAL;
895 dlb2_xstats_reset_range(dlb2, offset, nb_stat);
/*
 * Reset device-scope xstats: with ids, validate each against the device
 * range and reset entries one at a time; without ids, reset every
 * device-mode entry. NOTE(review): return-type line, the `if (ids)`/`else`
 * structure, error/success returns and closing braces are elided in this
 * extract.
 */
900 dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
906 for (i = 0; i < nb_ids; i++) {
907 uint32_t id = ids[i];
909 if (id >= dlb2->xstats_count_mode_dev)
911 dlb2_xstats_reset_range(dlb2, id, 1);
914 for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
915 dlb2_xstats_reset_range(dlb2, i, 1);
/*
 * eventdev xstats_reset op: dispatch on mode; queue_port_id == -1 means
 * "all ports" / "all queues". NOTE(review): return-type line, switch
 * header, error returns, `break`s, the final return and closing braces are
 * elided in this extract.
 */
922 dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
923 enum rte_event_dev_xstats_mode mode,
924 int16_t queue_port_id,
925 const uint32_t ids[],
928 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
931 /* handle -1 for queue_port_id here, looping over all ports/queues */
933 case RTE_EVENT_DEV_XSTATS_DEVICE:
934 if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
937 case RTE_EVENT_DEV_XSTATS_PORT:
938 if (queue_port_id == -1) {
939 for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
940 if (dlb2_xstats_reset_port(dlb2, i,
944 } else if (queue_port_id < DLB2_MAX_NUM_PORTS) {
945 if (dlb2_xstats_reset_port(dlb2, queue_port_id,
950 case RTE_EVENT_DEV_XSTATS_QUEUE:
951 if (queue_port_id == -1) {
952 for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) {
953 if (dlb2_xstats_reset_queue(dlb2, i,
957 } else if (queue_port_id < DLB2_MAX_NUM_QUEUES) {
958 if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
969 dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
971 struct dlb2_eventdev *dlb2;
972 struct dlb2_hw_dev *handle;
975 dlb2 = dlb2_pmd_priv(dev);
978 fprintf(f, "DLB2 Event device cannot be dumped!\n");
982 if (!dlb2->configured)
983 fprintf(f, "DLB2 Event device is not configured\n");
985 handle = &dlb2->qm_instance;
987 fprintf(f, "================\n");
988 fprintf(f, "DLB2 Device Dump\n");
989 fprintf(f, "================\n");
991 fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
992 dlb2->umwait_allowed ? "yes" : "no");
994 /* Generic top level device information */
996 fprintf(f, "device is configured and run state =");
997 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
998 fprintf(f, "STOPPED\n");
999 else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
1000 fprintf(f, "STOPPING\n");
1001 else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
1002 fprintf(f, "STARTING\n");
1003 else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
1004 fprintf(f, "STARTED\n");
1006 fprintf(f, "UNEXPECTED\n");
1008 fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
1009 handle->domain_id, handle->info.socket_id, dlb2->event_dev);
1011 fprintf(f, "num dir ports=%u, num dir queues=%u\n",
1012 dlb2->num_dir_ports, dlb2->num_dir_queues);
1014 fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
1015 dlb2->num_ldb_ports, dlb2->num_ldb_queues);
1017 fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
1018 handle->cfg.resources.num_atomic_inflights,
1019 handle->cfg.resources.num_hist_list_entries);
1021 fprintf(f, "results from most recent hw resource query:\n");
1023 fprintf(f, "\tnum_sched_domains = %u\n",
1024 dlb2->hw_rsrc_query_results.num_sched_domains);
1026 fprintf(f, "\tnum_ldb_queues = %u\n",
1027 dlb2->hw_rsrc_query_results.num_ldb_queues);
1029 fprintf(f, "\tnum_ldb_ports = %u\n",
1030 dlb2->hw_rsrc_query_results.num_ldb_ports);
1032 fprintf(f, "\tnum_dir_ports = %u\n",
1033 dlb2->hw_rsrc_query_results.num_dir_ports);
1035 fprintf(f, "\tnum_atomic_inflights = %u\n",
1036 dlb2->hw_rsrc_query_results.num_atomic_inflights);
1038 fprintf(f, "\tnum_hist_list_entries = %u\n",
1039 dlb2->hw_rsrc_query_results.num_hist_list_entries);
1041 fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
1042 dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);
1044 fprintf(f, "\tnum_ldb_credits = %u\n",
1045 dlb2->hw_rsrc_query_results.num_ldb_credits);
1047 fprintf(f, "\tnum_dir_credits = %u\n",
1048 dlb2->hw_rsrc_query_results.num_dir_credits);
1050 /* Port level information */
1052 for (i = 0; i < dlb2->num_ports; i++) {
1053 struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
1056 if (!p->enq_configured)
1057 fprintf(f, "Port_%d is not configured\n", i);
1059 fprintf(f, "Port_%d\n", i);
1060 fprintf(f, "=======\n");
1062 fprintf(f, "\tevport_%u is configured, setup done=%d\n",
1063 p->id, p->setup_done);
1065 fprintf(f, "\tconfig state=%d, port state=%d\n",
1066 p->qm_port.config_state, p->qm_port.state);
1068 fprintf(f, "\tport is %s\n",
1069 p->qm_port.is_directed ? "directed" : "load balanced");
1071 fprintf(f, "\toutstanding releases=%u\n",
1072 p->outstanding_releases);
1074 fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
1075 p->inflight_max, p->inflight_credits);
1077 fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
1078 p->credit_update_quanta, p->implicit_release);
1080 fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
1082 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1083 if (p->link[j].valid)
1084 fprintf(f, "id=%u prio=%u ",
1085 p->link[j].queue_id,
1086 p->link[j].priority);
1090 fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
1092 fprintf(f, "\tcached_ldb_credits=%u\n",
1093 p->qm_port.cached_ldb_credits);
1095 fprintf(f, "\tldb_credits = %u\n",
1096 p->qm_port.ldb_credits);
1098 fprintf(f, "\tcached_dir_credits = %u\n",
1099 p->qm_port.cached_dir_credits);
1101 fprintf(f, "\tdir_credits = %u\n",
1102 p->qm_port.dir_credits);
1104 fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
1107 p->qm_port.cq_depth);
1109 fprintf(f, "\tinterrupt armed=%d\n",
1110 p->qm_port.int_armed);
1112 fprintf(f, "\tPort statistics\n");
1114 fprintf(f, "\t\trx_ok %" PRIu64 "\n",
1115 p->stats.traffic.rx_ok);
1117 fprintf(f, "\t\trx_drop %" PRIu64 "\n",
1118 p->stats.traffic.rx_drop);
1120 fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
1121 p->stats.traffic.rx_interrupt_wait);
1123 fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
1124 p->stats.traffic.rx_umonitor_umwait);
1126 fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
1127 p->stats.traffic.tx_ok);
1129 fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
1130 p->stats.traffic.total_polls);
1132 fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
1133 p->stats.traffic.zero_polls);
1135 fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
1136 p->stats.traffic.tx_nospc_ldb_hw_credits);
1138 fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
1139 p->stats.traffic.tx_nospc_dir_hw_credits);
1141 fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
1142 p->stats.traffic.tx_nospc_inflight_max);
1144 fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
1145 p->stats.traffic.tx_nospc_new_event_limit);
1147 fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
1148 p->stats.traffic.tx_nospc_inflight_credits);
1150 fprintf(f, "\t\ttx_new %" PRIu64 "\n",
1151 p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
1153 fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
1154 p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
1156 fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
1157 p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
1159 fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
1160 p->stats.tx_implicit_rel);
1162 fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
1163 p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);
1165 fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
1166 p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);
1168 fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
1169 p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);
1171 fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
1172 p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);
1174 fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
1175 p->stats.tx_invalid);
1177 fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
1178 p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);
1180 fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
1181 p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);
1183 fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
1184 p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);
1186 fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
1187 p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);
1189 fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
1190 p->stats.rx_sched_invalid);
1193 /* Queue level information */
1195 for (i = 0; i < dlb2->num_queues; i++) {
1196 struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
1200 fprintf(f, "Queue_%d is not configured\n", i);
1202 fprintf(f, "Queue_%d\n", i);
1203 fprintf(f, "========\n");
1205 fprintf(f, "\tevqueue_%u is set up\n", q->id);
1207 fprintf(f, "\tqueue is %s\n",
1208 q->qm_queue.is_directed ? "directed" : "load balanced");
1210 fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
1212 for (j = 0; j < dlb2->num_ports; j++) {
1213 struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];
1215 for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
1216 if (p->link[k].valid &&
1217 p->link[k].queue_id == q->id)
1218 fprintf(f, "id=%u prio=%u ",
1219 p->id, p->link[k].priority);
1224 fprintf(f, "\tcurrent depth: %u events\n",
1225 dlb2_get_queue_depth(dlb2, q));
1227 fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
1228 q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);