1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
7 #include <rte_malloc.h>
8 #include <rte_eventdev.h>
10 #include "dlb2_priv.h"
11 #include "dlb2_inline_fns.h"
/*
 * Identifiers for every extended statistic this PMD exposes. One shared enum
 * covers all three scopes (device, port, queue); the per-scope name/type
 * tables built in dlb2_xstats_init() select which values apply where.
 */
enum dlb2_xstats_type {
	/* common to device and port */
	rx_ok, /**< Receive an event */
	rx_drop, /**< Error bit set in received QE */
	rx_interrupt_wait, /**< Wait on an interrupt */
	rx_umonitor_umwait, /**< Block using umwait */
	tx_ok, /**< Transmit an event */
	total_polls, /**< Call dequeue_burst */
	zero_polls, /**< Call dequeue burst and return 0 */
	tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
	tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
	tx_nospc_inflight_max, /**< Reach the new_event_threshold */
	tx_nospc_new_event_limit, /**< Insufficient s/w credits */
	tx_nospc_inflight_credits, /**< Port has too few s/w credits */
	tx_new, /**< Send an OP_NEW event */
	tx_fwd, /**< Send an OP_FORWARD event */
	tx_rel, /**< Send an OP_RELEASE event */
	tx_implicit_rel, /**< Issue an implicit event release */
	tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
	tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
	tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
	tx_sched_directed, /**< Send a directed event */
	tx_invalid, /**< Send an event with an invalid op */
	outstanding_releases, /**< # of releases a port owes */
	max_outstanding_releases, /**< max # of releases a port can owe */
	rx_sched_ordered, /**< Dequeue an ordered event */
	rx_sched_unordered, /**< Dequeue an unordered event */
	rx_sched_atomic, /**< Dequeue an atomic event */
	rx_sched_directed, /**< Dequeue a directed event */
	rx_sched_invalid, /**< Dequeue event sched type invalid */
	/* common to port and queue */
	is_configured, /**< Port is configured */
	is_load_balanced, /**< Port is LDB */
	hw_id, /**< Hardware ID */
	num_links, /**< Number of ports linked */
	sched_type, /**< Queue sched type */
	enq_ok, /**< # events enqueued to the queue */
	current_depth, /**< Current queue depth */
	depth_threshold, /**< Programmed depth threshold */
	/* Each stand-alone trailing-doc comment below describes the
	 * enumerator on the line ABOVE it.
	 */
	/**< Depth LE to 50% of the configured hardware threshold */
	depth_gt50_le75_threshold,
	/**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
	depth_gt75_le100_threshold,
	/**< Depth GT 75%, but LE to the configured hardware threshold */
	/**< Depth GT 100% of the configured hw threshold */
/*
 * Common signature for the per-scope stat accessors (get_dev_stat,
 * get_port_stat, get_queue_stat) so callers can dispatch through a single
 * function pointer.
 */
typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
		uint16_t obj_idx, /* port or queue id */
		enum dlb2_xstats_type stat, int extra_arg);
/* Selects which scope accessor (device/port/queue) an xstats entry uses. */
enum dlb2_xstats_fn_type {
/* One entry per exposed statistic; dlb2->xstats is a flat array of these. */
struct dlb2_xstats_entry {
	struct rte_event_dev_xstats_name name; /* formatted, e.g. "port_0_rx_ok" */
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
	enum dlb2_xstats_fn_type fn_id; /* which accessor computes this stat */
	enum dlb2_xstats_type stat; /* stat selector passed to the accessor */
	enum rte_event_dev_xstats_mode mode; /* device/port/queue scope */
	uint8_t reset_allowed; /* when set, this value can be reset */
/* Some device stats are simply a summation of the corresponding port values */
dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
		struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];

		/* ports that were never set up carry no meaningful counters */
		if (!port->setup_done)

		switch (which_stat) {
			val += port->stats.traffic.rx_ok;
			val += port->stats.traffic.rx_drop;
		case rx_interrupt_wait:
			val += port->stats.traffic.rx_interrupt_wait;
		case rx_umonitor_umwait:
			val += port->stats.traffic.rx_umonitor_umwait;
			val += port->stats.traffic.tx_ok;
			val += port->stats.traffic.total_polls;
			val += port->stats.traffic.zero_polls;
		case tx_nospc_ldb_hw_credits:
			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
		case tx_nospc_dir_hw_credits:
			val += port->stats.traffic.tx_nospc_dir_hw_credits;
		case tx_nospc_inflight_max:
			val += port->stats.traffic.tx_nospc_inflight_max;
		case tx_nospc_new_event_limit:
			val += port->stats.traffic.tx_nospc_new_event_limit;
		case tx_nospc_inflight_credits:
			val += port->stats.traffic.tx_nospc_inflight_credits;
/*
 * Device-scope stat accessor. Traffic stats are summed across all ports via
 * dlb2_device_traffic_stat_get(); the remaining values are read straight
 * from the device structure. obj_idx is unused at device scope.
 */
get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
	     enum dlb2_xstats_type type, int extra_arg __rte_unused)
	case rx_interrupt_wait:
	case rx_umonitor_umwait:
	case tx_nospc_ldb_hw_credits:
	case tx_nospc_dir_hw_credits:
	case tx_nospc_inflight_max:
	case tx_nospc_new_event_limit:
	case tx_nospc_inflight_credits:
		return dlb2_device_traffic_stat_get(dlb2, type);
	case nb_events_limit:
		return dlb2->new_event_limit;
	case inflight_events:
		/* inflights is updated concurrently by the datapath; use an
		 * atomic load for a consistent snapshot.
		 */
		return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
		return dlb2->num_ldb_credits; /* ldb_pool_size */
		return dlb2->num_dir_credits; /* dir_pool_size */
/*
 * Per-port stat accessor: returns the raw (never reset-adjusted) value of
 * @type for port @obj_idx. The caller subtracts the entry's reset_value to
 * emulate resets.
 */
get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	      enum dlb2_xstats_type type, int extra_arg __rte_unused)
	struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];

	case rx_ok: return ev_port->stats.traffic.rx_ok;
	case rx_drop: return ev_port->stats.traffic.rx_drop;
	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
	case rx_umonitor_umwait:
		return ev_port->stats.traffic.rx_umonitor_umwait;
	case tx_ok: return ev_port->stats.traffic.tx_ok;
	case total_polls: return ev_port->stats.traffic.total_polls;
	case zero_polls: return ev_port->stats.traffic.zero_polls;
	case tx_nospc_ldb_hw_credits:
		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
	case tx_nospc_dir_hw_credits:
		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
	case tx_nospc_inflight_max:
		return ev_port->stats.traffic.tx_nospc_inflight_max;
	case tx_nospc_new_event_limit:
		return ev_port->stats.traffic.tx_nospc_new_event_limit;
	case tx_nospc_inflight_credits:
		return ev_port->stats.traffic.tx_nospc_inflight_credits;
	case is_configured: return ev_port->setup_done;
	case is_load_balanced: return !ev_port->qm_port.is_directed;
	case hw_id: return ev_port->qm_port.id;
	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
	case tx_sched_ordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];
	case tx_sched_unordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];
	case tx_sched_atomic:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];
	case tx_sched_directed:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];
	case tx_invalid: return ev_port->stats.tx_invalid;
	case outstanding_releases: return ev_port->outstanding_releases;
	case max_outstanding_releases:
		/* a build-time constant, not a live counter */
		return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
	case rx_sched_ordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];
	case rx_sched_unordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];
	case rx_sched_atomic:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];
	case rx_sched_directed:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];
	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
/*
 * Queue-depth threshold counters are tracked per port; sum the per-port
 * bucket @stat for queue @qid across every possible port to produce the
 * queue-level value.
 */
dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];
/*
 * enq_ok is also tracked per enqueuing port; sum across all ports to get
 * the total number of events successfully enqueued to queue @qid.
 */
dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
	uint64_t enq_ok_tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;
/*
 * Per-queue stat accessor: returns the raw value of @type for queue
 * @obj_idx. Depth-threshold and enq_ok values are aggregated from per-port
 * counters by the helpers above.
 */
get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	       enum dlb2_xstats_type type, int extra_arg __rte_unused)
	struct dlb2_eventdev_queue *ev_queue =
		&dlb2->ev_queues[obj_idx];

	case is_configured: return ev_queue->setup_done;
	case is_load_balanced: return !ev_queue->qm_queue.is_directed;
	case hw_id: return ev_queue->qm_queue.id;
	case num_links: return ev_queue->num_links;
	case sched_type: return ev_queue->qm_queue.sched_type;
	case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);
	case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);
	case depth_threshold: return ev_queue->depth_threshold;
	case depth_le50_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_LE50);
	case depth_gt50_le75_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT50_LE75);
	case depth_gt75_le100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT75_LE100);
	case depth_gt100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT100);
/*
 * Build the flat xstats table: device-scope stats first, then per-port
 * stats for every possible port, then per-queue stats for every possible
 * queue. Per-scope offsets and counts are recorded for later lookup.
 */
dlb2_xstats_init(struct dlb2_eventdev *dlb2)
	/*
	 * define the stats names and types. Used to build up the device
	 * There are multiple set of stats:
	 * For each of these sets, we have three parallel arrays, one for the
	 * names, the other for the stat type parameter to be passed in the fn
	 * call to get that stat. The third array allows resetting or not.
	 * All these arrays must be kept in sync
	 */
	static const char * const dev_stats[] = {
		"rx_umonitor_umwait",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
	static const enum dlb2_xstats_type dev_types[] = {
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,

	/* Note: generated device stats are not allowed to be reset. */
	static const uint8_t dev_reset_allowed[] = {
		0, /* rx_interrupt_wait */
		0, /* rx_umonitor_umwait */
		0, /* tx_nospc_ldb_hw_credits */
		0, /* tx_nospc_dir_hw_credits */
		0, /* tx_nospc_inflight_max */
		0, /* tx_nospc_new_event_limit */
		0, /* tx_nospc_inflight_credits */
		0, /* nb_events_limit */
		0, /* inflight_events */
		0, /* ldb_pool_size */
		0, /* dir_pool_size */

	static const char * const port_stats[] = {
		"rx_umonitor_umwait",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"tx_sched_unordered",
		"outstanding_releases",
		"max_outstanding_releases",
		"rx_sched_unordered",
	static const enum dlb2_xstats_type port_types[] = {
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		outstanding_releases,
		max_outstanding_releases,
	static const uint8_t port_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		1, /* rx_interrupt_wait */
		1, /* rx_umonitor_umwait */
		1, /* tx_nospc_ldb_hw_credits */
		1, /* tx_nospc_dir_hw_credits */
		1, /* tx_nospc_inflight_max */
		1, /* tx_nospc_new_event_limit */
		1, /* tx_nospc_inflight_credits */
		1, /* tx_implicit_rel */
		1, /* tx_sched_ordered */
		1, /* tx_sched_unordered */
		1, /* tx_sched_atomic */
		1, /* tx_sched_directed */
		0, /* outstanding_releases */
		0, /* max_outstanding_releases */
		1, /* rx_sched_ordered */
		1, /* rx_sched_unordered */
		1, /* rx_sched_atomic */
		1, /* rx_sched_directed */
		1 /* rx_sched_invalid */

	/* QID specific stats */
	static const char * const qid_stats[] = {
		"depth_le50_threshold",
		"depth_gt50_le75_threshold",
		"depth_gt75_le100_threshold",
		"depth_gt100_threshold",
	static const enum dlb2_xstats_type qid_types[] = {
		depth_le50_threshold,
		depth_gt50_le75_threshold,
		depth_gt75_le100_threshold,
		depth_gt100_threshold,
	static const uint8_t qid_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* current_depth */
		0, /* depth_threshold */
		1, /* depth_le50_threshold */
		1, /* depth_gt50_le75_threshold */
		1, /* depth_gt75_le100_threshold */
		1, /* depth_gt100_threshold */

	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	/* upper bound: every possible port/queue gets a full stat set */
	const unsigned int count = RTE_DIM(dev_stats) +
		DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
		DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
	unsigned int i, port, qid, stat_id = 0;

	dlb2->xstats = rte_zmalloc_socket(NULL,
			sizeof(dlb2->xstats[0]) * count, 0,
			dlb2->qm_instance.info.socket_id);
	if (dlb2->xstats == NULL)

/* shorthand for the current entry's name buffer, used by snprintf below */
#define sname dlb2->xstats[stat_id].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
		dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
			.fn_id = DLB2_XSTATS_FN_DEV,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = dev_reset_allowed[i],
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	/* device stats occupy slots [0, stat_id) */
	dlb2->xstats_count_mode_dev = stat_id;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
		dlb2->xstats_offset_for_port[port] = stat_id;

		uint32_t count_offset = stat_id;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_PORT,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			snprintf(sname, sizeof(sname), "port_%u_%s",
				 port, port_stats[i]);

		dlb2->xstats_count_per_port[port] = stat_id - count_offset;

	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;

	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
		uint32_t count_offset = stat_id;

		dlb2->xstats_offset_for_qid[qid] = stat_id;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_QUEUE,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			snprintf(sname, sizeof(sname), "qid_%u_%s",

		dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;

	dlb2->xstats_count_mode_queue = stat_id -
		(dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);

	dlb2->xstats_count = stat_id;
638 dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
640 rte_free(dlb2->xstats);
641 dlb2->xstats_count = 0;
/*
 * Eventdev xstats_get_names() op: fill xstats_names[]/ids[] for the
 * requested mode and object. Per the eventdev API, when the output arrays
 * are NULL or too small, only the required count is returned.
 */
dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
	struct rte_event_dev_xstats_name *xstats_names,
	unsigned int *ids, unsigned int size)
	const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		start_offset = dlb2->xstats_offset_for_port[queue_port_id];
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		/* queue_port_id is a uint8_t, so the bound check only matters
		 * when the max queue count fits in 8 bits; the second clause
		 * is compile-time constant and elides the check otherwise.
		 */
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
		    (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		start_offset = dlb2->xstats_offset_for_qid[queue_port_id];

	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
		return xstats_mode_count;

	for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
		if (dlb2->xstats[i].mode != mode)

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != dlb2->xstats[i].obj_idx)

		xstats_names[xidx] = dlb2->xstats[i].name;
		/* an object's stats are contiguous, so offset + position works */
		ids[xidx] = start_offset + xidx;
697 dlb2_xstats_update(struct dlb2_eventdev *dlb2,
698 enum rte_event_dev_xstats_mode mode,
699 uint8_t queue_port_id, const unsigned int ids[],
700 uint64_t values[], unsigned int n, const uint32_t reset)
703 unsigned int xidx = 0;
704 uint32_t xstats_mode_count = 0;
707 case RTE_EVENT_DEV_XSTATS_DEVICE:
708 xstats_mode_count = dlb2->xstats_count_mode_dev;
710 case RTE_EVENT_DEV_XSTATS_PORT:
711 if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
713 xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
715 case RTE_EVENT_DEV_XSTATS_QUEUE:
716 #if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
717 if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
720 xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
726 for (i = 0; i < n && xidx < xstats_mode_count; i++) {
727 struct dlb2_xstats_entry *xs = &dlb2->xstats[ids[i]];
730 if (ids[i] > dlb2->xstats_count || xs->mode != mode)
733 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
734 queue_port_id != xs->obj_idx)
738 case DLB2_XSTATS_FN_DEV:
741 case DLB2_XSTATS_FN_PORT:
744 case DLB2_XSTATS_FN_QUEUE:
748 DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
752 uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
753 xs->extra_arg) - xs->reset_value;
758 if (xs->reset_allowed && reset)
759 xs->reset_value += val;
/*
 * Eventdev xstats_get() op: read the requested values without resetting
 * them (reset = 0).
 */
dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
	const unsigned int ids[], uint64_t values[], unsigned int n)
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	const uint32_t reset = 0;

	return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
/*
 * Eventdev xstats_get_by_name() op: linear-search the table for @name and
 * return that stat's reset-adjusted value, optionally reporting its id.
 */
dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id)
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);

	for (i = 0; i < dlb2->xstats_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (strncmp(xs->name.name, name,
			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
			/* dispatch to the scope accessor for this entry */
			case DLB2_XSTATS_FN_DEV:
			case DLB2_XSTATS_FN_PORT:
			case DLB2_XSTATS_FN_QUEUE:
				DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
			return fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;
/*
 * Emulate a reset for each resettable stat in table slots
 * [start, start + num): snapshot its current value into reset_value, which
 * readers subtract from subsequent reads.
 */
dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
	for (i = start; i < start + num; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		/* silently skip stats that may not be reset */
		if (!xs->reset_allowed)

		case DLB2_XSTATS_FN_DEV:
		case DLB2_XSTATS_FN_PORT:
		case DLB2_XSTATS_FN_QUEUE:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
		xs->reset_value = val;
/*
 * Reset a queue's stats: the explicit id list when one is supplied (via
 * dlb2_xstats_update with reset=1 and values=NULL), otherwise the queue's
 * entire contiguous range.
 */
dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
		const uint32_t ids[], uint32_t nb_ids)
	const uint32_t reset = 1;

		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
		/* every requested id must actually have been reset */
		return nb_reset == nb_ids ? 0 : -EINVAL;

	dlb2_xstats_reset_range(dlb2,
		dlb2->xstats_offset_for_qid[queue_id],
		dlb2->xstats_count_per_qid[queue_id]);
/*
 * Reset a port's stats: the explicit id list when one is supplied,
 * otherwise the port's entire contiguous range.
 */
dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
		const uint32_t ids[], uint32_t nb_ids)
	const uint32_t reset = 1;
	int offset = dlb2->xstats_offset_for_port[port_id];
	int nb_stat = dlb2->xstats_count_per_port[port_id];

		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
		/* every requested id must actually have been reset */
		return nb_reset == nb_ids ? 0 : -EINVAL;

	dlb2_xstats_reset_range(dlb2, offset, nb_stat);
/*
 * Reset device-scope stats. Device entries occupy table slots
 * [0, xstats_count_mode_dev), so ids are validated against that bound;
 * with no id list, every device stat is reset.
 */
dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
	for (i = 0; i < nb_ids; i++) {
		uint32_t id = ids[i];

		if (id >= dlb2->xstats_count_mode_dev)
		dlb2_xstats_reset_range(dlb2, id, 1);

	for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
		dlb2_xstats_reset_range(dlb2, i, 1);
/*
 * Eventdev xstats_reset() op. queue_port_id == -1 means "all ports" or
 * "all queues" for the corresponding mode; out-of-range ids are rejected.
 */
dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version);
				if (dlb2_xstats_reset_port(dlb2, i,
		} else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
			if (dlb2_xstats_reset_port(dlb2, queue_port_id,
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < DLB2_MAX_NUM_QUEUES(dlb2->version);
				if (dlb2_xstats_reset_queue(dlb2, i,
		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
			if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
970 dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
972 struct dlb2_eventdev *dlb2;
973 struct dlb2_hw_dev *handle;
976 dlb2 = dlb2_pmd_priv(dev);
979 fprintf(f, "DLB2 Event device cannot be dumped!\n");
983 if (!dlb2->configured)
984 fprintf(f, "DLB2 Event device is not configured\n");
986 handle = &dlb2->qm_instance;
988 fprintf(f, "================\n");
989 fprintf(f, "DLB2 Device Dump\n");
990 fprintf(f, "================\n");
992 fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
993 dlb2->umwait_allowed ? "yes" : "no");
995 /* Generic top level device information */
997 fprintf(f, "device is configured and run state =");
998 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
999 fprintf(f, "STOPPED\n");
1000 else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
1001 fprintf(f, "STOPPING\n");
1002 else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
1003 fprintf(f, "STARTING\n");
1004 else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
1005 fprintf(f, "STARTED\n");
1007 fprintf(f, "UNEXPECTED\n");
1009 fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
1010 handle->domain_id, handle->info.socket_id, dlb2->event_dev);
1012 fprintf(f, "num dir ports=%u, num dir queues=%u\n",
1013 dlb2->num_dir_ports, dlb2->num_dir_queues);
1015 fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
1016 dlb2->num_ldb_ports, dlb2->num_ldb_queues);
1018 fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
1019 handle->cfg.resources.num_atomic_inflights,
1020 handle->cfg.resources.num_hist_list_entries);
1022 fprintf(f, "results from most recent hw resource query:\n");
1024 fprintf(f, "\tnum_sched_domains = %u\n",
1025 dlb2->hw_rsrc_query_results.num_sched_domains);
1027 fprintf(f, "\tnum_ldb_queues = %u\n",
1028 dlb2->hw_rsrc_query_results.num_ldb_queues);
1030 fprintf(f, "\tnum_ldb_ports = %u\n",
1031 dlb2->hw_rsrc_query_results.num_ldb_ports);
1033 fprintf(f, "\tnum_dir_ports = %u\n",
1034 dlb2->hw_rsrc_query_results.num_dir_ports);
1036 fprintf(f, "\tnum_atomic_inflights = %u\n",
1037 dlb2->hw_rsrc_query_results.num_atomic_inflights);
1039 fprintf(f, "\tnum_hist_list_entries = %u\n",
1040 dlb2->hw_rsrc_query_results.num_hist_list_entries);
1042 fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
1043 dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);
1045 fprintf(f, "\tnum_ldb_credits = %u\n",
1046 dlb2->hw_rsrc_query_results.num_ldb_credits);
1048 fprintf(f, "\tnum_dir_credits = %u\n",
1049 dlb2->hw_rsrc_query_results.num_dir_credits);
1051 /* Port level information */
1053 for (i = 0; i < dlb2->num_ports; i++) {
1054 struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
1057 if (!p->enq_configured)
1058 fprintf(f, "Port_%d is not configured\n", i);
1060 fprintf(f, "Port_%d\n", i);
1061 fprintf(f, "=======\n");
1063 fprintf(f, "\tevport_%u is configured, setup done=%d\n",
1064 p->id, p->setup_done);
1066 fprintf(f, "\tconfig state=%d, port state=%d\n",
1067 p->qm_port.config_state, p->qm_port.state);
1069 fprintf(f, "\tport is %s\n",
1070 p->qm_port.is_directed ? "directed" : "load balanced");
1072 fprintf(f, "\toutstanding releases=%u\n",
1073 p->outstanding_releases);
1075 fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
1076 p->inflight_max, p->inflight_credits);
1078 fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
1079 p->credit_update_quanta, p->implicit_release);
1081 fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
1083 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1084 if (p->link[j].valid)
1085 fprintf(f, "id=%u prio=%u ",
1086 p->link[j].queue_id,
1087 p->link[j].priority);
1091 fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
1093 fprintf(f, "\tcached_ldb_credits=%u\n",
1094 p->qm_port.cached_ldb_credits);
1096 fprintf(f, "\tldb_credits = %u\n",
1097 p->qm_port.ldb_credits);
1099 fprintf(f, "\tcached_dir_credits = %u\n",
1100 p->qm_port.cached_dir_credits);
1102 fprintf(f, "\tdir_credits = %u\n",
1103 p->qm_port.dir_credits);
1105 fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
1108 p->qm_port.cq_depth);
1110 fprintf(f, "\tinterrupt armed=%d\n",
1111 p->qm_port.int_armed);
1113 fprintf(f, "\tPort statistics\n");
1115 fprintf(f, "\t\trx_ok %" PRIu64 "\n",
1116 p->stats.traffic.rx_ok);
1118 fprintf(f, "\t\trx_drop %" PRIu64 "\n",
1119 p->stats.traffic.rx_drop);
1121 fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
1122 p->stats.traffic.rx_interrupt_wait);
1124 fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
1125 p->stats.traffic.rx_umonitor_umwait);
1127 fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
1128 p->stats.traffic.tx_ok);
1130 fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
1131 p->stats.traffic.total_polls);
1133 fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
1134 p->stats.traffic.zero_polls);
1136 fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
1137 p->stats.traffic.tx_nospc_ldb_hw_credits);
1139 fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
1140 p->stats.traffic.tx_nospc_dir_hw_credits);
1142 fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
1143 p->stats.traffic.tx_nospc_inflight_max);
1145 fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
1146 p->stats.traffic.tx_nospc_new_event_limit);
1148 fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
1149 p->stats.traffic.tx_nospc_inflight_credits);
1151 fprintf(f, "\t\ttx_new %" PRIu64 "\n",
1152 p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
1154 fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
1155 p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
1157 fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
1158 p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
1160 fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
1161 p->stats.tx_implicit_rel);
1163 fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
1164 p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);
1166 fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
1167 p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);
1169 fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
1170 p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);
1172 fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
1173 p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);
1175 fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
1176 p->stats.tx_invalid);
1178 fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
1179 p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);
1181 fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
1182 p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);
1184 fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
1185 p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);
1187 fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
1188 p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);
1190 fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
1191 p->stats.rx_sched_invalid);
1194 /* Queue level information */
1196 for (i = 0; i < dlb2->num_queues; i++) {
1197 struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
1201 fprintf(f, "Queue_%d is not configured\n", i);
1203 fprintf(f, "Queue_%d\n", i);
1204 fprintf(f, "========\n");
1206 fprintf(f, "\tevqueue_%u is set up\n", q->id);
1208 fprintf(f, "\tqueue is %s\n",
1209 q->qm_queue.is_directed ? "directed" : "load balanced");
1211 fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
1213 for (j = 0; j < dlb2->num_ports; j++) {
1214 struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];
1216 for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
1217 if (p->link[k].valid &&
1218 p->link[k].queue_id == q->id)
1219 fprintf(f, "id=%u prio=%u ",
1220 p->id, p->link[k].priority);
1225 fprintf(f, "\tcurrent depth: %u events\n",
1226 dlb2_get_queue_depth(dlb2, q));
1228 fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
1229 q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);