event/dlb2: use new implementation of resource file
[dpdk.git] / drivers / event / dlb2 / dlb2_xstats.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <inttypes.h>
6
7 #include <rte_malloc.h>
8 #include <rte_eventdev.h>
9
10 #include "dlb2_priv.h"
11 #include "dlb2_inline_fns.h"
12
/*
 * Identifiers for every extended statistic the PMD exposes.
 *
 * NOTE: the enumerator order is load-bearing. These values are stored in
 * struct dlb2_xstats_entry::stat and switched on by the get_*_stat()
 * accessors, and the traffic counters (rx_ok .. tx_nospc_inflight_credits)
 * are forwarded verbatim to dlb2_device_traffic_stat_get(). Do not reorder.
 */
enum dlb2_xstats_type {
	/* common to device and port */
	rx_ok,				/**< Receive an event */
	rx_drop,			/**< Error bit set in received QE */
	rx_interrupt_wait,		/**< Wait on an interrupt */
	rx_umonitor_umwait,		/**< Block using umwait */
	tx_ok,				/**< Transmit an event */
	total_polls,			/**< Call dequeue_burst */
	zero_polls,			/**< Call dequeue burst and return 0 */
	tx_nospc_ldb_hw_credits,	/**< Insufficient LDB h/w credits */
	tx_nospc_dir_hw_credits,	/**< Insufficient DIR h/w credits */
	tx_nospc_inflight_max,		/**< Reach the new_event_threshold */
	tx_nospc_new_event_limit,	/**< Insufficient s/w credits */
	tx_nospc_inflight_credits,	/**< Port has too few s/w credits */
	/* device specific */
	nb_events_limit,
	inflight_events,
	ldb_pool_size,
	dir_pool_size,
	/* port specific */
	tx_new,				/**< Send an OP_NEW event */
	tx_fwd,				/**< Send an OP_FORWARD event */
	tx_rel,				/**< Send an OP_RELEASE event */
	tx_implicit_rel,		/**< Issue an implicit event release */
	tx_sched_ordered,		/**< Send a SCHED_TYPE_ORDERED event */
	tx_sched_unordered,		/**< Send a SCHED_TYPE_PARALLEL event */
	tx_sched_atomic,		/**< Send a SCHED_TYPE_ATOMIC event */
	tx_sched_directed,		/**< Send a directed event */
	tx_invalid,			/**< Send an event with an invalid op */
	outstanding_releases,		/**< # of releases a port owes */
	max_outstanding_releases,	/**< max # of releases a port can owe */
	rx_sched_ordered,		/**< Dequeue an ordered event */
	rx_sched_unordered,		/**< Dequeue an unordered event */
	rx_sched_atomic,		/**< Dequeue an atomic event */
	rx_sched_directed,		/**< Dequeue an directed event */
	rx_sched_invalid,		/**< Dequeue event sched type invalid */
	/* common to port and queue */
	is_configured,			/**< Port is configured */
	is_load_balanced,		/**< Port is LDB */
	hw_id,				/**< Hardware ID */
	/* queue specific */
	num_links,			/**< Number of ports linked */
	sched_type,			/**< Queue sched type */
	enq_ok,				/**< # events enqueued to the queue */
	current_depth,			/**< Current queue depth */
	depth_threshold,		/**< Programmed depth threshold */
	depth_le50_threshold,
	/**< Depth LE to 50% of the configured hardware threshold */
	depth_gt50_le75_threshold,
	/**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
	depth_gt75_le100_threshold,
	/**< Depth GT 75%. but LE to the configured hardware threshold */
	depth_gt100_threshold
	/**< Depth GT 100% of the configured hw threshold */
};
68
/*
 * Accessor signature shared by get_dev_stat(), get_port_stat() and
 * get_queue_stat(). Returns the raw (never reset-adjusted) counter value;
 * callers subtract struct dlb2_xstats_entry::reset_value themselves.
 */
typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
		uint16_t obj_idx, /* port or queue id */
		enum dlb2_xstats_type stat, int extra_arg);

/* Selects which dlb2_xstats_fn accessor services an xstats entry. */
enum dlb2_xstats_fn_type {
	DLB2_XSTATS_FN_DEV,
	DLB2_XSTATS_FN_PORT,
	DLB2_XSTATS_FN_QUEUE
};

/* One row of the flat xstats table built by dlb2_xstats_init(). */
struct dlb2_xstats_entry {
	struct rte_event_dev_xstats_name name;
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
	enum dlb2_xstats_fn_type fn_id;
	enum dlb2_xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint16_t obj_idx; /* port or queue this entry belongs to */
	uint8_t reset_allowed; /* when set, this value can be reset */
};
89
90 /* Some device stats are simply a summation of the corresponding port values */
91 static uint64_t
92 dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
93                              int which_stat)
94 {
95         int i;
96         uint64_t val = 0;
97
98         for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
99                 struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];
100
101                 if (!port->setup_done)
102                         continue;
103
104                 switch (which_stat) {
105                 case rx_ok:
106                         val += port->stats.traffic.rx_ok;
107                         break;
108                 case rx_drop:
109                         val += port->stats.traffic.rx_drop;
110                         break;
111                 case rx_interrupt_wait:
112                         val += port->stats.traffic.rx_interrupt_wait;
113                         break;
114                 case rx_umonitor_umwait:
115                         val += port->stats.traffic.rx_umonitor_umwait;
116                         break;
117                 case tx_ok:
118                         val += port->stats.traffic.tx_ok;
119                         break;
120                 case total_polls:
121                         val += port->stats.traffic.total_polls;
122                         break;
123                 case zero_polls:
124                         val += port->stats.traffic.zero_polls;
125                         break;
126                 case tx_nospc_ldb_hw_credits:
127                         val += port->stats.traffic.tx_nospc_ldb_hw_credits;
128                         break;
129                 case tx_nospc_dir_hw_credits:
130                         val += port->stats.traffic.tx_nospc_dir_hw_credits;
131                         break;
132                 case tx_nospc_inflight_max:
133                         val += port->stats.traffic.tx_nospc_inflight_max;
134                         break;
135                 case tx_nospc_new_event_limit:
136                         val += port->stats.traffic.tx_nospc_new_event_limit;
137                         break;
138                 case tx_nospc_inflight_credits:
139                         val += port->stats.traffic.tx_nospc_inflight_credits;
140                         break;
141                 default:
142                         return -1;
143                 }
144         }
145         return val;
146 }
147
148 static uint64_t
149 get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
150              enum dlb2_xstats_type type, int extra_arg __rte_unused)
151 {
152         switch (type) {
153         case rx_ok:
154         case rx_drop:
155         case rx_interrupt_wait:
156         case rx_umonitor_umwait:
157         case tx_ok:
158         case total_polls:
159         case zero_polls:
160         case tx_nospc_ldb_hw_credits:
161         case tx_nospc_dir_hw_credits:
162         case tx_nospc_inflight_max:
163         case tx_nospc_new_event_limit:
164         case tx_nospc_inflight_credits:
165                 return dlb2_device_traffic_stat_get(dlb2, type);
166         case nb_events_limit:
167                 return dlb2->new_event_limit;
168         case inflight_events:
169                 return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
170         case ldb_pool_size:
171                 return dlb2->num_ldb_credits;
172         case dir_pool_size:
173                 return dlb2->num_dir_credits;
174         default: return -1;
175         }
176 }
177
178 static uint64_t
179 get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
180               enum dlb2_xstats_type type, int extra_arg __rte_unused)
181 {
182         struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];
183
184         switch (type) {
185         case rx_ok: return ev_port->stats.traffic.rx_ok;
186
187         case rx_drop: return ev_port->stats.traffic.rx_drop;
188
189         case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
190
191         case rx_umonitor_umwait:
192                 return ev_port->stats.traffic.rx_umonitor_umwait;
193
194         case tx_ok: return ev_port->stats.traffic.tx_ok;
195
196         case total_polls: return ev_port->stats.traffic.total_polls;
197
198         case zero_polls: return ev_port->stats.traffic.zero_polls;
199
200         case tx_nospc_ldb_hw_credits:
201                 return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
202
203         case tx_nospc_dir_hw_credits:
204                 return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
205
206         case tx_nospc_inflight_max:
207                 return ev_port->stats.traffic.tx_nospc_inflight_max;
208
209         case tx_nospc_new_event_limit:
210                 return ev_port->stats.traffic.tx_nospc_new_event_limit;
211
212         case tx_nospc_inflight_credits:
213                 return ev_port->stats.traffic.tx_nospc_inflight_credits;
214
215         case is_configured: return ev_port->setup_done;
216
217         case is_load_balanced: return !ev_port->qm_port.is_directed;
218
219         case hw_id: return ev_port->qm_port.id;
220
221         case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
222
223         case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
224
225         case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
226
227         case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
228
229         case tx_sched_ordered:
230                 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];
231
232         case tx_sched_unordered:
233                 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];
234
235         case tx_sched_atomic:
236                 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];
237
238         case tx_sched_directed:
239                 return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];
240
241         case tx_invalid: return ev_port->stats.tx_invalid;
242
243         case outstanding_releases: return ev_port->outstanding_releases;
244
245         case max_outstanding_releases:
246                 return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
247
248         case rx_sched_ordered:
249                 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];
250
251         case rx_sched_unordered:
252                 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];
253
254         case rx_sched_atomic:
255                 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];
256
257         case rx_sched_directed:
258                 return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];
259
260         case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
261
262         default: return -1;
263         }
264 }
265
266 static uint64_t
267 dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
268 {
269         int port = 0;
270         uint64_t tally = 0;
271
272         for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
273                 tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];
274
275         return tally;
276 }
277
278 static uint64_t
279 dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
280 {
281         int port = 0;
282         uint64_t enq_ok_tally = 0;
283
284         for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
285                 enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;
286
287         return enq_ok_tally;
288 }
289
290 static uint64_t
291 get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
292                enum dlb2_xstats_type type, int extra_arg __rte_unused)
293 {
294         struct dlb2_eventdev_queue *ev_queue =
295                 &dlb2->ev_queues[obj_idx];
296
297         switch (type) {
298         case is_configured: return ev_queue->setup_done;
299
300         case is_load_balanced: return !ev_queue->qm_queue.is_directed;
301
302         case hw_id: return ev_queue->qm_queue.id;
303
304         case num_links: return ev_queue->num_links;
305
306         case sched_type: return ev_queue->qm_queue.sched_type;
307
308         case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);
309
310         case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);
311
312         case depth_threshold: return ev_queue->depth_threshold;
313
314         case depth_le50_threshold:
315                 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
316                                                DLB2_QID_DEPTH_LE50);
317
318         case depth_gt50_le75_threshold:
319                 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
320                                                DLB2_QID_DEPTH_GT50_LE75);
321
322         case depth_gt75_le100_threshold:
323                 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
324                                                DLB2_QID_DEPTH_GT75_LE100);
325
326         case depth_gt100_threshold:
327                 return dlb2_get_threshold_stat(dlb2, ev_queue->id,
328                                                DLB2_QID_DEPTH_GT100);
329
330         default: return -1;
331         }
332 }
333
/*
 * Build the flat xstats table for this device: one entry per device stat,
 * plus one entry per (port, port stat) and per (queue, queue stat) pair.
 * Also records the per-mode counts/offsets used by the get/reset paths.
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
int
dlb2_xstats_init(struct dlb2_eventdev *dlb2)
{
	/*
	 * define the stats names and types. Used to build up the device
	 * xstats array
	 * There are multiple set of stats:
	 *   - device-level,
	 *   - per-port,
	 *   - per-qid,
	 *
	 * For each of these sets, we have three parallel arrays, one for the
	 * names, the other for the stat type parameter to be passed in the fn
	 * call to get that stat. The third array allows resetting or not.
	 * All these arrays must be kept in sync
	 */
	static const char * const dev_stats[] = {
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"nb_events_limit",
		"inflight_events",
		"ldb_pool_size",
		"dir_pool_size",
	};
	static const enum dlb2_xstats_type dev_types[] = {
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		nb_events_limit,
		inflight_events,
		ldb_pool_size,
		dir_pool_size,
	};
	/* Note: generated device stats are not allowed to be reset. */
	static const uint8_t dev_reset_allowed[] = {
		0, /* rx_ok */
		0, /* rx_drop */
		0, /* rx_interrupt_wait */
		0, /* rx_umonitor_umwait */
		0, /* tx_ok */
		0, /* total_polls */
		0, /* zero_polls */
		0, /* tx_nospc_ldb_hw_credits */
		0, /* tx_nospc_dir_hw_credits */
		0, /* tx_nospc_inflight_max */
		0, /* tx_nospc_new_event_limit */
		0, /* tx_nospc_inflight_credits */
		0, /* nb_events_limit */
		0, /* inflight_events */
		0, /* ldb_pool_size */
		0, /* dir_pool_size */
	};
	static const char * const port_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"tx_new",
		"tx_fwd",
		"tx_rel",
		"tx_implicit_rel",
		"tx_sched_ordered",
		"tx_sched_unordered",
		"tx_sched_atomic",
		"tx_sched_directed",
		"tx_invalid",
		"outstanding_releases",
		"max_outstanding_releases",
		"rx_sched_ordered",
		"rx_sched_unordered",
		"rx_sched_atomic",
		"rx_sched_directed",
		"rx_sched_invalid"
	};
	static const enum dlb2_xstats_type port_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		tx_new,
		tx_fwd,
		tx_rel,
		tx_implicit_rel,
		tx_sched_ordered,
		tx_sched_unordered,
		tx_sched_atomic,
		tx_sched_directed,
		tx_invalid,
		outstanding_releases,
		max_outstanding_releases,
		rx_sched_ordered,
		rx_sched_unordered,
		rx_sched_atomic,
		rx_sched_directed,
		rx_sched_invalid
	};
	static const uint8_t port_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		1, /* rx_ok */
		1, /* rx_drop */
		1, /* rx_interrupt_wait */
		1, /* rx_umonitor_umwait */
		1, /* tx_ok */
		1, /* total_polls */
		1, /* zero_polls */
		1, /* tx_nospc_ldb_hw_credits */
		1, /* tx_nospc_dir_hw_credits */
		1, /* tx_nospc_inflight_max */
		1, /* tx_nospc_new_event_limit */
		1, /* tx_nospc_inflight_credits */
		1, /* tx_new */
		1, /* tx_fwd */
		1, /* tx_rel */
		1, /* tx_implicit_rel */
		1, /* tx_sched_ordered */
		1, /* tx_sched_unordered */
		1, /* tx_sched_atomic */
		1, /* tx_sched_directed */
		1, /* tx_invalid */
		0, /* outstanding_releases */
		0, /* max_outstanding_releases */
		1, /* rx_sched_ordered */
		1, /* rx_sched_unordered */
		1, /* rx_sched_atomic */
		1, /* rx_sched_directed */
		1  /* rx_sched_invalid */
	};

	/* QID specific stats */
	static const char * const qid_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"num_links",
		"sched_type",
		"enq_ok",
		"current_depth",
		"depth_threshold",
		"depth_le50_threshold",
		"depth_gt50_le75_threshold",
		"depth_gt75_le100_threshold",
		"depth_gt100_threshold",
	};
	static const enum dlb2_xstats_type qid_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		num_links,
		sched_type,
		enq_ok,
		current_depth,
		depth_threshold,
		depth_le50_threshold,
		depth_gt50_le75_threshold,
		depth_gt75_le100_threshold,
		depth_gt100_threshold,
	};
	static const uint8_t qid_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		0, /* num_links */
		0, /* sched_type */
		1, /* enq_ok */
		0, /* current_depth */
		0, /* depth_threshold */
		1, /* depth_le50_threshold */
		1, /* depth_gt50_le75_threshold */
		1, /* depth_gt75_le100_threshold */
		1, /* depth_gt100_threshold */
	};

	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));

	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	/* other vars */
	const unsigned int count = RTE_DIM(dev_stats) +
		DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
		DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
	unsigned int i, port, qid, stat_id = 0;

	/* rte_zmalloc_socket zeroes the table, so every entry starts with
	 * reset_value == 0.
	 */
	dlb2->xstats = rte_zmalloc_socket(NULL,
			sizeof(dlb2->xstats[0]) * count, 0,
			dlb2->qm_instance.info.socket_id);
	if (dlb2->xstats == NULL)
		return -ENOMEM;

/* Shorthand for the name buffer of the entry currently being filled in. */
#define sname dlb2->xstats[stat_id].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
		dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
			.fn_id = DLB2_XSTATS_FN_DEV,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = dev_reset_allowed[i],
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	dlb2->xstats_count_mode_dev = stat_id;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
		dlb2->xstats_offset_for_port[port] = stat_id;

		uint32_t count_offset = stat_id;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_PORT,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
				 port, port_stats[i]);
		}

		dlb2->xstats_count_per_port[port] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;

	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
		uint32_t count_offset = stat_id;

		dlb2->xstats_offset_for_qid[qid] = stat_id;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_QUEUE,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
				 qid, qid_stats[i]);
		}

		dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_queue = stat_id -
		(dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);
#undef sname

	dlb2->xstats_count = stat_id;

	return 0;
}
636
637 void
638 dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
639 {
640         rte_free(dlb2->xstats);
641         dlb2->xstats_count = 0;
642 }
643
644 int
645 dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
646                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
647                 struct rte_event_dev_xstats_name *xstats_names,
648                 unsigned int *ids, unsigned int size)
649 {
650         const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
651         unsigned int i;
652         unsigned int xidx = 0;
653         uint32_t xstats_mode_count = 0;
654         uint32_t start_offset = 0;
655
656         switch (mode) {
657         case RTE_EVENT_DEV_XSTATS_DEVICE:
658                 xstats_mode_count = dlb2->xstats_count_mode_dev;
659                 break;
660         case RTE_EVENT_DEV_XSTATS_PORT:
661                 if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
662                         break;
663                 xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
664                 start_offset = dlb2->xstats_offset_for_port[queue_port_id];
665                 break;
666         case RTE_EVENT_DEV_XSTATS_QUEUE:
667                 if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
668                     (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
669                         break;
670                 xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
671                 start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
672                 break;
673         default:
674                 return -EINVAL;
675         };
676
677         if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
678                 return xstats_mode_count;
679
680         for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
681                 if (dlb2->xstats[i].mode != mode)
682                         continue;
683
684                 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
685                     queue_port_id != dlb2->xstats[i].obj_idx)
686                         continue;
687
688                 xstats_names[xidx] = dlb2->xstats[i].name;
689                 if (ids)
690                         ids[xidx] = start_offset + xidx;
691                 xidx++;
692         }
693         return xidx;
694 }
695
696 static int
697 dlb2_xstats_update(struct dlb2_eventdev *dlb2,
698                 enum rte_event_dev_xstats_mode mode,
699                 uint8_t queue_port_id, const unsigned int ids[],
700                 uint64_t values[], unsigned int n, const uint32_t reset)
701 {
702         unsigned int i;
703         unsigned int xidx = 0;
704         uint32_t xstats_mode_count = 0;
705
706         switch (mode) {
707         case RTE_EVENT_DEV_XSTATS_DEVICE:
708                 xstats_mode_count = dlb2->xstats_count_mode_dev;
709                 break;
710         case RTE_EVENT_DEV_XSTATS_PORT:
711                 if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
712                         goto invalid_value;
713                 xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
714                 break;
715         case RTE_EVENT_DEV_XSTATS_QUEUE:
716 #if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
717                 if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
718                         goto invalid_value;
719 #endif
720                 xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
721                 break;
722         default:
723                 goto invalid_value;
724         };
725
726         for (i = 0; i < n && xidx < xstats_mode_count; i++) {
727                 struct dlb2_xstats_entry *xs = &dlb2->xstats[ids[i]];
728                 dlb2_xstats_fn fn;
729
730                 if (ids[i] > dlb2->xstats_count || xs->mode != mode)
731                         continue;
732
733                 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
734                     queue_port_id != xs->obj_idx)
735                         continue;
736
737                 switch (xs->fn_id) {
738                 case DLB2_XSTATS_FN_DEV:
739                         fn = get_dev_stat;
740                         break;
741                 case DLB2_XSTATS_FN_PORT:
742                         fn = get_port_stat;
743                         break;
744                 case DLB2_XSTATS_FN_QUEUE:
745                         fn = get_queue_stat;
746                         break;
747                 default:
748                         DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
749                         goto invalid_value;
750                 }
751
752                 uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
753                                   xs->extra_arg) - xs->reset_value;
754
755                 if (values)
756                         values[xidx] = val;
757
758                 if (xs->reset_allowed && reset)
759                         xs->reset_value += val;
760
761                 xidx++;
762         }
763
764         return xidx;
765
766 invalid_value:
767         return -EINVAL;
768 }
769
770 int
771 dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
772                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
773                 const unsigned int ids[], uint64_t values[], unsigned int n)
774 {
775         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
776         const uint32_t reset = 0;
777
778         return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
779                                   reset);
780 }
781
782 uint64_t
783 dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
784                                  const char *name, unsigned int *id)
785 {
786         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
787         unsigned int i;
788         dlb2_xstats_fn fn;
789
790         for (i = 0; i < dlb2->xstats_count; i++) {
791                 struct dlb2_xstats_entry *xs = &dlb2->xstats[i];
792
793                 if (strncmp(xs->name.name, name,
794                             RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
795                         if (id != NULL)
796                                 *id = i;
797
798                         switch (xs->fn_id) {
799                         case DLB2_XSTATS_FN_DEV:
800                                 fn = get_dev_stat;
801                                 break;
802                         case DLB2_XSTATS_FN_PORT:
803                                 fn = get_port_stat;
804                                 break;
805                         case DLB2_XSTATS_FN_QUEUE:
806                                 fn = get_queue_stat;
807                                 break;
808                         default:
809                                 DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
810                                           xs->fn_id);
811                                 return (uint64_t)-1;
812                         }
813
814                         return fn(dlb2, xs->obj_idx, xs->stat,
815                                   xs->extra_arg) - xs->reset_value;
816                 }
817         }
818         if (id != NULL)
819                 *id = (uint32_t)-1;
820         return (uint64_t)-1;
821 }
822
823 static void
824 dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
825                         uint32_t num)
826 {
827         uint32_t i;
828         dlb2_xstats_fn fn;
829
830         for (i = start; i < start + num; i++) {
831                 struct dlb2_xstats_entry *xs = &dlb2->xstats[i];
832
833                 if (!xs->reset_allowed)
834                         continue;
835
836                 switch (xs->fn_id) {
837                 case DLB2_XSTATS_FN_DEV:
838                         fn = get_dev_stat;
839                         break;
840                 case DLB2_XSTATS_FN_PORT:
841                         fn = get_port_stat;
842                         break;
843                 case DLB2_XSTATS_FN_QUEUE:
844                         fn = get_queue_stat;
845                         break;
846                 default:
847                         DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
848                         return;
849                 }
850
851                 uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
852                 xs->reset_value = val;
853         }
854 }
855
856 static int
857 dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
858                         const uint32_t ids[], uint32_t nb_ids)
859 {
860         const uint32_t reset = 1;
861
862         if (ids) {
863                 uint32_t nb_reset = dlb2_xstats_update(dlb2,
864                                         RTE_EVENT_DEV_XSTATS_QUEUE,
865                                         queue_id, ids, NULL, nb_ids,
866                                         reset);
867                 return nb_reset == nb_ids ? 0 : -EINVAL;
868         }
869
870         if (ids == NULL)
871                 dlb2_xstats_reset_range(dlb2,
872                         dlb2->xstats_offset_for_qid[queue_id],
873                         dlb2->xstats_count_per_qid[queue_id]);
874
875         return 0;
876 }
877
878 static int
879 dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
880                        const uint32_t ids[], uint32_t nb_ids)
881 {
882         const uint32_t reset = 1;
883         int offset = dlb2->xstats_offset_for_port[port_id];
884         int nb_stat = dlb2->xstats_count_per_port[port_id];
885
886         if (ids) {
887                 uint32_t nb_reset = dlb2_xstats_update(dlb2,
888                                         RTE_EVENT_DEV_XSTATS_PORT, port_id,
889                                         ids, NULL, nb_ids,
890                                         reset);
891                 return nb_reset == nb_ids ? 0 : -EINVAL;
892         }
893
894         dlb2_xstats_reset_range(dlb2, offset, nb_stat);
895         return 0;
896 }
897
898 static int
899 dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
900                       uint32_t nb_ids)
901 {
902         uint32_t i;
903
904         if (ids) {
905                 for (i = 0; i < nb_ids; i++) {
906                         uint32_t id = ids[i];
907
908                         if (id >= dlb2->xstats_count_mode_dev)
909                                 return -EINVAL;
910                         dlb2_xstats_reset_range(dlb2, id, 1);
911                 }
912         } else {
913                 for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
914                         dlb2_xstats_reset_range(dlb2, i, 1);
915         }
916
917         return 0;
918 }
919
920 int
921 dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
922                            enum rte_event_dev_xstats_mode mode,
923                            int16_t queue_port_id,
924                            const uint32_t ids[],
925                            uint32_t nb_ids)
926 {
927         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
928         uint32_t i;
929
930         /* handle -1 for queue_port_id here, looping over all ports/queues */
931         switch (mode) {
932         case RTE_EVENT_DEV_XSTATS_DEVICE:
933                 if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
934                         return -EINVAL;
935                 break;
936         case RTE_EVENT_DEV_XSTATS_PORT:
937                 if (queue_port_id == -1) {
938                         for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version);
939                                         i++) {
940                                 if (dlb2_xstats_reset_port(dlb2, i,
941                                                            ids, nb_ids))
942                                         return -EINVAL;
943                         }
944                 } else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
945                         if (dlb2_xstats_reset_port(dlb2, queue_port_id,
946                                                    ids, nb_ids))
947                                 return -EINVAL;
948                 }
949                 break;
950         case RTE_EVENT_DEV_XSTATS_QUEUE:
951                 if (queue_port_id == -1) {
952                         for (i = 0; i < DLB2_MAX_NUM_QUEUES(dlb2->version);
953                                         i++) {
954                                 if (dlb2_xstats_reset_queue(dlb2, i,
955                                                             ids, nb_ids))
956                                         return -EINVAL;
957                         }
958                 } else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
959                         if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
960                                                     ids, nb_ids))
961                                 return -EINVAL;
962                 }
963                 break;
964         };
965
966         return 0;
967 }
968
/* Dump the DLB2 eventdev's configuration, run state, hardware resource
 * query results, and per-port / per-queue state and statistics to the
 * given stream. Backs the rte_event_dev_dump() eventdev op.
 */
void
dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct dlb2_eventdev *dlb2;
	struct dlb2_hw_dev *handle;
	int i;

	dlb2 = dlb2_pmd_priv(dev);

	if (dlb2 == NULL) {
		fprintf(f, "DLB2 Event device cannot be dumped!\n");
		return;
	}

	/* Warn but keep dumping — the structure contents may still help. */
	if (!dlb2->configured)
		fprintf(f, "DLB2 Event device is not configured\n");

	handle = &dlb2->qm_instance;

	fprintf(f, "================\n");
	fprintf(f, "DLB2 Device Dump\n");
	fprintf(f, "================\n");

	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
		dlb2->umwait_allowed ? "yes" : "no");

	/* Generic top level device information */

	fprintf(f, "device is configured and run state =");
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		fprintf(f, "STOPPED\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
		fprintf(f, "STOPPING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
		fprintf(f, "STARTING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
		fprintf(f, "STARTED\n");
	else
		fprintf(f, "UNEXPECTED\n");

	fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
		handle->domain_id, handle->info.socket_id, dlb2->event_dev);

	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
		dlb2->num_dir_ports, dlb2->num_dir_queues);

	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
		dlb2->num_ldb_ports, dlb2->num_ldb_queues);

	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
		handle->cfg.resources.num_atomic_inflights,
		handle->cfg.resources.num_hist_list_entries);

	/* Hardware resource availability cached from the last query. */
	fprintf(f, "results from most recent hw resource query:\n");

	fprintf(f, "\tnum_sched_domains = %u\n",
		dlb2->hw_rsrc_query_results.num_sched_domains);

	fprintf(f, "\tnum_ldb_queues = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_queues);

	fprintf(f, "\tnum_ldb_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_ports);

	fprintf(f, "\tnum_dir_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_ports);

	fprintf(f, "\tnum_atomic_inflights = %u\n",
		dlb2->hw_rsrc_query_results.num_atomic_inflights);

	fprintf(f, "\tnum_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.num_hist_list_entries);

	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);

	fprintf(f, "\tnum_ldb_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_credits);

	fprintf(f, "\tnum_dir_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_credits);

	/* Port level information */

	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
		int j;

		if (!p->enq_configured)
			fprintf(f, "Port_%d is not configured\n", i);

		fprintf(f, "Port_%d\n", i);
		fprintf(f, "=======\n");

		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
			p->id, p->setup_done);

		fprintf(f, "\tconfig state=%d, port state=%d\n",
			p->qm_port.config_state, p->qm_port.state);

		fprintf(f, "\tport is %s\n",
			p->qm_port.is_directed ? "directed" : "load balanced");

		fprintf(f, "\toutstanding releases=%u\n",
			p->outstanding_releases);

		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
			p->inflight_max, p->inflight_credits);

		fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
			p->credit_update_quanta, p->implicit_release);

		/* Enumerate the queue links (id/priority) for this port. */
		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			if (p->link[j].valid)
				fprintf(f, "id=%u prio=%u ",
					p->link[j].queue_id,
					p->link[j].priority);
		}
		fprintf(f, "\n");

		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);

		fprintf(f, "\tcached_ldb_credits=%u\n",
			p->qm_port.cached_ldb_credits);

		fprintf(f, "\tldb_credits = %u\n",
			p->qm_port.ldb_credits);

		fprintf(f, "\tcached_dir_credits = %u\n",
			p->qm_port.cached_dir_credits);

		fprintf(f, "\tdir_credits = %u\n",
			p->qm_port.dir_credits);

		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
			p->qm_port.gen_bit,
			p->qm_port.cq_idx,
			p->qm_port.cq_depth);

		fprintf(f, "\tinterrupt armed=%d\n",
			p->qm_port.int_armed);

		/* Software-maintained per-port traffic counters. */
		fprintf(f, "\tPort statistics\n");

		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
			p->stats.traffic.rx_ok);

		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
			p->stats.traffic.rx_drop);

		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
			p->stats.traffic.rx_interrupt_wait);

		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
			p->stats.traffic.rx_umonitor_umwait);

		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
			p->stats.traffic.tx_ok);

		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
			p->stats.traffic.total_polls);

		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
			p->stats.traffic.zero_polls);

		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_ldb_hw_credits);

		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_dir_hw_credits);

		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_max);

		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_new_event_limit);

		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_credits);

		/* Transmit counts broken down by event op type. */
		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);

		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);

		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);

		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
			p->stats.tx_implicit_rel);

		/* Transmit/receive counts broken down by sched type. */
		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
			p->stats.tx_invalid);

		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
			p->stats.rx_sched_invalid);
	}

	/* Queue level information */

	for (i = 0; i < dlb2->num_queues; i++) {
		struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
		int j, k;

		if (!q->setup_done)
			fprintf(f, "Queue_%d is not configured\n", i);

		fprintf(f, "Queue_%d\n", i);
		fprintf(f, "========\n");

		fprintf(f, "\tevqueue_%u is set up\n", q->id);

		fprintf(f, "\tqueue is %s\n",
			q->qm_queue.is_directed ? "directed" : "load balanced");

		/* Reverse lookup: scan every port's link table to find the
		 * ports linked to this queue.
		 */
		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);

		for (j = 0; j < dlb2->num_ports; j++) {
			struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];

			for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
				if (p->link[k].valid &&
				    p->link[k].queue_id == q->id)
					fprintf(f, "id=%u prio=%u ",
						p->id, p->link[k].priority);
			}
		}
		fprintf(f, "\n");

		fprintf(f, "\tcurrent depth: %u events\n",
			dlb2_get_queue_depth(dlb2, q));

		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
	}
}