event/dlb2: add v2.5 get resources
[dpdk.git] / drivers / event / dlb2 / dlb2.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <nmmintrin.h>
8 #include <pthread.h>
9 #include <stdint.h>
10 #include <stdbool.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/mman.h>
14 #include <fcntl.h>
15
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <eventdev_pmd.h>
24 #include <rte_io.h>
25 #include <rte_kvargs.h>
26 #include <rte_log.h>
27 #include <rte_malloc.h>
28 #include <rte_mbuf.h>
29 #include <rte_power_intrinsics.h>
30 #include <rte_prefetch.h>
31 #include <rte_ring.h>
32 #include <rte_string_fns.h>
33
34 #include "dlb2_priv.h"
35 #include "dlb2_iface.h"
36 #include "dlb2_inline_fns.h"
37
38 /*
39  * Resources exposed to eventdev. Some values are overridden at runtime
40  * using values returned by the DLB kernel driver.
41  */
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
44 #endif
45 static struct rte_event_dev_info evdev_dlb2_default_info = {
46         .driver_name = "", /* probe will set */
47         .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
48         .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
50         .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 #else
52         .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
53 #endif
54         .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
55         .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
56         .max_event_priority_levels = DLB2_QID_PRIORITIES,
57         .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
58         .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
59         .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
60         .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
61         .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
62         .max_single_link_event_port_queue_pairs =
63                 DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
64         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
65                           RTE_EVENT_DEV_CAP_EVENT_QOS |
66                           RTE_EVENT_DEV_CAP_BURST_MODE |
67                           RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
68                           RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
69                           RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
70 };
71
72 struct process_local_port_data
73 dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];
74
75 static void
76 dlb2_free_qe_mem(struct dlb2_port *qm_port)
77 {
78         if (qm_port == NULL)
79                 return;
80
81         rte_free(qm_port->qe4);
82         qm_port->qe4 = NULL;
83
84         rte_free(qm_port->int_arm_qe);
85         qm_port->int_arm_qe = NULL;
86
87         rte_free(qm_port->consume_qe);
88         qm_port->consume_qe = NULL;
89
90         rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
91         dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
92 }
93
94 /* override defaults with value(s) provided on command line */
95 static void
96 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
97                                  int *qid_depth_thresholds)
98 {
99         int q;
100
101         for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
102                 if (qid_depth_thresholds[q] != 0)
103                         dlb2->ev_queues[q].depth_threshold =
104                                 qid_depth_thresholds[q];
105         }
106 }
107
108 static int
109 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
110 {
111         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
112         struct dlb2_hw_resource_info *dlb2_info = &handle->info;
113         int ret;
114
115         /* Query driver resources provisioned for this device */
116
117         ret = dlb2_iface_get_num_resources(handle,
118                                            &dlb2->hw_rsrc_query_results);
119         if (ret) {
120                 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
121                 return ret;
122         }
123
124         /* Complete filling in device resource info returned to evdev app,
125          * overriding any default values.
126          * The capabilities (CAPs) were set at compile time.
127          */
128
129         evdev_dlb2_default_info.max_event_queues =
130                 dlb2->hw_rsrc_query_results.num_ldb_queues;
131
132         evdev_dlb2_default_info.max_event_ports =
133                 dlb2->hw_rsrc_query_results.num_ldb_ports;
134
135         if (dlb2->version == DLB2_HW_V2_5) {
136                 evdev_dlb2_default_info.max_num_events =
137                         dlb2->hw_rsrc_query_results.num_credits;
138         } else {
139                 evdev_dlb2_default_info.max_num_events =
140                         dlb2->hw_rsrc_query_results.num_ldb_credits;
141         }
142         /* Save off values used when creating the scheduling domain. */
143
144         handle->info.num_sched_domains =
145                 dlb2->hw_rsrc_query_results.num_sched_domains;
146
147         if (dlb2->version == DLB2_HW_V2_5) {
148                 handle->info.hw_rsrc_max.nb_events_limit =
149                         dlb2->hw_rsrc_query_results.num_credits;
150         } else {
151                 handle->info.hw_rsrc_max.nb_events_limit =
152                         dlb2->hw_rsrc_query_results.num_ldb_credits;
153         }
154         handle->info.hw_rsrc_max.num_queues =
155                 dlb2->hw_rsrc_query_results.num_ldb_queues +
156                 dlb2->hw_rsrc_query_results.num_dir_ports;
157
158         handle->info.hw_rsrc_max.num_ldb_queues =
159                 dlb2->hw_rsrc_query_results.num_ldb_queues;
160
161         handle->info.hw_rsrc_max.num_ldb_ports =
162                 dlb2->hw_rsrc_query_results.num_ldb_ports;
163
164         handle->info.hw_rsrc_max.num_dir_ports =
165                 dlb2->hw_rsrc_query_results.num_dir_ports;
166
167         handle->info.hw_rsrc_max.reorder_window_size =
168                 dlb2->hw_rsrc_query_results.num_hist_list_entries;
169
170         rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
171
172         return 0;
173 }
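
/* Illustrative example of the query above (values assumed, not taken from
 * this commit): a v2.0 device might report separate pools such as
 * num_ldb_credits = 8192 and num_dir_credits = 2048, whereas a v2.5 device
 * reports one combined pool, e.g. num_credits = 16384, which is what
 * max_num_events and nb_events_limit are set from in the v2.5 branches above.
 */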
174
175 #define DLB2_BASE_10 10
176
177 static int
178 dlb2_string_to_int(int *result, const char *str)
179 {
180         long ret;
181         char *endptr;
182
183         if (str == NULL || result == NULL)
184                 return -EINVAL;
185
186         errno = 0;
187         ret = strtol(str, &endptr, DLB2_BASE_10);
188         if (errno)
189                 return -errno;
190
191         /* long and int may have different widths on some architectures */
192         if (ret < INT_MIN || ret > INT_MAX || endptr == str)
193                 return -EINVAL;
194
195         *result = ret;
196         return 0;
197 }
198
199 static int
200 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
201 {
202         int *socket_id = opaque;
203         int ret;
204
205         ret = dlb2_string_to_int(socket_id, value);
206         if (ret < 0)
207                 return ret;
208
209         if (*socket_id > RTE_MAX_NUMA_NODES)
210                 return -EINVAL;
211         return 0;
212 }
213
214 static int
215 set_max_num_events(const char *key __rte_unused,
216                    const char *value,
217                    void *opaque)
218 {
219         int *max_num_events = opaque;
220         int ret;
221
222         if (value == NULL || opaque == NULL) {
223                 DLB2_LOG_ERR("NULL pointer\n");
224                 return -EINVAL;
225         }
226
227         ret = dlb2_string_to_int(max_num_events, value);
228         if (ret < 0)
229                 return ret;
230
231         if (*max_num_events < 0 || *max_num_events >
232                         DLB2_MAX_NUM_LDB_CREDITS) {
233                 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
234                              DLB2_MAX_NUM_LDB_CREDITS);
235                 return -EINVAL;
236         }
237
238         return 0;
239 }
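
/* Illustrative usage (devarg key assumed; the kvargs wiring is not shown in
 * this excerpt): a device argument such as max_num_events=2048 would route
 * the string "2048" through this handler, and the parsed value presumably
 * becomes max_num_events_override, which dlb2_eventdev_info_get() applies as
 * an upper bound on the advertised max_num_events.
 */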
240
241 static int
242 set_num_dir_credits(const char *key __rte_unused,
243                     const char *value,
244                     void *opaque)
245 {
246         int *num_dir_credits = opaque;
247         int ret;
248
249         if (value == NULL || opaque == NULL) {
250                 DLB2_LOG_ERR("NULL pointer\n");
251                 return -EINVAL;
252         }
253
254         ret = dlb2_string_to_int(num_dir_credits, value);
255         if (ret < 0)
256                 return ret;
257
258         if (*num_dir_credits < 0 ||
259             *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
260                 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
261                              DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
262                 return -EINVAL;
263         }
264
265         return 0;
266 }
267
268 static int
269 set_dev_id(const char *key __rte_unused,
270            const char *value,
271            void *opaque)
272 {
273         int *dev_id = opaque;
274         int ret;
275
276         if (value == NULL || opaque == NULL) {
277                 DLB2_LOG_ERR("NULL pointer\n");
278                 return -EINVAL;
279         }
280
281         ret = dlb2_string_to_int(dev_id, value);
282         if (ret < 0)
283                 return ret;
284
285         return 0;
286 }
287
288 static int
289 set_cos(const char *key __rte_unused,
290         const char *value,
291         void *opaque)
292 {
293         enum dlb2_cos *cos_id = opaque;
294         int x = 0;
295         int ret;
296
297         if (value == NULL || opaque == NULL) {
298                 DLB2_LOG_ERR("NULL pointer\n");
299                 return -EINVAL;
300         }
301
302         ret = dlb2_string_to_int(&x, value);
303         if (ret < 0)
304                 return ret;
305
306         if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
307                 DLB2_LOG_ERR(
308                         "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
309                         x);
310                 return -EINVAL;
311         }
312
313         *cos_id = x;
314
315         return 0;
316 }
317
318 static int
319 set_qid_depth_thresh(const char *key __rte_unused,
320                      const char *value,
321                      void *opaque)
322 {
323         struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
324         int first, last, thresh, i;
325
326         if (value == NULL || opaque == NULL) {
327                 DLB2_LOG_ERR("NULL pointer\n");
328                 return -EINVAL;
329         }
330
331         /* command line override may take one of the following 3 forms:
332          * qid_depth_thresh=all:<threshold_value> ... all queues
333          * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
334          * qid_depth_thresh=qid:<threshold_value> ... just one queue
335          */
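        /* Illustrative values (assumed, not from this commit):
         *   qid_depth_thresh=all:256  ... threshold 256 on every queue
         *   qid_depth_thresh=0-3:128  ... threshold 128 on queues 0 through 3
         *   qid_depth_thresh=5:64     ... threshold 64 on queue 5 only
         */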
336         if (sscanf(value, "all:%d", &thresh) == 1) {
337                 first = 0;
338                 last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
339         } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
340                 /* we have everything we need */
341         } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
342                 last = first;
343         } else {
344                 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
345                 return -EINVAL;
346         }
347
348         if (first > last || first < 0 ||
349                 last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
350                 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
351                 return -EINVAL;
352         }
353
354         if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
355                 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
356                              DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
357                 return -EINVAL;
358         }
359
360         for (i = first; i <= last; i++)
361                 qid_thresh->val[i] = thresh; /* indexed by qid */
362
363         return 0;
364 }
365
366 static int
367 set_qid_depth_thresh_v2_5(const char *key __rte_unused,
368                           const char *value,
369                           void *opaque)
370 {
371         struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
372         int first, last, thresh, i;
373
374         if (value == NULL || opaque == NULL) {
375                 DLB2_LOG_ERR("NULL pointer\n");
376                 return -EINVAL;
377         }
378
379         /* command line override may take one of the following 3 forms:
380          * qid_depth_thresh=all:<threshold_value> ... all queues
381          * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
382          * qid_depth_thresh=qid:<threshold_value> ... just one queue
383          */
384         if (sscanf(value, "all:%d", &thresh) == 1) {
385                 first = 0;
386                 last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
387         } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
388                 /* we have everything we need */
389         } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
390                 last = first;
391         } else {
392                 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
393                 return -EINVAL;
394         }
395
396         if (first > last || first < 0 ||
397                 last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
398                 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
399                 return -EINVAL;
400         }
401
402         if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
403                 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
404                              DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
405                 return -EINVAL;
406         }
407
408         for (i = first; i <= last; i++)
409                 qid_thresh->val[i] = thresh; /* indexed by qid */
410
411         return 0;
412 }
413
414 static void
415 dlb2_eventdev_info_get(struct rte_eventdev *dev,
416                        struct rte_event_dev_info *dev_info)
417 {
418         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
419         int ret;
420
421         ret = dlb2_hw_query_resources(dlb2);
422         if (ret) {
423                 const struct rte_eventdev_data *data = dev->data;
424
425                 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
426                              ret, data->dev_id);
427                 /* This function is void, so fall through and return the
428                  * values set up at probe time.
429                  */
430         }
431
432         /* Add num resources currently owned by this domain.
433          * These would become available if the scheduling domain were reset due
434          * to the application calling eventdev_configure again to *reconfigure*
435          * the domain.
436          */
437         evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
438         evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
439         evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
440
441         evdev_dlb2_default_info.max_event_queues =
442                 RTE_MIN(evdev_dlb2_default_info.max_event_queues,
443                         RTE_EVENT_MAX_QUEUES_PER_DEV);
444
445         evdev_dlb2_default_info.max_num_events =
446                 RTE_MIN(evdev_dlb2_default_info.max_num_events,
447                         dlb2->max_num_events_override);
448
449         *dev_info = evdev_dlb2_default_info;
450 }
451
452 static int
453 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
454                             const struct dlb2_hw_rsrcs *resources_asked)
455 {
456         int ret = 0;
457         struct dlb2_create_sched_domain_args *cfg;
458
459         if (resources_asked == NULL) {
460                 DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
461                 ret = EINVAL;
462                 goto error_exit;
463         }
464
465         /* Map generic qm resources to dlb2 resources */
466         cfg = &handle->cfg.resources;
467
468         /* DIR ports and queues */
469
470         cfg->num_dir_ports = resources_asked->num_dir_ports;
471
472         cfg->num_dir_credits = resources_asked->num_dir_credits;
473
474         /* LDB queues */
475
476         cfg->num_ldb_queues = resources_asked->num_ldb_queues;
477
478         /* LDB ports */
479
480         cfg->cos_strict = 0; /* Best effort */
481         cfg->num_cos_ldb_ports[0] = 0;
482         cfg->num_cos_ldb_ports[1] = 0;
483         cfg->num_cos_ldb_ports[2] = 0;
484         cfg->num_cos_ldb_ports[3] = 0;
485
486         switch (handle->cos_id) {
487         case DLB2_COS_0:
488                 cfg->num_ldb_ports = 0; /* no don't care ports */
489                 cfg->num_cos_ldb_ports[0] =
490                         resources_asked->num_ldb_ports;
491                 break;
492         case DLB2_COS_1:
493                 cfg->num_ldb_ports = 0; /* no don't care ports */
494                 cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
495                 break;
496         case DLB2_COS_2:
497                 cfg->num_ldb_ports = 0; /* no don't care ports */
498                 cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
499                 break;
500         case DLB2_COS_3:
501                 cfg->num_ldb_ports = 0; /* no don't care ports */
502                 cfg->num_cos_ldb_ports[3] =
503                         resources_asked->num_ldb_ports;
504                 break;
505         case DLB2_COS_DEFAULT:
506                 /* all ldb ports are don't care ports from a cos perspective */
507                 cfg->num_ldb_ports =
508                         resources_asked->num_ldb_ports;
509                 break;
510         }
511
512         cfg->num_ldb_credits =
513                 resources_asked->num_ldb_credits;
514
515         cfg->num_atomic_inflights =
516                 DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
517                 cfg->num_ldb_queues;
518
519         cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
520                 DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
521
522         DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
523                      cfg->num_ldb_queues,
524                      resources_asked->num_ldb_ports,
525                      cfg->num_dir_ports,
526                      cfg->num_atomic_inflights,
527                      cfg->num_hist_list_entries,
528                      cfg->num_ldb_credits,
529                      cfg->num_dir_credits);
530
531         /* Configure the QM */
532
533         ret = dlb2_iface_sched_domain_create(handle, cfg);
534         if (ret < 0) {
535                 DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
536                              ret,
537                              dlb2_error_strings[cfg->response.status]);
538
539                 goto error_exit;
540         }
541
542         handle->domain_id = cfg->response.id;
543         handle->cfg.configured = true;
544
545 error_exit:
546
547         return ret;
548 }
549
550 static void
551 dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
552 {
553         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
554         enum dlb2_configuration_state config_state;
555         int i, j;
556
557         dlb2_iface_domain_reset(dlb2);
558
559         /* Free all dynamically allocated port memory */
560         for (i = 0; i < dlb2->num_ports; i++)
561                 dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
562
563         /* If reconfiguring, mark the device's queues and ports as "previously
564          * configured." If the user doesn't reconfigure them, the PMD will
565          * reapply their previous configuration when the device is started.
566          */
567         config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
568                 DLB2_NOT_CONFIGURED;
569
570         for (i = 0; i < dlb2->num_ports; i++) {
571                 dlb2->ev_ports[i].qm_port.config_state = config_state;
572                 /* Reset setup_done so ports can be reconfigured */
573                 dlb2->ev_ports[i].setup_done = false;
574                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
575                         dlb2->ev_ports[i].link[j].mapped = false;
576         }
577
578         for (i = 0; i < dlb2->num_queues; i++)
579                 dlb2->ev_queues[i].qm_queue.config_state = config_state;
580
581         for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
582                 dlb2->ev_queues[i].setup_done = false;
583
584         dlb2->num_ports = 0;
585         dlb2->num_ldb_ports = 0;
586         dlb2->num_dir_ports = 0;
587         dlb2->num_queues = 0;
588         dlb2->num_ldb_queues = 0;
589         dlb2->num_dir_queues = 0;
590         dlb2->configured = false;
591 }
592
593 /* Note: 1 QM instance per QM device, QM instance/device == event device */
594 static int
595 dlb2_eventdev_configure(const struct rte_eventdev *dev)
596 {
597         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
598         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
599         struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
600         const struct rte_eventdev_data *data = dev->data;
601         const struct rte_event_dev_config *config = &data->dev_conf;
602         int ret;
603
604         /* If this eventdev is already configured, we must release the current
605          * scheduling domain before attempting to configure a new one.
606          */
607         if (dlb2->configured) {
608                 dlb2_hw_reset_sched_domain(dev, true);
609
610                 ret = dlb2_hw_query_resources(dlb2);
611                 if (ret) {
612                         DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
613                                      ret, data->dev_id);
614                         return ret;
615                 }
616         }
617
618         if (config->nb_event_queues > rsrcs->num_queues) {
619                 DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
620                              config->nb_event_queues,
621                              rsrcs->num_queues);
622                 return -EINVAL;
623         }
624         if (config->nb_event_ports > (rsrcs->num_ldb_ports
625                         + rsrcs->num_dir_ports)) {
626                 DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
627                              config->nb_event_ports,
628                              (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
629                 return -EINVAL;
630         }
631         if (config->nb_events_limit > rsrcs->nb_events_limit) {
632                 DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
633                              config->nb_events_limit,
634                              rsrcs->nb_events_limit);
635                 return -EINVAL;
636         }
637
638         if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
639                 dlb2->global_dequeue_wait = false;
640         else {
641                 uint32_t timeout32;
642
643                 dlb2->global_dequeue_wait = true;
644
645                 /* note size mismatch of timeout vals in eventdev lib. */
646                 timeout32 = config->dequeue_timeout_ns;
647
648                 dlb2->global_dequeue_wait_ticks =
649                         timeout32 * (rte_get_timer_hz() / 1E9);
650         }
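
        /* Illustrative arithmetic (assumed values): with a 2 GHz timer
         * (rte_get_timer_hz() == 2e9) and dequeue_timeout_ns = 1000, the
         * global wait becomes 1000 * (2e9 / 1e9) = 2000 timer ticks.
         */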
651
652         /* Does this platform support umonitor/umwait? */
653         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
654                 if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
655                     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
656                         DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
657                                      RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
658                         return -EINVAL;
659                 }
660                 dlb2->umwait_allowed = true;
661         }
662
663         rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
664         rsrcs->num_ldb_ports  = config->nb_event_ports - rsrcs->num_dir_ports;
665         /* 1 dir queue per dir port */
666         rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
667
668         /* Scale down nb_events_limit by 4 for directed credits, since there
669          * are 4x as many load-balanced credits.
670          */
671         rsrcs->num_ldb_credits = 0;
672         rsrcs->num_dir_credits = 0;
673
674         if (rsrcs->num_ldb_queues)
675                 rsrcs->num_ldb_credits = config->nb_events_limit;
676         if (rsrcs->num_dir_ports)
677                 rsrcs->num_dir_credits = config->nb_events_limit / 4;
678         if (dlb2->num_dir_credits_override != -1)
679                 rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
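
        /* Illustrative arithmetic (assumed values): nb_events_limit = 2048
         * yields 2048 LDB credits and 2048 / 4 = 512 directed credits, unless
         * num_dir_credits_override (set via devargs) replaced the directed
         * amount above.
         */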
680
681         if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
682                 DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
683                 return -ENODEV;
684         }
685
686         dlb2->new_event_limit = config->nb_events_limit;
687         __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
688
689         /* Save number of ports/queues for this event dev */
690         dlb2->num_ports = config->nb_event_ports;
691         dlb2->num_queues = config->nb_event_queues;
692         dlb2->num_dir_ports = rsrcs->num_dir_ports;
693         dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
694         dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
695         dlb2->num_dir_queues = dlb2->num_dir_ports;
696         dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
697         dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
698         dlb2->dir_credit_pool = rsrcs->num_dir_credits;
699         dlb2->max_dir_credits = rsrcs->num_dir_credits;
700
701         dlb2->configured = true;
702
703         return 0;
704 }
705
706 static void
707 dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
708                                     uint8_t port_id,
709                                     struct rte_event_port_conf *port_conf)
710 {
711         RTE_SET_USED(port_id);
712         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
713
714         port_conf->new_event_threshold = dlb2->new_event_limit;
715         port_conf->dequeue_depth = 32;
716         port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
717         port_conf->event_port_cfg = 0;
718 }
719
720 static void
721 dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
722                                      uint8_t queue_id,
723                                      struct rte_event_queue_conf *queue_conf)
724 {
725         RTE_SET_USED(dev);
726         RTE_SET_USED(queue_id);
727
728         queue_conf->nb_atomic_flows = 1024;
729         queue_conf->nb_atomic_order_sequences = 64;
730         queue_conf->event_queue_cfg = 0;
731         queue_conf->priority = 0;
732 }
733
734 static int32_t
735 dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
736 {
737         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
738         struct dlb2_get_sn_allocation_args cfg;
739         int ret;
740
741         cfg.group = group;
742
743         ret = dlb2_iface_get_sn_allocation(handle, &cfg);
744         if (ret < 0) {
745                 DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
746                              ret, dlb2_error_strings[cfg.response.status]);
747                 return ret;
748         }
749
750         return cfg.response.id;
751 }
752
753 static int
754 dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
755 {
756         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
757         struct dlb2_set_sn_allocation_args cfg;
758         int ret;
759
760         cfg.num = num;
761         cfg.group = group;
762
763         ret = dlb2_iface_set_sn_allocation(handle, &cfg);
764         if (ret < 0) {
765                 DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
766                              ret, dlb2_error_strings[cfg.response.status]);
767                 return ret;
768         }
769
770         return ret;
771 }
772
773 static int32_t
774 dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
775 {
776         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
777         struct dlb2_get_sn_occupancy_args cfg;
778         int ret;
779
780         cfg.group = group;
781
782         ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
783         if (ret < 0) {
784                 DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
785                              ret, dlb2_error_strings[cfg.response.status]);
786                 return ret;
787         }
788
789         return cfg.response.id;
790 }
791
792 /* Query the current sequence number allocations and, if they conflict with the
793  * requested LDB queue configuration, attempt to re-allocate sequence numbers.
794  * This is best-effort; if it fails, the PMD will attempt to configure the
795  * load-balanced queue and return an error.
796  */
797 static void
798 dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
799                            const struct rte_event_queue_conf *queue_conf)
800 {
801         int grp_occupancy[DLB2_NUM_SN_GROUPS];
802         int grp_alloc[DLB2_NUM_SN_GROUPS];
803         int i, sequence_numbers;
804
805         sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
806
807         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
808                 int total_slots;
809
810                 grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
811                 if (grp_alloc[i] < 0)
812                         return;
813
814                 total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
815
816                 grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
817                 if (grp_occupancy[i] < 0)
818                         return;
819
820                 /* DLB has at least one available slot for the requested
821                  * sequence numbers, so no further configuration required.
822                  */
823                 if (grp_alloc[i] == sequence_numbers &&
824                     grp_occupancy[i] < total_slots)
825                         return;
826         }
827
828         /* None of the sequence number groups are configured for the requested
829          * sequence numbers, so we have to reconfigure one of them. This is
830          * only possible if a group is not in use.
831          */
832         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
833                 if (grp_occupancy[i] == 0)
834                         break;
835         }
836
837         if (i == DLB2_NUM_SN_GROUPS) {
838                 DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
839                        __func__, sequence_numbers);
840                 return;
841         }
842
843         /* Attempt to configure slot i with the requested number of sequence
844          * numbers. Ignore the return value -- if this fails, the error will be
845          * caught during subsequent queue configuration.
846          */
847         dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
848 }
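
/* Illustrative walk-through of the allocation above (numbers assumed, not
 * taken from this commit): if DLB2_MAX_LDB_SN_ALLOC were 1024, a group
 * allocated 64 sequence numbers would have 1024 / 64 = 16 slots. A queue
 * requesting 64 sequence numbers is satisfied by any such group with a free
 * slot; otherwise an unoccupied group, if one exists, is reprogrammed to 64
 * via dlb2_set_sn_allocation().
 */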
849
850 static int32_t
851 dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
852                          struct dlb2_eventdev_queue *ev_queue,
853                          const struct rte_event_queue_conf *evq_conf)
854 {
855         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
856         struct dlb2_queue *queue = &ev_queue->qm_queue;
857         struct dlb2_create_ldb_queue_args cfg;
858         int32_t ret;
859         uint32_t qm_qid;
860         int sched_type = -1;
861
862         if (evq_conf == NULL)
863                 return -EINVAL;
864
865         if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
866                 if (evq_conf->nb_atomic_order_sequences != 0)
867                         sched_type = RTE_SCHED_TYPE_ORDERED;
868                 else
869                         sched_type = RTE_SCHED_TYPE_PARALLEL;
870         } else
871                 sched_type = evq_conf->schedule_type;
872
873         cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
874         cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
875         cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
876
877         if (sched_type != RTE_SCHED_TYPE_ORDERED) {
878                 cfg.num_sequence_numbers = 0;
879                 cfg.num_qid_inflights = 2048;
880         }
881
882         /* The application should set this to the number of hardware flows it
883          * wants, not the overall number of flows it will use. E.g. if the app
884          * uses 64 flows and sets compression to 64, it gets at best 64 unique
885          * hashed flows in hardware.
886          */
887         switch (evq_conf->nb_atomic_flows) {
888         /* Valid DLB2 compression levels */
889         case 64:
890         case 128:
891         case 256:
892         case 512:
893         case (1 * 1024): /* 1K */
894         case (2 * 1024): /* 2K */
895         case (4 * 1024): /* 4K */
896         case (64 * 1024): /* 64K */
897                 cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
898                 break;
899         default:
900                 /* Invalid compression level */
901                 cfg.lock_id_comp_level = 0; /* no compression */
902         }
903
904         if (ev_queue->depth_threshold == 0) {
905                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
906                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
907         } else
908                 cfg.depth_threshold = ev_queue->depth_threshold;
909
910         ret = dlb2_iface_ldb_queue_create(handle, &cfg);
911         if (ret < 0) {
912                 DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
913                              ret, dlb2_error_strings[cfg.response.status]);
914                 return -EINVAL;
915         }
916
917         qm_qid = cfg.response.id;
918
919         /* Save off queue config for debug, resource lookups, and reconfig */
920         queue->num_qid_inflights = cfg.num_qid_inflights;
921         queue->num_atm_inflights = cfg.num_atomic_inflights;
922
923         queue->sched_type = sched_type;
924         queue->config_state = DLB2_CONFIGURED;
925
926         DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
927                      qm_qid,
928                      cfg.num_atomic_inflights,
929                      cfg.num_sequence_numbers,
930                      cfg.num_qid_inflights);
931
932         return qm_qid;
933 }
934
935 static int
936 dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
937                               struct dlb2_eventdev_queue *ev_queue,
938                               const struct rte_event_queue_conf *queue_conf)
939 {
940         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
941         int32_t qm_qid;
942
943         if (queue_conf->nb_atomic_order_sequences)
944                 dlb2_program_sn_allocation(dlb2, queue_conf);
945
946         qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
947         if (qm_qid < 0) {
948                 DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
949
950                 return qm_qid;
951         }
952
953         dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
954
955         ev_queue->qm_queue.id = qm_qid;
956
957         return 0;
958 }
959
960 static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
961 {
962         int i, num = 0;
963
964         for (i = 0; i < dlb2->num_queues; i++) {
965                 if (dlb2->ev_queues[i].setup_done &&
966                     dlb2->ev_queues[i].qm_queue.is_directed)
967                         num++;
968         }
969
970         return num;
971 }
972
973 static void
974 dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
975                          struct dlb2_eventdev_queue *ev_queue)
976 {
977         struct dlb2_eventdev_port *ev_port;
978         int i, j;
979
980         for (i = 0; i < dlb2->num_ports; i++) {
981                 ev_port = &dlb2->ev_ports[i];
982
983                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
984                         if (!ev_port->link[j].valid ||
985                             ev_port->link[j].queue_id != ev_queue->id)
986                                 continue;
987
988                         ev_port->link[j].valid = false;
989                         ev_port->num_links--;
990                 }
991         }
992
993         ev_queue->num_links = 0;
994 }
995
996 static int
997 dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
998                           uint8_t ev_qid,
999                           const struct rte_event_queue_conf *queue_conf)
1000 {
1001         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1002         struct dlb2_eventdev_queue *ev_queue;
1003         int ret;
1004
1005         if (queue_conf == NULL)
1006                 return -EINVAL;
1007
1008         if (ev_qid >= dlb2->num_queues)
1009                 return -EINVAL;
1010
1011         ev_queue = &dlb2->ev_queues[ev_qid];
1012
1013         ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
1014                 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
1015         ev_queue->id = ev_qid;
1016         ev_queue->conf = *queue_conf;
1017
1018         if (!ev_queue->qm_queue.is_directed) {
1019                 ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
1020         } else {
1021                 /* The directed queue isn't setup until link time, at which
1022                  * point we know its directed port ID. Directed queue setup
1023                  * will only fail if this queue is already setup or there are
1024                  * no directed queues left to configure.
1025                  */
1026                 ret = 0;
1027
1028                 ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
1029
1030                 if (ev_queue->setup_done ||
1031                     dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
1032                         ret = -EINVAL;
1033         }
1034
1035         /* Tear down pre-existing port->queue links */
1036         if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1037                 dlb2_queue_link_teardown(dlb2, ev_queue);
1038
1039         if (!ret)
1040                 ev_queue->setup_done = true;
1041
1042         return ret;
1043 }
1044
1045 static int
1046 dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
1047 {
1048         struct dlb2_cq_pop_qe *qe;
1049
1050         qe = rte_zmalloc(mz_name,
1051                         DLB2_NUM_QES_PER_CACHE_LINE *
1052                                 sizeof(struct dlb2_cq_pop_qe),
1053                         RTE_CACHE_LINE_SIZE);
1054
1055         if (qe == NULL) {
1056                 DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
1057                 return -ENOMEM;
1058         }
1059         qm_port->consume_qe = qe;
1060
1061         qe->qe_valid = 0;
1062         qe->qe_frag = 0;
1063         qe->qe_comp = 0;
1064         qe->cq_token = 1;
1065         /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
1066          * and so on.
1067          */
1068         qe->tokens = 0; /* set at run time */
1069         qe->meas_lat = 0;
1070         qe->no_dec = 0;
1071         /* Completion IDs are disabled */
1072         qe->cmp_id = 0;
1073
1074         return 0;
1075 }
1076
1077 static int
1078 dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
1079 {
1080         struct dlb2_enqueue_qe *qe;
1081
1082         qe = rte_zmalloc(mz_name,
1083                         DLB2_NUM_QES_PER_CACHE_LINE *
1084                                 sizeof(struct dlb2_enqueue_qe),
1085                         RTE_CACHE_LINE_SIZE);
1086
1087         if (qe == NULL) {
1088                 DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
1089                 return -ENOMEM;
1090         }
1091         qm_port->int_arm_qe = qe;
1092
1093         /* V2 - INT ARM is CQ_TOKEN + FRAG */
1094         qe->qe_valid = 0;
1095         qe->qe_frag = 1;
1096         qe->qe_comp = 0;
1097         qe->cq_token = 1;
1098         qe->meas_lat = 0;
1099         qe->no_dec = 0;
1100         /* Completion IDs are disabled */
1101         qe->cmp_id = 0;
1102
1103         return 0;
1104 }
1105
1106 static int
1107 dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
1108 {
1109         int ret, sz;
1110
1111         sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
1112
1113         qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
1114
1115         if (qm_port->qe4 == NULL) {
1116                 DLB2_LOG_ERR("dlb2: no qe4 memory\n");
1117                 ret = -ENOMEM;
1118                 goto error_exit;
1119         }
1120
1121         ret = dlb2_init_int_arm_qe(qm_port, mz_name);
1122         if (ret < 0) {
1123                 DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
1124                 goto error_exit;
1125         }
1126
1127         ret = dlb2_init_consume_qe(qm_port, mz_name);
1128         if (ret < 0) {
1129                 DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
1130                 goto error_exit;
1131         }
1132
1133         return 0;
1134
1135 error_exit:
1136
1137         dlb2_free_qe_mem(qm_port);
1138
1139         return ret;
1140 }
1141
1142 static inline uint16_t
1143 dlb2_event_enqueue_delayed(void *event_port,
1144                            const struct rte_event events[]);
1145
1146 static inline uint16_t
1147 dlb2_event_enqueue_burst_delayed(void *event_port,
1148                                  const struct rte_event events[],
1149                                  uint16_t num);
1150
1151 static inline uint16_t
1152 dlb2_event_enqueue_new_burst_delayed(void *event_port,
1153                                      const struct rte_event events[],
1154                                      uint16_t num);
1155
1156 static inline uint16_t
1157 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
1158                                          const struct rte_event events[],
1159                                          uint16_t num);
1160
1161 static int
1162 dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
1163                         struct dlb2_eventdev_port *ev_port,
1164                         uint32_t dequeue_depth,
1165                         uint32_t enqueue_depth)
1166 {
1167         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1168         struct dlb2_create_ldb_port_args cfg = { {0} };
1169         int ret;
1170         struct dlb2_port *qm_port = NULL;
1171         char mz_name[RTE_MEMZONE_NAMESIZE];
1172         uint32_t qm_port_id;
1173         uint16_t ldb_credit_high_watermark;
1174         uint16_t dir_credit_high_watermark;
1175
1176         if (handle == NULL)
1177                 return -EINVAL;
1178
1179         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1180                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
1181                              DLB2_MIN_CQ_DEPTH);
1182                 return -EINVAL;
1183         }
1184
1185         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1186                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1187                              DLB2_MIN_ENQUEUE_DEPTH);
1188                 return -EINVAL;
1189         }
1190
1191         rte_spinlock_lock(&handle->resource_lock);
1192
1193         /* We round up to the next power of 2 if necessary */
1194         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1195         cfg.cq_depth_threshold = 1;
1196
1197         cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
1198
1199         if (handle->cos_id == DLB2_COS_DEFAULT)
1200                 cfg.cos_id = 0;
1201         else
1202                 cfg.cos_id = handle->cos_id;
1203
1204         cfg.cos_strict = 0;
1205
1206         /* User controls the LDB high watermark via enqueue depth. The DIR high
1207          * watermark is equal, unless the directed credit pool is too small.
1208          */
1209         ldb_credit_high_watermark = enqueue_depth;
1210
1211         /* If there are no directed ports, the kernel driver will ignore this
1212          * port's directed credit settings. Don't use enqueue_depth if it would
1213          * require more directed credits than are available.
1214          */
1215         dir_credit_high_watermark =
1216                 RTE_MIN(enqueue_depth,
1217                         handle->cfg.num_dir_credits / dlb2->num_ports);
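
        /* Illustrative arithmetic (assumed values): with enqueue_depth = 64,
         * num_dir_credits = 1024 and num_ports = 32, the directed watermark is
         * RTE_MIN(64, 1024 / 32) = 32 while the LDB watermark stays at 64.
         */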
1218
1219         /* Per QM values */
1220
1221         ret = dlb2_iface_ldb_port_create(handle, &cfg,  dlb2->poll_mode);
1222         if (ret < 0) {
1223                 DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
1224                              ret, dlb2_error_strings[cfg.response.status]);
1225                 goto error_exit;
1226         }
1227
1228         qm_port_id = cfg.response.id;
1229
1230         DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
1231                      ev_port->id, qm_port_id);
1232
1233         qm_port = &ev_port->qm_port;
1234         qm_port->ev_port = ev_port; /* back ptr */
1235         qm_port->dlb2 = dlb2; /* back ptr */
1236         /*
1237          * Allocate and init local qe struct(s).
1238          * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
1239          */
1240
1241         snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
1242                  ev_port->id);
1243
1244         ret = dlb2_init_qe_mem(qm_port, mz_name);
1245         if (ret < 0) {
1246                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1247                 goto error_exit;
1248         }
1249
1250         qm_port->id = qm_port_id;
1251
1252         qm_port->cached_ldb_credits = 0;
1253         qm_port->cached_dir_credits = 0;
1254         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1255          * the effective depth is smaller.
1256          */
1257         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1258         qm_port->cq_idx = 0;
1259         qm_port->cq_idx_unmasked = 0;
1260
1261         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1262                 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1263         else
1264                 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1265
1266         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1267         /* starting value of gen bit - it toggles at wrap time */
1268         qm_port->gen_bit = 1;
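
        /* Illustrative arithmetic (assumed values): a CQ depth of 8 in sparse
         * poll mode gives cq_depth_mask = (8 * 4) - 1 = 31 and gen_bit_shift =
         * popcount(31) = 5, so the gen bit toggles once every 32 CQ slots.
         */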
1269
1270         qm_port->int_armed = false;
1271
1272         /* Save off for later use in info and lookup APIs. */
1273         qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
1274
1275         qm_port->dequeue_depth = dequeue_depth;
1276         qm_port->token_pop_thresh = dequeue_depth;
1277
1278         /* The default enqueue functions do not include delayed-pop support for
1279          * performance reasons.
1280          */
1281         if (qm_port->token_pop_mode == DELAYED_POP) {
1282                 dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
1283                 dlb2->event_dev->enqueue_burst =
1284                         dlb2_event_enqueue_burst_delayed;
1285                 dlb2->event_dev->enqueue_new_burst =
1286                         dlb2_event_enqueue_new_burst_delayed;
1287                 dlb2->event_dev->enqueue_forward_burst =
1288                         dlb2_event_enqueue_forward_burst_delayed;
1289         }
1290
1291         qm_port->owed_tokens = 0;
1292         qm_port->issued_releases = 0;
1293
1294         /* Save config message too. */
1295         rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
1296
1297         /* update state */
1298         qm_port->state = PORT_STARTED; /* enabled at create time */
1299         qm_port->config_state = DLB2_CONFIGURED;
1300
1301         qm_port->dir_credits = dir_credit_high_watermark;
1302         qm_port->ldb_credits = ldb_credit_high_watermark;
1303         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1304         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1305
1306         DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1307                      qm_port_id,
1308                      dequeue_depth,
1309                      qm_port->ldb_credits,
1310                      qm_port->dir_credits);
1311
1312         rte_spinlock_unlock(&handle->resource_lock);
1313
1314         return 0;
1315
1316 error_exit:
1317
1318         if (qm_port)
1319                 dlb2_free_qe_mem(qm_port);
1320
1321         rte_spinlock_unlock(&handle->resource_lock);
1322
1323         DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
1324
1325         return ret;
1326 }
1327
1328 static void
1329 dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
1330                         struct dlb2_eventdev_port *ev_port)
1331 {
1332         struct dlb2_eventdev_queue *ev_queue;
1333         int i;
1334
1335         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1336                 if (!ev_port->link[i].valid)
1337                         continue;
1338
1339                 ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
1340
1341                 ev_port->link[i].valid = false;
1342                 ev_port->num_links--;
1343                 ev_queue->num_links--;
1344         }
1345 }
1346
1347 static int
1348 dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
1349                         struct dlb2_eventdev_port *ev_port,
1350                         uint32_t dequeue_depth,
1351                         uint32_t enqueue_depth)
1352 {
1353         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1354         struct dlb2_create_dir_port_args cfg = { {0} };
1355         int ret;
1356         struct dlb2_port *qm_port = NULL;
1357         char mz_name[RTE_MEMZONE_NAMESIZE];
1358         uint32_t qm_port_id;
1359         uint16_t ldb_credit_high_watermark;
1360         uint16_t dir_credit_high_watermark;
1361
1362         if (dlb2 == NULL || handle == NULL)
1363                 return -EINVAL;
1364
1365         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1366                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
1367                              DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
1368                 return -EINVAL;
1369         }
1370
1371         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1372                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1373                              DLB2_MIN_ENQUEUE_DEPTH);
1374                 return -EINVAL;
1375         }
1376
1377         rte_spinlock_lock(&handle->resource_lock);
1378
1379         /* Directed queues are configured at link time. */
1380         cfg.queue_id = -1;
1381
1382         /* We round up to the next power of 2 if necessary */
1383         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1384         cfg.cq_depth_threshold = 1;
1385
1386         /* User controls the LDB high watermark via enqueue depth. The DIR high
1387          * watermark is equal, unless the directed credit pool is too small.
1388          */
1389         ldb_credit_high_watermark = enqueue_depth;
1390
1391         /* Don't use enqueue_depth if it would require more directed credits
1392          * than are available.
1393          */
1394         dir_credit_high_watermark =
1395                 RTE_MIN(enqueue_depth,
1396                         handle->cfg.num_dir_credits / dlb2->num_ports);
1397
1398         /* Per QM values */
1399
1400         ret = dlb2_iface_dir_port_create(handle, &cfg,  dlb2->poll_mode);
1401         if (ret < 0) {
1402                 DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
1403                              ret, dlb2_error_strings[cfg.response.status]);
1404                 goto error_exit;
1405         }
1406
1407         qm_port_id = cfg.response.id;
1408
1409         DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
1410                      ev_port->id, qm_port_id);
1411
1412         qm_port = &ev_port->qm_port;
1413         qm_port->ev_port = ev_port; /* back ptr */
1414         qm_port->dlb2 = dlb2;  /* back ptr */
1415
1416         /*
1417          * Init local qe struct(s).
1418          * Note: MOVDIR64 requires the enqueue QE to be aligned
1419          */
1420
1421         snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
1422                  ev_port->id);
1423
1424         ret = dlb2_init_qe_mem(qm_port, mz_name);
1425
1426         if (ret < 0) {
1427                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1428                 goto error_exit;
1429         }
1430
1431         qm_port->id = qm_port_id;
1432
1433         qm_port->cached_ldb_credits = 0;
1434         qm_port->cached_dir_credits = 0;
1435         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1436          * the effective depth is smaller.
1437          */
1438         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1439         qm_port->cq_idx = 0;
1440         qm_port->cq_idx_unmasked = 0;
1441
1442         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1443                 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1444         else
1445                 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1446
1447         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1448         /* starting value of gen bit - it toggles at wrap time */
1449         qm_port->gen_bit = 1;
1450
1451         qm_port->int_armed = false;
1452
1453         /* Save off for later use in info and lookup APIs. */
1454         qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
1455
1456         qm_port->dequeue_depth = dequeue_depth;
1457
1458         /* Directed ports are auto-pop, by default. */
1459         qm_port->token_pop_mode = AUTO_POP;
1460         qm_port->owed_tokens = 0;
1461         qm_port->issued_releases = 0;
1462
1463         /* Save config message too. */
1464         rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
1465
1466         /* update state */
1467         qm_port->state = PORT_STARTED; /* enabled at create time */
1468         qm_port->config_state = DLB2_CONFIGURED;
1469
1470         qm_port->dir_credits = dir_credit_high_watermark;
1471         qm_port->ldb_credits = ldb_credit_high_watermark;
1472         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1473         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1474
1475         DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
1476                      qm_port_id,
1477                      dequeue_depth,
1478                      dir_credit_high_watermark,
1479                      ldb_credit_high_watermark);
1480
1481         rte_spinlock_unlock(&handle->resource_lock);
1482
1483         return 0;
1484
1485 error_exit:
1486
1487         if (qm_port)
1488                 dlb2_free_qe_mem(qm_port);
1489
1490         rte_spinlock_unlock(&handle->resource_lock);
1491
1492         DLB2_LOG_ERR("dlb2: create dir port failed!\n");
1493
1494         return ret;
1495 }
1496
1497 static int
1498 dlb2_eventdev_port_setup(struct rte_eventdev *dev,
1499                          uint8_t ev_port_id,
1500                          const struct rte_event_port_conf *port_conf)
1501 {
1502         struct dlb2_eventdev *dlb2;
1503         struct dlb2_eventdev_port *ev_port;
1504         int ret;
1505
1506         if (dev == NULL || port_conf == NULL) {
1507                 DLB2_LOG_ERR("Null parameter\n");
1508                 return -EINVAL;
1509         }
1510
1511         dlb2 = dlb2_pmd_priv(dev);
1512
1513         if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
1514                 return -EINVAL;
1515
1516         if (port_conf->dequeue_depth >
1517                 evdev_dlb2_default_info.max_event_port_dequeue_depth ||
1518             port_conf->enqueue_depth >
1519                 evdev_dlb2_default_info.max_event_port_enqueue_depth)
1520                 return -EINVAL;
1521
1522         ev_port = &dlb2->ev_ports[ev_port_id];
1523         /* configured? */
1524         if (ev_port->setup_done) {
1525                 DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
1526                 return -EINVAL;
1527         }
1528
1529         ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1530                 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1531
1532         if (!ev_port->qm_port.is_directed) {
1533                 ret = dlb2_hw_create_ldb_port(dlb2,
1534                                               ev_port,
1535                                               port_conf->dequeue_depth,
1536                                               port_conf->enqueue_depth);
1537                 if (ret < 0) {
1538                         DLB2_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1539                                      ev_port_id);
1540
1541                         return ret;
1542                 }
1543         } else {
1544                 ret = dlb2_hw_create_dir_port(dlb2,
1545                                               ev_port,
1546                                               port_conf->dequeue_depth,
1547                                               port_conf->enqueue_depth);
1548                 if (ret < 0) {
1549                         DLB2_LOG_ERR("Failed to create the DIR port\n");
1550                         return ret;
1551                 }
1552         }
1553
1554         /* Save off port config for reconfig */
1555         ev_port->conf = *port_conf;
1556
1557         ev_port->id = ev_port_id;
1558         ev_port->enq_configured = true;
1559         ev_port->setup_done = true;
1560         ev_port->inflight_max = port_conf->new_event_threshold;
1561         ev_port->implicit_release = !(port_conf->event_port_cfg &
1562                   RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1563         ev_port->outstanding_releases = 0;
1564         ev_port->inflight_credits = 0;
1565         ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
1566         ev_port->dlb2 = dlb2; /* reverse link */
1567
1568         /* Tear down pre-existing port->queue links */
1569         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1570                 dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
1571
1572         dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
1573
1574         return 0;
1575 }
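
/*
 * Editorial usage sketch (assumed application-side code, not from the
 * original source): the event_port_cfg flags passed to rte_event_port_setup()
 * decide whether the routine above creates a load-balanced or a directed
 * DLB2 port, e.g.:
 *
 *   struct rte_event_port_conf conf;
 *
 *   rte_event_port_default_conf_get(dev_id, port_id, &conf);
 *   conf.event_port_cfg |= RTE_EVENT_PORT_CFG_SINGLE_LINK; // directed port
 *   if (rte_event_port_setup(dev_id, port_id, &conf) < 0)
 *       rte_panic("port setup failed\n");
 *
 * dev_id and port_id are placeholders for the application's own values.
 */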
1576
1577 static int16_t
1578 dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
1579                             uint32_t qm_port_id,
1580                             uint16_t qm_qid,
1581                             uint8_t priority)
1582 {
1583         struct dlb2_map_qid_args cfg;
1584         int32_t ret;
1585
1586         if (handle == NULL)
1587                 return -EINVAL;
1588
1589         /* Build message */
1590         cfg.port_id = qm_port_id;
1591         cfg.qid = qm_qid;
1592         cfg.priority = EV_TO_DLB2_PRIO(priority);
1593
1594         ret = dlb2_iface_map_qid(handle, &cfg);
1595         if (ret < 0) {
1596                 DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
1597                              ret, dlb2_error_strings[cfg.response.status]);
1598                 DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1599                              handle->domain_id, cfg.port_id,
1600                              cfg.qid,
1601                              cfg.priority);
1602         } else {
1603                 DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
1604                              qm_qid, qm_port_id);
1605         }
1606
1607         return ret;
1608 }
1609
1610 static int
1611 dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
1612                           struct dlb2_eventdev_port *ev_port,
1613                           struct dlb2_eventdev_queue *ev_queue,
1614                           uint8_t priority)
1615 {
1616         int first_avail = -1;
1617         int ret, i;
1618
1619         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1620                 if (ev_port->link[i].valid) {
1621                         if (ev_port->link[i].queue_id == ev_queue->id &&
1622                             ev_port->link[i].priority == priority) {
1623                                 if (ev_port->link[i].mapped)
1624                                         return 0; /* already mapped */
1625                                 first_avail = i;
1626                         }
1627                 } else if (first_avail == -1)
1628                         first_avail = i;
1629         }
1630         if (first_avail == -1) {
1631                 DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
1632                              ev_port->qm_port.id);
1633                 return -EINVAL;
1634         }
1635
1636         ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
1637                                           ev_port->qm_port.id,
1638                                           ev_queue->qm_queue.id,
1639                                           priority);
1640
1641         if (!ret)
1642                 ev_port->link[first_avail].mapped = true;
1643
1644         return ret;
1645 }
1646
1647 static int32_t
1648 dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
1649                          struct dlb2_eventdev_queue *ev_queue,
1650                          int32_t qm_port_id)
1651 {
1652         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1653         struct dlb2_create_dir_queue_args cfg;
1654         int32_t ret;
1655
1656         /* The directed port is always configured before its queue */
1657         cfg.port_id = qm_port_id;
1658
1659         if (ev_queue->depth_threshold == 0) {
1660                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1661                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1662         } else
1663                 cfg.depth_threshold = ev_queue->depth_threshold;
1664
1665         ret = dlb2_iface_dir_queue_create(handle, &cfg);
1666         if (ret < 0) {
1667                 DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
1668                              ret, dlb2_error_strings[cfg.response.status]);
1669                 return -EINVAL;
1670         }
1671
1672         return cfg.response.id;
1673 }
1674
1675 static int
1676 dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
1677                               struct dlb2_eventdev_queue *ev_queue,
1678                               struct dlb2_eventdev_port *ev_port)
1679 {
1680         int32_t qm_qid;
1681
1682         qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
1683
1684         if (qm_qid < 0) {
1685                 DLB2_LOG_ERR("Failed to create the DIR queue\n");
1686                 return qm_qid;
1687         }
1688
1689         dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1690
1691         ev_queue->qm_queue.id = qm_qid;
1692
1693         return 0;
1694 }
1695
1696 static int
1697 dlb2_do_port_link(struct rte_eventdev *dev,
1698                   struct dlb2_eventdev_queue *ev_queue,
1699                   struct dlb2_eventdev_port *ev_port,
1700                   uint8_t prio)
1701 {
1702         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1703         int err;
1704
1705         /* Don't link until start time. */
1706         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1707                 return 0;
1708
1709         if (ev_queue->qm_queue.is_directed)
1710                 err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
1711         else
1712                 err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
1713
1714         if (err) {
1715                 DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1716                              ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1717                              ev_queue->id, ev_port->id);
1718
1719                 rte_errno = err;
1720                 return -1;
1721         }
1722
1723         return 0;
1724 }
1725
1726 static int
1727 dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
1728                         uint8_t queue_id,
1729                         bool link_exists,
1730                         int index)
1731 {
1732         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
1733         struct dlb2_eventdev_queue *ev_queue;
1734         bool port_is_dir, queue_is_dir;
1735
1736         if (queue_id > dlb2->num_queues) {
1737                 rte_errno = -EINVAL;
1738                 return -1;
1739         }
1740
1741         ev_queue = &dlb2->ev_queues[queue_id];
1742
1743         if (!ev_queue->setup_done &&
1744             ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
1745                 rte_errno = -EINVAL;
1746                 return -1;
1747         }
1748
1749         port_is_dir = ev_port->qm_port.is_directed;
1750         queue_is_dir = ev_queue->qm_queue.is_directed;
1751
1752         if (port_is_dir != queue_is_dir) {
1753                 DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
1754                              queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1755                              port_is_dir ? "DIR" : "LDB", ev_port->id);
1756
1757                 rte_errno = -EINVAL;
1758                 return -1;
1759         }
1760
1761         /* Check if there is space for the requested link */
1762         if (!link_exists && index == -1) {
1763                 DLB2_LOG_ERR("no space for new link\n");
1764                 rte_errno = -ENOSPC;
1765                 return -1;
1766         }
1767
1768         /* Check if the directed port is already linked */
1769         if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1770             !link_exists) {
1771                 DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1772                              ev_port->id);
1773                 rte_errno = -EINVAL;
1774                 return -1;
1775         }
1776
1777         /* Check if the directed queue is already linked */
1778         if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1779             !link_exists) {
1780                 DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1781                              ev_queue->id);
1782                 rte_errno = -EINVAL;
1783                 return -1;
1784         }
1785
1786         return 0;
1787 }
1788
1789 static int
1790 dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1791                         const uint8_t queues[], const uint8_t priorities[],
1792                         uint16_t nb_links)
1793
1794 {
1795         struct dlb2_eventdev_port *ev_port = event_port;
1796         struct dlb2_eventdev *dlb2;
1797         int i, j;
1798
1799         RTE_SET_USED(dev);
1800
1801         if (ev_port == NULL) {
1802                 DLB2_LOG_ERR("dlb2: evport not setup\n");
1803                 rte_errno = -EINVAL;
1804                 return 0;
1805         }
1806
1807         if (!ev_port->setup_done &&
1808             ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
1809                 DLB2_LOG_ERR("dlb2: evport not setup\n");
1810                 rte_errno = -EINVAL;
1811                 return 0;
1812         }
1813
1814         /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
1815          * queues pointer.
1816          */
1817         if (nb_links == 0) {
1818                 DLB2_LOG_DBG("dlb2: nb_links is 0\n");
1819                 return 0; /* Ignore and return success */
1820         }
1821
1822         dlb2 = ev_port->dlb2;
1823
1824         DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
1825                      nb_links,
1826                      ev_port->qm_port.is_directed ? "DIR" : "LDB",
1827                      ev_port->id);
1828
1829         for (i = 0; i < nb_links; i++) {
1830                 struct dlb2_eventdev_queue *ev_queue;
1831                 uint8_t queue_id, prio;
1832                 bool found = false;
1833                 int index = -1;
1834
1835                 queue_id = queues[i];
1836                 prio = priorities[i];
1837
1838                 /* Check if the link already exists. */
1839                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1840                         if (ev_port->link[j].valid) {
1841                                 if (ev_port->link[j].queue_id == queue_id) {
1842                                         found = true;
1843                                         index = j;
1844                                         break;
1845                                 }
1846                         } else if (index == -1) {
1847                                 index = j;
1848                         }
1849
1850                 /* No existing link and no free slot: could not link */
1851                 if (index == -1)
1852                         break;
1853
1854                 /* Check if already linked at the requested priority */
1855                 if (found && ev_port->link[j].priority == prio)
1856                         continue;
1857
1858                 if (dlb2_validate_port_link(ev_port, queue_id, found, index))
1859                         break; /* return index of offending queue */
1860
1861                 ev_queue = &dlb2->ev_queues[queue_id];
1862
1863                 if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
1864                         break; /* return index of offending queue */
1865
1866                 ev_queue->num_links++;
1867
1868                 ev_port->link[index].queue_id = queue_id;
1869                 ev_port->link[index].priority = prio;
1870                 ev_port->link[index].valid = true;
1871                 /* If the entry already existed, this was just a priority change */
1872                 if (!found)
1873                         ev_port->num_links++;
1874         }
1875         return i;
1876 }
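
/*
 * Editorial usage sketch (assumed application-side code, not from the
 * original source): the link routine above returns the number of queues
 * actually linked, so callers compare it against the request and inspect
 * rte_errno on a shortfall:
 *
 *   uint8_t queues[2] = {0, 1};
 *   uint8_t prios[2] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
 *                       RTE_EVENT_DEV_PRIORITY_NORMAL};
 *   int n = rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *
 *   if (n != 2)
 *       printf("linked %d of 2 queues, rte_errno=%d\n", n, rte_errno);
 *
 * Note that when the domain is still stopped this PMD records the link and
 * defers the hardware map operation until device start (dlb2_do_port_link()).
 */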
1877
1878 static int16_t
1879 dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
1880                                 uint32_t qm_port_id,
1881                                 uint16_t qm_qid)
1882 {
1883         struct dlb2_unmap_qid_args cfg;
1884         int32_t ret;
1885
1886         if (handle == NULL)
1887                 return -EINVAL;
1888
1889         cfg.port_id = qm_port_id;
1890         cfg.qid = qm_qid;
1891
1892         ret = dlb2_iface_unmap_qid(handle, &cfg);
1893         if (ret < 0)
1894                 DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
1895                              ret, dlb2_error_strings[cfg.response.status]);
1896
1897         return ret;
1898 }
1899
1900 static int
1901 dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
1902                             struct dlb2_eventdev_port *ev_port,
1903                             struct dlb2_eventdev_queue *ev_queue)
1904 {
1905         int ret, i;
1906
1907         /* Don't unlink until start time. */
1908         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1909                 return 0;
1910
1911         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1912                 if (ev_port->link[i].valid &&
1913                     ev_port->link[i].queue_id == ev_queue->id)
1914                         break; /* found */
1915         }
1916
1917         /* This is expected with the eventdev API, which blindly
1918          * attempts to unmap all queues.
1919          */
1920         if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1921                 DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
1922                              ev_queue->qm_queue.id,
1923                              ev_port->qm_port.id);
1924                 return 0;
1925         }
1926
1927         ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
1928                                               ev_port->qm_port.id,
1929                                               ev_queue->qm_queue.id);
1930         if (!ret)
1931                 ev_port->link[i].mapped = false;
1932
1933         return ret;
1934 }
1935
1936 static int
1937 dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
1938                           uint8_t queues[], uint16_t nb_unlinks)
1939 {
1940         struct dlb2_eventdev_port *ev_port = event_port;
1941         struct dlb2_eventdev *dlb2;
1942         int i;
1943
1944         RTE_SET_USED(dev);
1945
1946         if (!ev_port->setup_done) {
1947                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1948                              ev_port->id);
1949                 rte_errno = -EINVAL;
1950                 return 0;
1951         }
1952
1953         if (queues == NULL || nb_unlinks == 0) {
1954                 DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
1955                 return 0; /* Ignore and return success */
1956         }
1957
1958         if (ev_port->qm_port.is_directed) {
1959                 DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
1960                              ev_port->id);
1961                 rte_errno = 0;
1962                 return nb_unlinks; /* as if success */
1963         }
1964
1965         dlb2 = ev_port->dlb2;
1966
1967         for (i = 0; i < nb_unlinks; i++) {
1968                 struct dlb2_eventdev_queue *ev_queue;
1969                 int ret, j;
1970
1971                 if (queues[i] >= dlb2->num_queues) {
1972                         DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
1973                         rte_errno = -EINVAL;
1974                         return i; /* return index of offending queue */
1975                 }
1976
1977                 ev_queue = &dlb2->ev_queues[queues[i]];
1978
1979                 /* Does a link exist? */
1980                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1981                         if (ev_port->link[j].queue_id == queues[i] &&
1982                             ev_port->link[j].valid)
1983                                 break;
1984
1985                 if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1986                         continue;
1987
1988                 ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
1989                 if (ret) {
1990                         DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
1991                                      ret, ev_port->id, queues[i]);
1992                         rte_errno = -ENOENT;
1993                         return i; /* return index of offending queue */
1994                 }
1995
1996                 ev_port->link[j].valid = false;
1997                 ev_port->num_links--;
1998                 ev_queue->num_links--;
1999         }
2000
2001         return nb_unlinks;
2002 }
2003
2004 static int
2005 dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
2006                                        void *event_port)
2007 {
2008         struct dlb2_eventdev_port *ev_port = event_port;
2009         struct dlb2_eventdev *dlb2;
2010         struct dlb2_hw_dev *handle;
2011         struct dlb2_pending_port_unmaps_args cfg;
2012         int ret;
2013
2014         RTE_SET_USED(dev);
2015
2016         if (!ev_port->setup_done) {
2017                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
2018                              ev_port->id);
2019                 rte_errno = -EINVAL;
2020                 return 0;
2021         }
2022
2023         cfg.port_id = ev_port->qm_port.id;
2024         dlb2 = ev_port->dlb2;
2025         handle = &dlb2->qm_instance;
2026         ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
2027
2028         if (ret < 0) {
2029                 DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
2030                              ret, dlb2_error_strings[cfg.response.status]);
2031                 return ret;
2032         }
2033
2034         return cfg.response.id;
2035 }
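
/*
 * Editorial usage sketch (assumed application-side code, not from the
 * original source): because DLB2 queue unmaps complete asynchronously,
 * applications typically pair rte_event_port_unlink() with
 * rte_event_port_unlinks_in_progress() and keep servicing the port until the
 * count drops to zero:
 *
 *   rte_event_port_unlink(dev_id, port_id, &queue_id, 1);
 *   while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *       rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), 0);
 *
 * dev_id, port_id, queue_id and ev[] are placeholders.
 */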
2036
2037 static int
2038 dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
2039 {
2040         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2041         int ret, i;
2042
2043         /* If an event queue or port was previously configured, but hasn't been
2044          * reconfigured, reapply its original configuration.
2045          */
2046         for (i = 0; i < dlb2->num_queues; i++) {
2047                 struct dlb2_eventdev_queue *ev_queue;
2048
2049                 ev_queue = &dlb2->ev_queues[i];
2050
2051                 if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
2052                         continue;
2053
2054                 ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
2055                 if (ret < 0) {
2056                         DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d\n", i);
2057                         return ret;
2058                 }
2059         }
2060
2061         for (i = 0; i < dlb2->num_ports; i++) {
2062                 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2063
2064                 if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
2065                         continue;
2066
2067                 ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
2068                 if (ret < 0) {
2069                         DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d\n",
2070                                      i);
2071                         return ret;
2072                 }
2073         }
2074
2075         return 0;
2076 }
2077
2078 static int
2079 dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
2080 {
2081         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2082         int i;
2083
2084         /* Perform requested port->queue links */
2085         for (i = 0; i < dlb2->num_ports; i++) {
2086                 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2087                 int j;
2088
2089                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
2090                         struct dlb2_eventdev_queue *ev_queue;
2091                         uint8_t prio, queue_id;
2092
2093                         if (!ev_port->link[j].valid)
2094                                 continue;
2095
2096                         prio = ev_port->link[j].priority;
2097                         queue_id = ev_port->link[j].queue_id;
2098
2099                         if (dlb2_validate_port_link(ev_port, queue_id, true, j))
2100                                 return -EINVAL;
2101
2102                         ev_queue = &dlb2->ev_queues[queue_id];
2103
2104                         if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
2105                                 return -EINVAL;
2106                 }
2107         }
2108
2109         return 0;
2110 }
2111
2112 static int
2113 dlb2_eventdev_start(struct rte_eventdev *dev)
2114 {
2115         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2116         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
2117         struct dlb2_start_domain_args cfg;
2118         int ret, i;
2119
2120         rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
2121         if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
2122                 DLB2_LOG_ERR("bad state %d for dev_start\n",
2123                              (int)dlb2->run_state);
2124                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2125                 return -EINVAL;
2126         }
2127         dlb2->run_state = DLB2_RUN_STATE_STARTING;
2128         rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2129
2130         /* If the device was configured more than once, some event ports and/or
2131          * queues may need to be reconfigured.
2132          */
2133         ret = dlb2_eventdev_reapply_configuration(dev);
2134         if (ret)
2135                 return ret;
2136
2137         /* The DLB PMD delays port links until the device is started. */
2138         ret = dlb2_eventdev_apply_port_links(dev);
2139         if (ret)
2140                 return ret;
2141
2142         for (i = 0; i < dlb2->num_ports; i++) {
2143                 if (!dlb2->ev_ports[i].setup_done) {
2144                         DLB2_LOG_ERR("dlb2: port %d not setup\n", i);
2145                         return -ESTALE;
2146                 }
2147         }
2148
2149         for (i = 0; i < dlb2->num_queues; i++) {
2150                 if (dlb2->ev_queues[i].num_links == 0) {
2151                         DLB2_LOG_ERR("dlb2: queue %d is not linked\n", i);
2152                         return -ENOLINK;
2153                 }
2154         }
2155
2156         ret = dlb2_iface_sched_domain_start(handle, &cfg);
2157         if (ret < 0) {
2158                 DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
2159                              ret, dlb2_error_strings[cfg.response.status]);
2160                 return ret;
2161         }
2162
2163         dlb2->run_state = DLB2_RUN_STATE_STARTED;
2164         DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
2165
2166         return 0;
2167 }
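
/*
 * Editorial note (not from the original source): the checks above imply the
 * usual eventdev bring-up order for this PMD:
 *
 *   rte_event_dev_configure(dev_id, &dev_conf);
 *   rte_event_queue_setup(dev_id, q, &queue_conf);  // every queue
 *   rte_event_port_setup(dev_id, p, &port_conf);    // every port
 *   rte_event_port_link(dev_id, p, &q, &prio, 1);   // link every queue
 *   rte_event_dev_start(dev_id);
 *
 * Starting with an unlinked queue fails with -ENOLINK, and an un-setup port
 * fails with -ESTALE.
 */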
2168
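/*
 * Editorial note (not from the original source): the directed row below
 * differs from the load-balanced row because directed traffic bypasses the
 * load-balancing scheduler. A FORWARD on a directed port is issued as a
 * plain NEW enqueue, and a RELEASE degenerates to a no-op command byte since
 * there is no scheduler state to complete.
 */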
2169 static uint8_t cmd_byte_map[DLB2_NUM_PORT_TYPES][DLB2_NUM_HW_SCHED_TYPES] = {
2170         {
2171                 /* Load-balanced cmd bytes */
2172                 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2173                 [RTE_EVENT_OP_FORWARD] = DLB2_FWD_CMD_BYTE,
2174                 [RTE_EVENT_OP_RELEASE] = DLB2_COMP_CMD_BYTE,
2175         },
2176         {
2177                 /* Directed cmd bytes */
2178                 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2179                 [RTE_EVENT_OP_FORWARD] = DLB2_NEW_CMD_BYTE,
2180                 [RTE_EVENT_OP_RELEASE] = DLB2_NOOP_CMD_BYTE,
2181         },
2182 };
2183
2184 static inline uint32_t
2185 dlb2_port_credits_get(struct dlb2_port *qm_port,
2186                       enum dlb2_hw_queue_types type)
2187 {
2188         uint32_t credits = *qm_port->credit_pool[type];
2189         uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2190
2191         if (unlikely(credits < batch_size))
2192                 batch_size = credits;
2193
2194         if (likely(credits &&
2195                    __atomic_compare_exchange_n(
2196                         qm_port->credit_pool[type],
2197                         &credits, credits - batch_size, false,
2198                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
2199                 return batch_size;
2200         else
2201                 return 0;
2202 }
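
/*
 * Editorial note (a sketch of the scheme above, not from the original
 * source): credits are pulled from the shared pool in batches of up to
 * DLB2_SW_CREDIT_BATCH_SZ with a single compare-and-swap, amortizing the
 * atomic across many enqueues. Roughly:
 *
 *   pool = 100, batch = 32  ->  CAS(pool, 100, 68) succeeds, port caches 32
 *   pool = 10,  batch = 32  ->  batch is clipped to 10, CAS(pool, 10, 0)
 *   pool = 0                ->  returns 0, caller reports out of credits
 *
 * A failed CAS (another port raced us) also returns 0, and the caller simply
 * retries on a later enqueue.
 */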
2203
2204 static inline void
2205 dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
2206                           struct dlb2_eventdev_port *ev_port)
2207 {
2208         uint16_t quanta = ev_port->credit_update_quanta;
2209
2210         if (ev_port->inflight_credits >= quanta * 2) {
2211                 /* Replenish credits, saving one quanta for enqueues */
2212                 uint16_t val = ev_port->inflight_credits - quanta;
2213
2214                 __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
2215                 ev_port->inflight_credits -= val;
2216         }
2217 }
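
/*
 * Editorial worked example (not from the original source): with a credit
 * update quanta of 32, a port holding 70 locally cached inflight credits
 * returns 70 - 32 = 38 to the device-wide inflight count and keeps 32 for
 * future enqueues; a port holding 63 (< 2 * 32) returns nothing yet.
 */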
2218
2219 static inline int
2220 dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
2221                               struct dlb2_eventdev_port *ev_port)
2222 {
2223         uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
2224                                                 __ATOMIC_SEQ_CST);
2225         const int num = 1;
2226
2227         if (unlikely(ev_port->inflight_max < sw_inflights)) {
2228                 DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2229                 rte_errno = -ENOSPC;
2230                 return 1;
2231         }
2232
2233         if (ev_port->inflight_credits < num) {
2234                 /* check if event enqueue brings ev_port over max threshold */
2235                 uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2236
2237                 if (sw_inflights + credit_update_quanta >
2238                                 dlb2->new_event_limit) {
2239                         DLB2_INC_STAT(
2240                         ev_port->stats.traffic.tx_nospc_new_event_limit,
2241                         1);
2242                         rte_errno = -ENOSPC;
2243                         return 1;
2244                 }
2245
2246                 __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
2247                                    __ATOMIC_SEQ_CST);
2248                 ev_port->inflight_credits += (credit_update_quanta);
2249
2250                 if (ev_port->inflight_credits < num) {
2251                         DLB2_INC_STAT(
2252                         ev_port->stats.traffic.tx_nospc_inflight_credits,
2253                         1);
2254                         rte_errno = -ENOSPC;
2255                         return 1;
2256                 }
2257         }
2258
2259         return 0;
2260 }
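
/*
 * Editorial note (not from the original source): enqueue admission is gated
 * twice. The software check above enforces the per-port new_event_threshold
 * against the device-wide inflight count, while the two helpers below guard
 * the hardware load-balanced and directed credit pools.
 */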
2261
2262 static inline int
2263 dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
2264 {
2265         if (unlikely(qm_port->cached_ldb_credits == 0)) {
2266                 qm_port->cached_ldb_credits =
2267                         dlb2_port_credits_get(qm_port,
2268                                               DLB2_LDB_QUEUE);
2269                 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2270                         DLB2_INC_STAT(
2271                         qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2272                         1);
2273                         DLB2_LOG_DBG("ldb credits exhausted\n");
2274                         return 1; /* credits exhausted */
2275                 }
2276         }
2277
2278         return 0;
2279 }
2280
2281 static inline int
2282 dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
2283 {
2284         if (unlikely(qm_port->cached_dir_credits == 0)) {
2285                 qm_port->cached_dir_credits =
2286                         dlb2_port_credits_get(qm_port,
2287                                               DLB2_DIR_QUEUE);
2288                 if (unlikely(qm_port->cached_dir_credits == 0)) {
2289                         DLB2_INC_STAT(
2290                         qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2291                         1);
2292                         DLB2_LOG_DBG("dir credits exhausted\n");
2293                         return 1; /* credits exhausted */
2294                 }
2295         }
2296
2297         return 0;
2298 }
2299
2300 static __rte_always_inline void
2301 dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
2302               struct process_local_port_data *port_data)
2303 {
2304         dlb2_movdir64b(port_data->pp_addr, qe4);
2305 }
2306
2307 static inline int
2308 dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
2309 {
2310         struct process_local_port_data *port_data;
2311         struct dlb2_cq_pop_qe *qe;
2312
2313         RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);
2314
2315         qe = qm_port->consume_qe;
2316
2317         qe->tokens = num - 1;
2318
2319         /* No store fence needed since no pointer is being sent, and CQ token
2320          * pops can be safely reordered with other HCWs.
2321          */
2322         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2323
2324         dlb2_movntdq_single(port_data->pp_addr, qe);
2325
2326         DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
2327
2328         qm_port->owed_tokens = 0;
2329
2330         return 0;
2331 }
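
/*
 * Editorial note (an inference, not from the original source): the hardware
 * token count appears to be zero-based, hence the "num - 1" encoding above
 * and in dlb2_construct_token_pop_qe(); a value of 0 pops a single CQ token.
 */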
2332
2333 static inline void
2334 dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2335                    bool do_sfence,
2336                    struct process_local_port_data *port_data)
2337 {
2338         /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2339          * application writes complete before enqueueing the QE.
2340          */
2341         if (do_sfence)
2342                 rte_wmb();
2343
2344         dlb2_pp_write(qm_port->qe4, port_data);
2345 }
2346
2347 static inline void
2348 dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2349 {
2350         struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2351         int num = qm_port->owed_tokens;
2352
2353         qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2354         qe[idx].tokens = num - 1;
2355
2356         qm_port->owed_tokens = 0;
2357 }
2358
2359 static inline void
2360 dlb2_event_build_hcws(struct dlb2_port *qm_port,
2361                       const struct rte_event ev[],
2362                       int num,
2363                       uint8_t *sched_type,
2364                       uint8_t *queue_id)
2365 {
2366         struct dlb2_enqueue_qe *qe;
2367         uint16_t sched_word[4];
2368         __m128i sse_qe[2];
2369         int i;
2370
2371         qe = qm_port->qe4;
2372
2373         sse_qe[0] = _mm_setzero_si128();
2374         sse_qe[1] = _mm_setzero_si128();
2375
2376         switch (num) {
2377         case 4:
2378                 /* Construct the metadata portion of two HCWs in one 128b SSE
2379                  * register. HCW metadata is constructed in the SSE registers
2380                  * like so:
2381                  * sse_qe[0][63:0]:   qe[0]'s metadata
2382                  * sse_qe[0][127:64]: qe[1]'s metadata
2383                  * sse_qe[1][63:0]:   qe[2]'s metadata
2384                  * sse_qe[1][127:64]: qe[3]'s metadata
2385                  */
2386
2387                 /* Convert the event operation into a command byte and store it
2388                  * in the metadata:
2389                  * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
2390                  * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2391                  * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
2392                  * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2393                  */
2394 #define DLB2_QE_CMD_BYTE 7
2395                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2396                                 cmd_byte_map[qm_port->is_directed][ev[0].op],
2397                                 DLB2_QE_CMD_BYTE);
2398                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2399                                 cmd_byte_map[qm_port->is_directed][ev[1].op],
2400                                 DLB2_QE_CMD_BYTE + 8);
2401                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2402                                 cmd_byte_map[qm_port->is_directed][ev[2].op],
2403                                 DLB2_QE_CMD_BYTE);
2404                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2405                                 cmd_byte_map[qm_port->is_directed][ev[3].op],
2406                                 DLB2_QE_CMD_BYTE + 8);
2407
2408                 /* Store priority, scheduling type, and queue ID in the sched
2409                  * word array because these values are re-used when the
2410                  * destination is a directed queue.
2411                  */
2412                 sched_word[0] = EV_TO_DLB2_PRIO(ev[0].priority) << 10 |
2413                                 sched_type[0] << 8 |
2414                                 queue_id[0];
2415                 sched_word[1] = EV_TO_DLB2_PRIO(ev[1].priority) << 10 |
2416                                 sched_type[1] << 8 |
2417                                 queue_id[1];
2418                 sched_word[2] = EV_TO_DLB2_PRIO(ev[2].priority) << 10 |
2419                                 sched_type[2] << 8 |
2420                                 queue_id[2];
2421                 sched_word[3] = EV_TO_DLB2_PRIO(ev[3].priority) << 10 |
2422                                 sched_type[3] << 8 |
2423                                 queue_id[3];
2424
2425                 /* Store the event priority, scheduling type, and queue ID in
2426                  * the metadata:
2427                  * sse_qe[0][31:16] = sched_word[0]
2428                  * sse_qe[0][95:80] = sched_word[1]
2429                  * sse_qe[1][31:16] = sched_word[2]
2430                  * sse_qe[1][95:80] = sched_word[3]
2431                  */
2432 #define DLB2_QE_QID_SCHED_WORD 1
2433                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2434                                              sched_word[0],
2435                                              DLB2_QE_QID_SCHED_WORD);
2436                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2437                                              sched_word[1],
2438                                              DLB2_QE_QID_SCHED_WORD + 4);
2439                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2440                                              sched_word[2],
2441                                              DLB2_QE_QID_SCHED_WORD);
2442                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2443                                              sched_word[3],
2444                                              DLB2_QE_QID_SCHED_WORD + 4);
2445
2446                 /* If the destination is a load-balanced queue, store the lock
2447                  * ID. If it is a directed queue, DLB places this field in
2448                  * bytes 10-11 of the received QE, so we format it accordingly:
2449                  * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
2450                  * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2451                  * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
2452                  * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2453                  */
2454 #define DLB2_QE_LOCK_ID_WORD 2
2455                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2456                                 (sched_type[0] == DLB2_SCHED_DIRECTED) ?
2457                                         sched_word[0] : ev[0].flow_id,
2458                                 DLB2_QE_LOCK_ID_WORD);
2459                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2460                                 (sched_type[1] == DLB2_SCHED_DIRECTED) ?
2461                                         sched_word[1] : ev[1].flow_id,
2462                                 DLB2_QE_LOCK_ID_WORD + 4);
2463                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2464                                 (sched_type[2] == DLB2_SCHED_DIRECTED) ?
2465                                         sched_word[2] : ev[2].flow_id,
2466                                 DLB2_QE_LOCK_ID_WORD);
2467                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2468                                 (sched_type[3] == DLB2_SCHED_DIRECTED) ?
2469                                         sched_word[3] : ev[3].flow_id,
2470                                 DLB2_QE_LOCK_ID_WORD + 4);
2471
2472                 /* Store the event type and sub event type in the metadata:
2473                  * sse_qe[0][15:0]  = flow_id[0]
2474                  * sse_qe[0][79:64] = flow_id[1]
2475                  * sse_qe[1][15:0]  = flow_id[2]
2476                  * sse_qe[1][79:64] = flow_id[3]
2477                  */
2478 #define DLB2_QE_EV_TYPE_WORD 0
2479                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2480                                              ev[0].sub_event_type << 8 |
2481                                                 ev[0].event_type,
2482                                              DLB2_QE_EV_TYPE_WORD);
2483                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2484                                              ev[1].sub_event_type << 8 |
2485                                                 ev[1].event_type,
2486                                              DLB2_QE_EV_TYPE_WORD + 4);
2487                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2488                                              ev[2].sub_event_type << 8 |
2489                                                 ev[2].event_type,
2490                                              DLB2_QE_EV_TYPE_WORD);
2491                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2492                                              ev[3].sub_event_type << 8 |
2493                                                 ev[3].event_type,
2494                                              DLB2_QE_EV_TYPE_WORD + 4);
2495
2496                 /* Store the metadata to memory (use the double-precision
2497                  * _mm_storeh_pd because there is no integer function for
2498                  * storing the upper 64b):
2499                  * qe[0] metadata = sse_qe[0][63:0]
2500                  * qe[1] metadata = sse_qe[0][127:64]
2501                  * qe[2] metadata = sse_qe[1][63:0]
2502                  * qe[3] metadata = sse_qe[1][127:64]
2503                  */
2504                 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2505                 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2506                               (__m128d)sse_qe[0]);
2507                 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2508                 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2509                               (__m128d)sse_qe[1]);
2510
2511                 qe[0].data = ev[0].u64;
2512                 qe[1].data = ev[1].u64;
2513                 qe[2].data = ev[2].u64;
2514                 qe[3].data = ev[3].u64;
2515
2516                 break;
2517         case 3:
2518         case 2:
2519         case 1:
2520                 for (i = 0; i < num; i++) {
2521                         qe[i].cmd_byte =
2522                                 cmd_byte_map[qm_port->is_directed][ev[i].op];
2523                         qe[i].sched_type = sched_type[i];
2524                         qe[i].data = ev[i].u64;
2525                         qe[i].qid = queue_id[i];
2526                         qe[i].priority = EV_TO_DLB2_PRIO(ev[i].priority);
2527                         qe[i].lock_id = ev[i].flow_id;
2528                         if (sched_type[i] == DLB2_SCHED_DIRECTED) {
2529                                 struct dlb2_msg_info *info =
2530                                         (struct dlb2_msg_info *)&qe[i].lock_id;
2531
2532                                 info->qid = queue_id[i];
2533                                 info->sched_type = DLB2_SCHED_DIRECTED;
2534                                 info->priority = qe[i].priority;
2535                         }
2536                         qe[i].u.event_type.major = ev[i].event_type;
2537                         qe[i].u.event_type.sub = ev[i].sub_event_type;
2538                 }
2539                 break;
2540         case 0:
2541                 break;
2542         }
2543 }
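
/*
 * Editorial worked example (not from the original source): with
 * EV_TO_DLB2_PRIO(priority) = 0, sched_type = 1 and queue_id = 5, the
 * sched_word above is (0 << 10) | (1 << 8) | 5 = 0x0105.
 */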
2544
2545 static inline int
2546 dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2547                         struct dlb2_port *qm_port,
2548                         const struct rte_event ev[],
2549                         uint8_t *sched_type,
2550                         uint8_t *queue_id)
2551 {
2552         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2553         struct dlb2_eventdev_queue *ev_queue;
2554         uint16_t *cached_credits = NULL;
2555         struct dlb2_queue *qm_queue;
2556
2557         ev_queue = &dlb2->ev_queues[ev->queue_id];
2558         qm_queue = &ev_queue->qm_queue;
2559         *queue_id = qm_queue->id;
2560
2561         /* Ignore sched_type and hardware credits on release events */
2562         if (ev->op == RTE_EVENT_OP_RELEASE)
2563                 goto op_check;
2564
2565         if (!qm_queue->is_directed) {
2566                 /* Load balanced destination queue */
2567
2568                 if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2569                         rte_errno = -ENOSPC;
2570                         return 1;
2571                 }
2572                 cached_credits = &qm_port->cached_ldb_credits;
2573
2574                 switch (ev->sched_type) {
2575                 case RTE_SCHED_TYPE_ORDERED:
2576                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2577                         if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2578                                 DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
2579                                              *queue_id);
2580                                 rte_errno = -EINVAL;
2581                                 return 1;
2582                         }
2583                         *sched_type = DLB2_SCHED_ORDERED;
2584                         break;
2585                 case RTE_SCHED_TYPE_ATOMIC:
2586                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2587                         *sched_type = DLB2_SCHED_ATOMIC;
2588                         break;
2589                 case RTE_SCHED_TYPE_PARALLEL:
2590                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2591                         if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2592                                 *sched_type = DLB2_SCHED_ORDERED;
2593                         else
2594                                 *sched_type = DLB2_SCHED_UNORDERED;
2595                         break;
2596                 default:
2597                         DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2598                         DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2599                         rte_errno = -EINVAL;
2600                         return 1;
2601                 }
2602         } else {
2603                 /* Directed destination queue */
2604
2605                 if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2606                         rte_errno = -ENOSPC;
2607                         return 1;
2608                 }
2609                 cached_credits = &qm_port->cached_dir_credits;
2610
2611                 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2612
2613                 *sched_type = DLB2_SCHED_DIRECTED;
2614         }
2615
2616 op_check:
2617         switch (ev->op) {
2618         case RTE_EVENT_OP_NEW:
2619                 /* Check that a sw credit is available */
2620                 if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2621                         rte_errno = -ENOSPC;
2622                         return 1;
2623                 }
2624                 ev_port->inflight_credits--;
2625                 (*cached_credits)--;
2626                 break;
2627         case RTE_EVENT_OP_FORWARD:
2628                 /* Check for outstanding_releases underflow. If this occurs,
2629                  * the application is not using the EVENT_OPs correctly; for
2630                  * example, forwarding or releasing events that were not
2631                  * dequeued.
2632                  */
2633                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2634                 ev_port->outstanding_releases--;
2635                 qm_port->issued_releases++;
2636                 (*cached_credits)--;
2637                 break;
2638         case RTE_EVENT_OP_RELEASE:
2639                 ev_port->inflight_credits++;
2640                 /* Check for outstanding_releases underflow. If this occurs,
2641                  * the application is not using the EVENT_OPs correctly; for
2642                  * example, forwarding or releasing events that were not
2643                  * dequeued.
2644                  */
2645                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2646                 ev_port->outstanding_releases--;
2647                 qm_port->issued_releases++;
2648
2649                 /* Replenish s/w credits if enough are cached */
2650                 dlb2_replenish_sw_credits(dlb2, ev_port);
2651                 break;
2652         }
2653
2654         DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2655         DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2656
2657 #ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
2658         if (ev->op != RTE_EVENT_OP_RELEASE) {
2659                 DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
2660                 DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
2661         }
2662 #endif
2663
2664         return 0;
2665 }
2666
2667 static inline uint16_t
2668 __dlb2_event_enqueue_burst(void *event_port,
2669                            const struct rte_event events[],
2670                            uint16_t num,
2671                            bool use_delayed)
2672 {
2673         struct dlb2_eventdev_port *ev_port = event_port;
2674         struct dlb2_port *qm_port = &ev_port->qm_port;
2675         struct process_local_port_data *port_data;
2676         int i;
2677
2678         RTE_ASSERT(ev_port->enq_configured);
2679         RTE_ASSERT(events != NULL);
2680
2681         i = 0;
2682
2683         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2684
2685         while (i < num) {
2686                 uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
2687                 uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
2688                 int pop_offs = 0;
2689                 int j = 0;
2690
2691                 memset(qm_port->qe4,
2692                        0,
2693                        DLB2_NUM_QES_PER_CACHE_LINE *
2694                        sizeof(struct dlb2_enqueue_qe));
2695
2696                 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2697                         const struct rte_event *ev = &events[i + j];
2698                         int16_t thresh = qm_port->token_pop_thresh;
2699
2700                         if (use_delayed &&
2701                             qm_port->token_pop_mode == DELAYED_POP &&
2702                             (ev->op == RTE_EVENT_OP_FORWARD ||
2703                              ev->op == RTE_EVENT_OP_RELEASE) &&
2704                             qm_port->issued_releases >= thresh - 1) {
2705                                 /* Insert the token pop QE and break out. This
2706                                  * may result in a partial HCW, but that is
2707                                  * simpler than supporting arbitrary QE
2708                                  * insertion.
2709                                  */
2710                                 dlb2_construct_token_pop_qe(qm_port, j);
2711
2712                                 /* Reset the releases for the next QE batch */
2713                                 qm_port->issued_releases -= thresh;
2714
2715                                 pop_offs = 1;
2716                                 j++;
2717                                 break;
2718                         }
2719
2720                         if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
2721                                                     &sched_types[j],
2722                                                     &queue_ids[j]))
2723                                 break;
2724                 }
2725
2726                 if (j == 0)
2727                         break;
2728
2729                 dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
2730                                       sched_types, queue_ids);
2731
2732                 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2733
2734                 /* Don't include the token pop QE in the enqueue count */
2735                 i += j - pop_offs;
2736
2737                 /* Don't interpret j < DLB2_NUM_... as out-of-credits if
2738                  * pop_offs != 0
2739                  */
2740                 if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2741                         break;
2742         }
2743
2744         return i;
2745 }
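
/*
 * Editorial usage sketch (assumed application-side code, not from the
 * original source): the burst variants above sit behind
 * rte_event_enqueue_burst(); a short return indicates back-pressure (out of
 * credits) or an invalid event, distinguished via rte_errno as set by
 * dlb2_event_enqueue_prep():
 *
 *   uint16_t n = rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *
 *   if (n < nb) {
 *       if (rte_errno == -ENOSPC)
 *           ;  // out of credits: retry the remaining events later
 *       else
 *           printf("bad event at index %u\n", n);
 *   }
 *
 * dev_id, port_id, evs[] and nb are placeholders.
 */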
2746
2747 static uint16_t
2748 dlb2_event_enqueue_burst(void *event_port,
2749                              const struct rte_event events[],
2750                              uint16_t num)
2751 {
2752         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2753 }
2754
2755 static uint16_t
2756 dlb2_event_enqueue_burst_delayed(void *event_port,
2757                                      const struct rte_event events[],
2758                                      uint16_t num)
2759 {
2760         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2761 }
2762
2763 static inline uint16_t
2764 dlb2_event_enqueue(void *event_port,
2765                    const struct rte_event events[])
2766 {
2767         return __dlb2_event_enqueue_burst(event_port, events, 1, false);
2768 }
2769
2770 static inline uint16_t
2771 dlb2_event_enqueue_delayed(void *event_port,
2772                            const struct rte_event events[])
2773 {
2774         return __dlb2_event_enqueue_burst(event_port, events, 1, true);
2775 }
2776
2777 static uint16_t
2778 dlb2_event_enqueue_new_burst(void *event_port,
2779                              const struct rte_event events[],
2780                              uint16_t num)
2781 {
2782         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2783 }
2784
2785 static uint16_t
2786 dlb2_event_enqueue_new_burst_delayed(void *event_port,
2787                                      const struct rte_event events[],
2788                                      uint16_t num)
2789 {
2790         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2791 }
2792
2793 static uint16_t
2794 dlb2_event_enqueue_forward_burst(void *event_port,
2795                                  const struct rte_event events[],
2796                                  uint16_t num)
2797 {
2798         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2799 }
2800
2801 static uint16_t
2802 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
2803                                          const struct rte_event events[],
2804                                          uint16_t num)
2805 {
2806         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2807 }
2808
2809 static void
2810 dlb2_event_release(struct dlb2_eventdev *dlb2,
2811                    uint8_t port_id,
2812                    int n)
2813 {
2814         struct process_local_port_data *port_data;
2815         struct dlb2_eventdev_port *ev_port;
2816         struct dlb2_port *qm_port;
2817         int i;
2818
2819         if (port_id > dlb2->num_ports) {
2820                 DLB2_LOG_ERR("Invalid port id %d in dlb2_event_release\n",
2821                              port_id);
2822                 rte_errno = -EINVAL;
2823                 return;
2824         }
2825
2826         ev_port = &dlb2->ev_ports[port_id];
2827         qm_port = &ev_port->qm_port;
2828         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2829
2830         i = 0;
2831
2832         if (qm_port->is_directed) {
2833                 i = n;
2834                 goto sw_credit_update;
2835         }
2836
2837         while (i < n) {
2838                 int pop_offs = 0;
2839                 int j = 0;
2840
2841                 /* Zero out the QE command bytes */
2842                 qm_port->qe4[0].cmd_byte = 0;
2843                 qm_port->qe4[1].cmd_byte = 0;
2844                 qm_port->qe4[2].cmd_byte = 0;
2845                 qm_port->qe4[3].cmd_byte = 0;
2846
2847                 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
2848                         int16_t thresh = qm_port->token_pop_thresh;
2849
2850                         if (qm_port->token_pop_mode == DELAYED_POP &&
2851                             qm_port->issued_releases >= thresh - 1) {
2852                                 /* Insert the token pop QE */
2853                                 dlb2_construct_token_pop_qe(qm_port, j);
2854
2855                                 /* Reset the releases for the next QE batch */
2856                                 qm_port->issued_releases -= thresh;
2857
2858                                 pop_offs = 1;
2859                                 j++;
2860                                 break;
2861                         }
2862
2863                         qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
2864                         qm_port->issued_releases++;
2865                 }
2866
2867                 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2868
2869                 /* Don't include the token pop QE in the release count */
2870                 i += j - pop_offs;
2871         }
2872
2873 sw_credit_update:
2874         /* each release returns one credit */
2875         if (ev_port->outstanding_releases < i) {
2876                 DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
2877                              __func__);
2878                 return;
2879         }
2880         ev_port->outstanding_releases -= i;
2881         ev_port->inflight_credits += i;
2882
2883         /* Replenish s/w credits if enough releases are performed */
2884         dlb2_replenish_sw_credits(dlb2, ev_port);
2885 }
2886
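     /* Return 'num' hardware credits to the port's local cache. Once the
      * cache exceeds twice DLB2_SW_CREDIT_BATCH_SZ, a full batch is returned
      * to the shared load-balanced or directed credit pool with a single
      * atomic add, keeping contention on the shared pool low.
      */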
2887 static inline void
2888 dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
2889 {
2890         uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2891
2892         /* increment port credits; return a batch to pool if over threshold */
2893         if (!qm_port->is_directed) {
2894                 qm_port->cached_ldb_credits += num;
2895                 if (qm_port->cached_ldb_credits >= 2 * batch_size) {
2896                         __atomic_fetch_add(
2897                                 qm_port->credit_pool[DLB2_LDB_QUEUE],
2898                                 batch_size, __ATOMIC_SEQ_CST);
2899                         qm_port->cached_ldb_credits -= batch_size;
2900                 }
2901         } else {
2902                 qm_port->cached_dir_credits += num;
2903                 if (qm_port->cached_dir_credits >= 2 * batch_size) {
2904                         __atomic_fetch_add(
2905                                 qm_port->credit_pool[DLB2_DIR_QUEUE],
2906                                 batch_size, __ATOMIC_SEQ_CST);
2907                         qm_port->cached_dir_credits -= batch_size;
2908                 }
2909         }
2910 }
2911
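     /* Wait for a QE to arrive or for the dequeue timeout to expire. Returns
      * 1 if the timeout has already expired, otherwise returns 0 after either
      * arming umonitor/umwait on the next CQ entry's cq_gen bit (when umwait
      * is allowed) or busy-polling for up to the configured poll interval.
      */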
2912 static inline int
2913 dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
2914                   struct dlb2_eventdev_port *ev_port,
2915                   struct dlb2_port *qm_port,
2916                   uint64_t timeout,
2917                   uint64_t start_ticks)
2918 {
2919         struct process_local_port_data *port_data;
2920         uint64_t elapsed_ticks;
2921
2922         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2923
2924         elapsed_ticks = rte_get_timer_cycles() - start_ticks;
2925
2926         /* Wait/poll time expired */
2927         if (elapsed_ticks >= timeout) {
2928                 return 1;
2929         } else if (dlb2->umwait_allowed) {
2930                 struct rte_power_monitor_cond pmc;
2931                 volatile struct dlb2_dequeue_qe *cq_base;
2932                 union {
2933                         uint64_t raw_qe[2];
2934                         struct dlb2_dequeue_qe qe;
2935                 } qe_mask;
2936                 uint64_t expected_value;
2937                 volatile uint64_t *monitor_addr;
2938
2939                 qe_mask.qe.cq_gen = 1; /* set mask */
2940
2941                 cq_base = port_data->cq_base;
2942                 monitor_addr = (volatile uint64_t *)(volatile void *)
2943                         &cq_base[qm_port->cq_idx];
2944                 monitor_addr++; /* cq_gen bit is in the second 64-bit word */
2945
2946                 if (qm_port->gen_bit)
2947                         expected_value = qe_mask.raw_qe[1];
2948                 else
2949                         expected_value = 0;
2950
2951                 pmc.addr = monitor_addr;
2952                 pmc.val = expected_value;
2953                 pmc.mask = qe_mask.raw_qe[1];
2954                 pmc.size = sizeof(uint64_t);
2955
2956                 rte_power_monitor(&pmc, timeout + start_ticks);
2957
2958                 DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
2959         } else {
2960                 uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
2961                 uint64_t curr_ticks = rte_get_timer_cycles();
2962                 uint64_t init_ticks = curr_ticks;
2963
2964                 while ((curr_ticks - start_ticks < timeout) &&
2965                        (curr_ticks - init_ticks < poll_interval))
2966                         curr_ticks = rte_get_timer_cycles();
2967         }
2968
2969         return 0;
2970 }
2971
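     /* Scalar conversion of up to 'cnt' dequeued QEs into rte_events. QEs
      * with the error bit set are dropped (counted in rx_drop) and their CQ
      * token is returned immediately. Returns the number of events written.
      */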
2972 static inline int
2973 dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
2974                          struct dlb2_port *qm_port,
2975                          struct rte_event *events,
2976                          struct dlb2_dequeue_qe *qes,
2977                          int cnt)
2978 {
2979         uint8_t *qid_mappings = qm_port->qid_mappings;
2980         int i, num, evq_id;
2981
2982         for (i = 0, num = 0; i < cnt; i++) {
2983                 struct dlb2_dequeue_qe *qe = &qes[i];
2984                 int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
2985                         [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2986                         [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2987                         [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2988                         [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2989                 };
2990
2991                 /* Fill in event information.
2992                  * Note that flow_id must be embedded in the data by
2993                  * the application, e.g. in the mbuf RSS hash field if the
2994                  * data buffer is an mbuf.
2995                  */
2996                 if (unlikely(qe->error)) {
2997                         DLB2_LOG_ERR("QE error bit ON\n");
2998                         DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
2999                         dlb2_consume_qe_immediate(qm_port, 1);
3000                         continue; /* Ignore */
3001                 }
3002
3003                 events[num].u64 = qe->data;
3004                 events[num].flow_id = qe->flow_id;
3005                 events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
3006                 events[num].event_type = qe->u.event_type.major;
3007                 events[num].sub_event_type = qe->u.event_type.sub;
3008                 events[num].sched_type = sched_type_map[qe->sched_type];
3009                 events[num].impl_opaque = qe->qid_depth;
3010
3011                 /* qid not preserved for directed queues */
3012                 if (qm_port->is_directed)
3013                         evq_id = ev_port->link[0].queue_id;
3014                 else
3015                         evq_id = qid_mappings[qe->qid];
3016
3017                 events[num].queue_id = evq_id;
3018                 DLB2_INC_STAT(
3019                         ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
3020                         1);
3021                 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
3022                 num++;
3023         }
3024
3025         DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
3026
3027         return num;
3028 }
3029
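     /* Vectorized fast path for a full batch of four valid QEs: the metadata
      * of all four events is assembled in two 128-bit SSE registers and
      * stored with two 64-bit writes each. Falls back to the scalar path if
      * any QE has its error bit set.
      */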
3030 static inline int
3031 dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
3032                               struct dlb2_port *qm_port,
3033                               struct rte_event *events,
3034                               struct dlb2_dequeue_qe *qes)
3035 {
3036         int sched_type_map[] = {
3037                 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3038                 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3039                 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3040                 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3041         };
3042         const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
3043         uint8_t *qid_mappings = qm_port->qid_mappings;
3044         __m128i sse_evt[2];
3045
3046         /* In the unlikely case that any of the QE error bits are set, process
3047          * them one at a time.
3048          */
3049         if (unlikely(qes[0].error || qes[1].error ||
3050                      qes[2].error || qes[3].error))
3051                 return dlb2_process_dequeue_qes(ev_port, qm_port, events,
3052                                                  qes, num_events);
3053
3054         events[0].u64 = qes[0].data;
3055         events[1].u64 = qes[1].data;
3056         events[2].u64 = qes[2].data;
3057         events[3].u64 = qes[3].data;
3058
3059         /* Construct the metadata portion of two struct rte_events
3060          * in one 128b SSE register. Event metadata is constructed in the SSE
3061          * registers like so:
3062          * sse_evt[0][63:0]:   event[0]'s metadata
3063          * sse_evt[0][127:64]: event[1]'s metadata
3064          * sse_evt[1][63:0]:   event[2]'s metadata
3065          * sse_evt[1][127:64]: event[3]'s metadata
3066          */
3067         sse_evt[0] = _mm_setzero_si128();
3068         sse_evt[1] = _mm_setzero_si128();
3069
3070         /* Convert the hardware queue ID to an event queue ID and store it in
3071          * the metadata:
3072          * sse_evt[0][47:40]   = qid_mappings[qes[0].qid]
3073          * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
3074          * sse_evt[1][47:40]   = qid_mappings[qes[2].qid]
3075          * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
3076          */
3077 #define DLB_EVENT_QUEUE_ID_BYTE 5
3078         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3079                                      qid_mappings[qes[0].qid],
3080                                      DLB_EVENT_QUEUE_ID_BYTE);
3081         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3082                                      qid_mappings[qes[1].qid],
3083                                      DLB_EVENT_QUEUE_ID_BYTE + 8);
3084         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3085                                      qid_mappings[qes[2].qid],
3086                                      DLB_EVENT_QUEUE_ID_BYTE);
3087         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3088                                      qid_mappings[qes[3].qid],
3089                                      DLB_EVENT_QUEUE_ID_BYTE + 8);
3090
3091         /* Convert the hardware priority to an event priority and store it in
3092          * the metadata, while also returning the queue depth status
3093          * value captured by the hardware, storing it in impl_opaque, which can
3094          * be read by the application but not modified
3095          * sse_evt[0][55:48]   = DLB2_TO_EV_PRIO(qes[0].priority)
3096          * sse_evt[0][63:56]   = qes[0].qid_depth
3097          * sse_evt[0][119:112] = DLB2_TO_EV_PRIO(qes[1].priority)
3098          * sse_evt[0][127:120] = qes[1].qid_depth
3099          * sse_evt[1][55:48]   = DLB2_TO_EV_PRIO(qes[2].priority)
3100          * sse_evt[1][63:56]   = qes[2].qid_depth
3101          * sse_evt[1][119:112] = DLB2_TO_EV_PRIO(qes[3].priority)
3102          * sse_evt[1][127:120] = qes[3].qid_depth
3103          */
3104 #define DLB_EVENT_PRIO_IMPL_OPAQUE_WORD 3
3105 #define DLB_BYTE_SHIFT 8
3106         sse_evt[0] =
3107                 _mm_insert_epi16(sse_evt[0],
3108                         DLB2_TO_EV_PRIO((uint8_t)qes[0].priority) |
3109                         (qes[0].qid_depth << DLB_BYTE_SHIFT),
3110                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3111         sse_evt[0] =
3112                 _mm_insert_epi16(sse_evt[0],
3113                         DLB2_TO_EV_PRIO((uint8_t)qes[1].priority) |
3114                         (qes[1].qid_depth << DLB_BYTE_SHIFT),
3115                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3116         sse_evt[1] =
3117                 _mm_insert_epi16(sse_evt[1],
3118                         DLB2_TO_EV_PRIO((uint8_t)qes[2].priority) |
3119                         (qes[2].qid_depth << DLB_BYTE_SHIFT),
3120                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3121         sse_evt[1] =
3122                 _mm_insert_epi16(sse_evt[1],
3123                         DLB2_TO_EV_PRIO((uint8_t)qes[3].priority) |
3124                         (qes[3].qid_depth << DLB_BYTE_SHIFT),
3125                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3126
3127         /* Write the event type, sub event type, and flow_id to the event
3128          * metadata.
3129          * sse_evt[0][31:0]   = qes[0].flow_id |
3130          *                      qes[0].u.event_type.major << 28 |
3131          *                      qes[0].u.event_type.sub << 20;
3132          * sse_evt[0][95:64]  = qes[1].flow_id |
3133          *                      qes[1].u.event_type.major << 28 |
3134          *                      qes[1].u.event_type.sub << 20;
3135          * sse_evt[1][31:0]   = qes[2].flow_id |
3136          *                      qes[2].u.event_type.major << 28 |
3137          *                      qes[2].u.event_type.sub << 20;
3138          * sse_evt[1][95:64]  = qes[3].flow_id |
3139          *                      qes[3].u.event_type.major << 28 |
3140          *                      qes[3].u.event_type.sub << 20;
3141          */
3142 #define DLB_EVENT_EV_TYPE_DW 0
3143 #define DLB_EVENT_EV_TYPE_SHIFT 28
3144 #define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
3145         sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3146                         qes[0].flow_id |
3147                         qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3148                         qes[0].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3149                         DLB_EVENT_EV_TYPE_DW);
3150         sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3151                         qes[1].flow_id |
3152                         qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3153                         qes[1].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3154                         DLB_EVENT_EV_TYPE_DW + 2);
3155         sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3156                         qes[2].flow_id |
3157                         qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3158                         qes[2].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3159                         DLB_EVENT_EV_TYPE_DW);
3160         sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3161                         qes[3].flow_id |
3162                         qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT  |
3163                         qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3164                         DLB_EVENT_EV_TYPE_DW + 2);
3165
3166         /* Write the sched type to the event metadata. 'op' and 'rsvd' are not
3167          * set:
3168          * sse_evt[0][39:32]  = sched_type_map[qes[0].sched_type] << 6
3169          * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
3170          * sse_evt[1][39:32]  = sched_type_map[qes[2].sched_type] << 6
3171          * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
3172          */
3173 #define DLB_EVENT_SCHED_TYPE_BYTE 4
3174 #define DLB_EVENT_SCHED_TYPE_SHIFT 6
3175         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3176                 sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3177                 DLB_EVENT_SCHED_TYPE_BYTE);
3178         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3179                 sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3180                 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3181         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3182                 sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3183                 DLB_EVENT_SCHED_TYPE_BYTE);
3184         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3185                 sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3186                 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3187
3188         /* Store the metadata to the event (use the double-precision
3189          * _mm_storeh_pd because there is no integer function for storing the
3190          * upper 64b):
3191          * events[0].event = sse_evt[0][63:0]
3192          * events[1].event = sse_evt[0][127:64]
3193          * events[2].event = sse_evt[1][63:0]
3194          * events[3].event = sse_evt[1][127:64]
3195          */
3196         _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3197         _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3198         _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3199         _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3200
3201         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3202         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3203         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3204         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3205
3206         DLB2_INC_STAT(
3207                 ev_port->stats.queue[events[0].queue_id].
3208                         qid_depth[qes[0].qid_depth],
3209                 1);
3210         DLB2_INC_STAT(
3211                 ev_port->stats.queue[events[1].queue_id].
3212                         qid_depth[qes[1].qid_depth],
3213                 1);
3214         DLB2_INC_STAT(
3215                 ev_port->stats.queue[events[2].queue_id].
3216                         qid_depth[qes[2].qid_depth],
3217                 1);
3218         DLB2_INC_STAT(
3219                 ev_port->stats.queue[events[3].queue_id].
3220                         qid_depth[qes[3].qid_depth],
3221                 1);
3222
3223         DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3224
3225         return num_events;
3226 }
3227
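     /* Sparse-CQ receive: in sparse mode the hardware writes one valid QE per
      * cache line, so the next four QEs are gathered from four consecutive
      * cache lines. Each QE's gen bit is XORed against the expected
      * generation (corrected for ring wrap-around), and the popcount of the
      * result is the number of valid QEs.
      */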
3228 static __rte_always_inline int
3229 dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3230 {
3231         volatile struct dlb2_dequeue_qe *cq_addr;
3232         uint8_t xor_mask[2] = {0x0F, 0x00};
3233         const uint8_t and_mask = 0x0F;
3234         __m128i *qes = (__m128i *)qe;
3235         uint8_t gen_bits, gen_bit;
3236         uintptr_t addr[4];
3237         uint16_t idx;
3238
3239         cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3240
3241         idx = qm_port->cq_idx;
3242
3243         /* Load the next 4 QEs */
3244         addr[0] = (uintptr_t)&cq_addr[idx];
3245         addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
3246         addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
3247         addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3248
3249         /* Prefetch next batch of QEs (all CQs occupy at least 8 cache lines) */
3250         rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3251         rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3252         rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3253         rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3254
3255         /* Correct the xor_mask for wrap-around QEs */
3256         gen_bit = qm_port->gen_bit;
3257         xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
3258         xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
3259         xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3260
3261         /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3262          * valid, then QEs[0:N-1] are too.
3263          */
3264         qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3265         rte_compiler_barrier();
3266         qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3267         rte_compiler_barrier();
3268         qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3269         rte_compiler_barrier();
3270         qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3271
3272         /* Extract and combine the gen bits */
3273         gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3274                    ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3275                    ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3276                    ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3277
3278         /* XOR the combined bits such that a 1 represents a valid QE */
3279         gen_bits ^= xor_mask[gen_bit];
3280
3281         /* Mask off gen bits we don't care about */
3282         gen_bits &= and_mask;
3283
3284         return __builtin_popcount(gen_bits);
3285 }
3286
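     /* Advance the CQ consumer index by 'cnt' QE slots. cq_idx_unmasked runs
      * freely, cq_idx is the masked ring offset, and the expected gen bit is
      * the inverse of bit 'gen_bit_shift' of the unmasked index, so that it
      * toggles as the index wraps the CQ.
      */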
3287 static inline void
3288 dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
3289 {
3290         uint16_t idx = qm_port->cq_idx_unmasked + cnt;
3291
3292         qm_port->cq_idx_unmasked = idx;
3293         qm_port->cq_idx = idx & qm_port->cq_depth_mask;
3294         qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
3295 }
3296
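     /* Dequeue loop for sparse CQ mode. QEs are gathered four at a time, and
      * the CQ index advances by num_avail << 2 because each QE occupies a
      * full cache line (four QE slots) in this mode.
      */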
3297 static inline int16_t
3298 dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
3299                        struct dlb2_eventdev_port *ev_port,
3300                        struct rte_event *events,
3301                        uint16_t max_num,
3302                        uint64_t dequeue_timeout_ticks)
3303 {
3304         uint64_t timeout;
3305         uint64_t start_ticks = 0ULL;
3306         struct dlb2_port *qm_port;
3307         int num = 0;
3308
3309         qm_port = &ev_port->qm_port;
3310
3311         /* We have a special implementation for waiting. Wait can be:
3312          * 1) no waiting at all
3313          * 2) busy poll only
3314          * 3) wait for an interrupt; if woken and the poll time has
3315          *    expired, return to the caller
3316          * 4) umonitor/umwait repeatedly up to the poll time
3317          */
3318
3319         /* If configured for per-dequeue wait, use the wait value provided
3320          * to this API. Otherwise use the global value set at eventdev
3321          * configuration time.
3322          */
3323         if (!dlb2->global_dequeue_wait)
3324                 timeout = dequeue_timeout_ticks;
3325         else
3326                 timeout = dlb2->global_dequeue_wait_ticks;
3327
3328         start_ticks = rte_get_timer_cycles();
3329
3330         while (num < max_num) {
3331                 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3332                 int num_avail;
3333
3334                 /* Gather up to 4 QEs (one per cache line in sparse mode) */
3335                 num_avail = dlb2_recv_qe_sparse(qm_port, qes);
3336
3337                 /* But don't process more than the user requested */
3338                 num_avail = RTE_MIN(num_avail, max_num - num);
3339
3340                 dlb2_inc_cq_idx(qm_port, num_avail << 2);
3341
3342                 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3343                         num += dlb2_process_dequeue_four_qes(ev_port,
3344                                                               qm_port,
3345                                                               &events[num],
3346                                                               &qes[0]);
3347                 else if (num_avail)
3348                         num += dlb2_process_dequeue_qes(ev_port,
3349                                                          qm_port,
3350                                                          &events[num],
3351                                                          &qes[0],
3352                                                          num_avail);
3353                 else if ((timeout == 0) || (num > 0))
3354                         /* Not waiting in any form, or 1+ events received? */
3355                         break;
3356                 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3357                                            timeout, start_ticks))
3358                         break;
3359         }
3360
3361         qm_port->owed_tokens += num;
3362
3363         if (num) {
3364                 if (qm_port->token_pop_mode == AUTO_POP)
3365                         dlb2_consume_qe_immediate(qm_port, num);
3366
3367                 ev_port->outstanding_releases += num;
3368
3369                 dlb2_port_credits_inc(qm_port, num);
3370         }
3371
3372         return num;
3373 }
3374
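     /* Standard (non-sparse) receive: load the entire 64B cache line that
      * contains the current CQ entry, demote it from the cache, and report
      * how many QEs starting at *offset (the entry's position within the
      * line) have a valid gen bit.
      */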
3375 static __rte_always_inline int
3376 dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
3377              uint8_t *offset)
3378 {
3379         uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
3380                                    {0x00, 0x01, 0x03, 0x07} };
3381         uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
3382         volatile struct dlb2_dequeue_qe *cq_addr;
3383         __m128i *qes = (__m128i *)qe;
3384         uint64_t *cache_line_base;
3385         uint8_t gen_bits;
3386
3387         cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3388         cq_addr = &cq_addr[qm_port->cq_idx];
3389
3390         cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
3391         *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
3392
3393         /* Load the next CQ cache line from memory. Pack these reads as tight
3394          * as possible to reduce the chance that DLB invalidates the line while
3395          * the CPU is reading it. Read the cache line backwards to ensure that
3396          * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
3397          *
3398          * (Valid QEs start at &qe[offset])
3399          */
3400         qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
3401         qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
3402         qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
3403         qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
3404
3405         /* Evict the cache line ASAP */
3406         rte_cldemote(cache_line_base);
3407
3408         /* Extract and combine the gen bits */
3409         gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3410                    ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3411                    ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3412                    ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3413
3414         /* XOR the combined bits such that a 1 represents a valid QE */
3415         gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
3416
3417         /* Mask off gen bits we don't care about */
3418         gen_bits &= and_mask[*offset];
3419
3420         return __builtin_popcount(gen_bits);
3421 }
3422
3423 static inline int16_t
3424 dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
3425                 struct dlb2_eventdev_port *ev_port,
3426                 struct rte_event *events,
3427                 uint16_t max_num,
3428                 uint64_t dequeue_timeout_ticks)
3429 {
3430         uint64_t timeout;
3431         uint64_t start_ticks = 0ULL;
3432         struct dlb2_port *qm_port;
3433         int num = 0;
3434
3435         qm_port = &ev_port->qm_port;
3436
3437         /* We have a special implementation for waiting. Wait can be:
3438          * 1) no waiting at all
3439          * 2) busy poll only
3440          * 3) wait for an interrupt; if woken and the poll time has
3441          *    expired, return to the caller
3442          * 4) umonitor/umwait repeatedly up to the poll time
3443          */
3444
3445         /* If configured for per-dequeue wait, use the wait value provided
3446          * to this API. Otherwise use the global value set at eventdev
3447          * configuration time.
3448          */
3449         if (!dlb2->global_dequeue_wait)
3450                 timeout = dequeue_timeout_ticks;
3451         else
3452                 timeout = dlb2->global_dequeue_wait_ticks;
3453
3454         start_ticks = rte_get_timer_cycles();
3455
3456         while (num < max_num) {
3457                 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3458                 uint8_t offset;
3459                 int num_avail;
3460
3461                 /* Copy up to 4 QEs from the current cache line into qes */
3462                 num_avail = dlb2_recv_qe(qm_port, qes, &offset);
3463
3464                 /* But don't process more than the user requested */
3465                 num_avail = RTE_MIN(num_avail, max_num - num);
3466
3467                 dlb2_inc_cq_idx(qm_port, num_avail);
3468
3469                 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3470                         num += dlb2_process_dequeue_four_qes(ev_port,
3471                                                              qm_port,
3472                                                              &events[num],
3473                                                              &qes[offset]);
3474                 else if (num_avail)
3475                         num += dlb2_process_dequeue_qes(ev_port,
3476                                                         qm_port,
3477                                                         &events[num],
3478                                                         &qes[offset],
3479                                                         num_avail);
3480                 else if ((timeout == 0) || (num > 0))
3481                         /* Not waiting in any form, or 1+ events received? */
3482                         break;
3483                 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3484                                            timeout, start_ticks))
3485                         break;
3486         }
3487
3488         qm_port->owed_tokens += num;
3489
3490         if (num) {
3491                 if (qm_port->token_pop_mode == AUTO_POP)
3492                         dlb2_consume_qe_immediate(qm_port, num);
3493
3494                 ev_port->outstanding_releases += num;
3495
3496                 dlb2_port_credits_inc(qm_port, num);
3497         }
3498
3499         return num;
3500 }
3501
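     /* Eventdev dequeue burst entry point for standard CQ mode: perform any
      * implicit releases owed by the port, pop owed tokens when in
      * DEFERRED_POP mode, then poll the CQ via dlb2_hw_dequeue().
      */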
3502 static uint16_t
3503 dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3504                          uint64_t wait)
3505 {
3506         struct dlb2_eventdev_port *ev_port = event_port;
3507         struct dlb2_port *qm_port = &ev_port->qm_port;
3508         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3509         uint16_t cnt;
3510
3511         RTE_ASSERT(ev_port->setup_done);
3512         RTE_ASSERT(ev != NULL);
3513
3514         if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3515                 uint16_t out_rels = ev_port->outstanding_releases;
3516
3517                 dlb2_event_release(dlb2, ev_port->id, out_rels);
3518
3519                 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3520         }
3521
3522         if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3523                 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3524
3525         cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
3526
3527         DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3528         DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3529
3530         return cnt;
3531 }
3532
3533 static uint16_t
3534 dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3535 {
3536         return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
3537 }
3538
3539 static uint16_t
3540 dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3541                                 uint16_t num, uint64_t wait)
3542 {
3543         struct dlb2_eventdev_port *ev_port = event_port;
3544         struct dlb2_port *qm_port = &ev_port->qm_port;
3545         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3546         uint16_t cnt;
3547
3548         RTE_ASSERT(ev_port->setup_done);
3549         RTE_ASSERT(ev != NULL);
3550
3551         if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3552                 uint16_t out_rels = ev_port->outstanding_releases;
3553
3554                 dlb2_event_release(dlb2, ev_port->id, out_rels);
3555
3556                 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3557         }
3558
3559         if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3560                 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3561
3562         cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
3563
3564         DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3565         DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3566         return cnt;
3567 }
3568
3569 static uint16_t
3570 dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
3571                           uint64_t wait)
3572 {
3573         return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3574 }
3575
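     /* Dequeue and discard every event held by a port, invoking the
      * application's dev_stop_flush callback (if registered) for each one.
      * Events from load-balanced ports are released back to hardware so that
      * queue depths drain, and any remaining outstanding releases are issued
      * at the end.
      */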
3576 static void
3577 dlb2_flush_port(struct rte_eventdev *dev, int port_id)
3578 {
3579         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3580         eventdev_stop_flush_t flush;
3581         struct rte_event ev;
3582         uint8_t dev_id;
3583         void *arg;
3584         int i;
3585
3586         flush = dev->dev_ops->dev_stop_flush;
3587         dev_id = dev->data->dev_id;
3588         arg = dev->data->dev_stop_flush_arg;
3589
3590         while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
3591                 if (flush)
3592                         flush(dev_id, ev, arg);
3593
3594                 if (dlb2->ev_ports[port_id].qm_port.is_directed)
3595                         continue;
3596
3597                 ev.op = RTE_EVENT_OP_RELEASE;
3598
3599                 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3600         }
3601
3602         /* Enqueue any additional outstanding releases */
3603         ev.op = RTE_EVENT_OP_RELEASE;
3604
3605         for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
3606                 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3607 }
3608
3609 static uint32_t
3610 dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
3611                          struct dlb2_eventdev_queue *queue)
3612 {
3613         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3614         struct dlb2_get_ldb_queue_depth_args cfg;
3615         int ret;
3616
3617         cfg.queue_id = queue->qm_queue.id;
3618
3619         ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
3620         if (ret < 0) {
3621                 DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
3622                              ret, dlb2_error_strings[cfg.response.status]);
3623                 return ret;
3624         }
3625
3626         return cfg.response.id;
3627 }
3628
3629 static uint32_t
3630 dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
3631                          struct dlb2_eventdev_queue *queue)
3632 {
3633         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3634         struct dlb2_get_dir_queue_depth_args cfg;
3635         int ret;
3636
3637         cfg.queue_id = queue->qm_queue.id;
3638
3639         ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
3640         if (ret < 0) {
3641                 DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
3642                              ret, dlb2_error_strings[cfg.response.status]);
3643                 return ret;
3644         }
3645
3646         return cfg.response.id;
3647 }
3648
3649 uint32_t
3650 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
3651                      struct dlb2_eventdev_queue *queue)
3652 {
3653         if (queue->qm_queue.is_directed)
3654                 return dlb2_get_dir_queue_depth(dlb2, queue);
3655         else
3656                 return dlb2_get_ldb_queue_depth(dlb2, queue);
3657 }
3658
3659 static bool
3660 dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
3661                     struct dlb2_eventdev_queue *queue)
3662 {
3663         return dlb2_get_queue_depth(dlb2, queue) == 0;
3664 }
3665
3666 static bool
3667 dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
3668 {
3669         int i;
3670
3671         for (i = 0; i < dlb2->num_queues; i++) {
3672                 if (dlb2->ev_queues[i].num_links == 0)
3673                         continue;
3674                 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3675                         return false;
3676         }
3677
3678         return true;
3679 }
3680
3681 static bool
3682 dlb2_queues_empty(struct dlb2_eventdev *dlb2)
3683 {
3684         int i;
3685
3686         for (i = 0; i < dlb2->num_queues; i++) {
3687                 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3688                         return false;
3689         }
3690
3691         return true;
3692 }
3693
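     /* Drain the scheduling domain on device stop: flush every port until all
      * linked queues are empty, then temporarily link a load-balanced port to
      * any non-empty unlinked queue and flush that as well.
      */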
3694 static void
3695 dlb2_drain(struct rte_eventdev *dev)
3696 {
3697         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3698         struct dlb2_eventdev_port *ev_port = NULL;
3699         uint8_t dev_id;
3700         int i;
3701
3702         dev_id = dev->data->dev_id;
3703
3704         while (!dlb2_linked_queues_empty(dlb2)) {
3705                 /* Flush all the ev_ports, which will drain all their connected
3706                  * queues.
3707                  */
3708                 for (i = 0; i < dlb2->num_ports; i++)
3709                         dlb2_flush_port(dev, i);
3710         }
3711
3712         /* The queues are empty, but there may be events left in the ports. */
3713         for (i = 0; i < dlb2->num_ports; i++)
3714                 dlb2_flush_port(dev, i);
3715
3716         /* If the domain's queues are empty, we're done. */
3717         if (dlb2_queues_empty(dlb2))
3718                 return;
3719
3720         /* Else, there must be at least one unlinked load-balanced queue.
3721          * Select a load-balanced port with which to drain the unlinked
3722          * queue(s).
3723          */
3724         for (i = 0; i < dlb2->num_ports; i++) {
3725                 ev_port = &dlb2->ev_ports[i];
3726
3727                 if (!ev_port->qm_port.is_directed)
3728                         break;
3729         }
3730
3731         if (i == dlb2->num_ports) {
3732                 DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
3733                 return;
3734         }
3735
3736         rte_errno = 0;
3737         rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
3738
3739         if (rte_errno) {
3740                 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
3741                              ev_port->id);
3742                 return;
3743         }
3744
3745         for (i = 0; i < dlb2->num_queues; i++) {
3746                 uint8_t qid, prio;
3747                 int ret;
3748
3749                 if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3750                         continue;
3751
3752                 qid = i;
3753                 prio = 0;
3754
3755                 /* Link the ev_port to the queue */
3756                 ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
3757                 if (ret != 1) {
3758                         DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
3759                                      ev_port->id, qid);
3760                         return;
3761                 }
3762
3763                 /* Flush the queue */
3764                 while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3765                         dlb2_flush_port(dev, ev_port->id);
3766
3767                 /* Drain any extant events in the ev_port. */
3768                 dlb2_flush_port(dev, ev_port->id);
3769
3770                 /* Unlink the ev_port from the queue */
3771                 ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
3772                 if (ret != 1) {
3773                         DLB2_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
3774                                      ev_port->id, qid);
3775                         return;
3776                 }
3777         }
3778 }
3779
3780 static void
3781 dlb2_eventdev_stop(struct rte_eventdev *dev)
3782 {
3783         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3784
3785         rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
3786
3787         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
3788                 DLB2_LOG_DBG("Internal error: already stopped\n");
3789                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3790                 return;
3791         } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
3792                 DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
3793                              (int)dlb2->run_state);
3794                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3795                 return;
3796         }
3797
3798         dlb2->run_state = DLB2_RUN_STATE_STOPPING;
3799
3800         rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3801
3802         dlb2_drain(dev);
3803
3804         dlb2->run_state = DLB2_RUN_STATE_STOPPED;
3805 }
3806
3807 static int
3808 dlb2_eventdev_close(struct rte_eventdev *dev)
3809 {
3810         dlb2_hw_reset_sched_domain(dev, false);
3811
3812         return 0;
3813 }
3814
3815 static void
3816 dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
3817 {
3818         RTE_SET_USED(dev);
3819         RTE_SET_USED(id);
3820
3821         /* This function intentionally left blank. */
3822 }
3823
3824 static void
3825 dlb2_eventdev_port_release(void *port)
3826 {
3827         struct dlb2_eventdev_port *ev_port = port;
3828         struct dlb2_port *qm_port;
3829
3830         if (ev_port) {
3831                 qm_port = &ev_port->qm_port;
3832                 if (qm_port->config_state == DLB2_CONFIGURED)
3833                         dlb2_free_qe_mem(qm_port);
3834         }
3835 }
3836
3837 static int
3838 dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
3839                             uint64_t *timeout_ticks)
3840 {
3841         RTE_SET_USED(dev);
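             /* cycles_per_ns is the timer frequency in whole GHz; fractional
              * cycles per nanosecond are truncated, so the conversion below
              * is approximate (and evaluates to zero for sub-1 GHz timers).
              */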
3842         uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
3843
3844         *timeout_ticks = ns * cycles_per_ns;
3845
3846         return 0;
3847 }
3848
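     /* Populate the eventdev ops table and the fast-path function pointers.
      * The dequeue pair is chosen according to the CQ poll mode reported by
      * the interface layer: sparse CQ mode uses the *_sparse variants.
      */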
3849 static void
3850 dlb2_entry_points_init(struct rte_eventdev *dev)
3851 {
3852         struct dlb2_eventdev *dlb2;
3853
3854         /* Expose PMD's eventdev interface */
3855         static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
3856                 .dev_infos_get    = dlb2_eventdev_info_get,
3857                 .dev_configure    = dlb2_eventdev_configure,
3858                 .dev_start        = dlb2_eventdev_start,
3859                 .dev_stop         = dlb2_eventdev_stop,
3860                 .dev_close        = dlb2_eventdev_close,
3861                 .queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
3862                 .queue_setup      = dlb2_eventdev_queue_setup,
3863                 .queue_release    = dlb2_eventdev_queue_release,
3864                 .port_def_conf    = dlb2_eventdev_port_default_conf_get,
3865                 .port_setup       = dlb2_eventdev_port_setup,
3866                 .port_release     = dlb2_eventdev_port_release,
3867                 .port_link        = dlb2_eventdev_port_link,
3868                 .port_unlink      = dlb2_eventdev_port_unlink,
3869                 .port_unlinks_in_progress =
3870                                     dlb2_eventdev_port_unlinks_in_progress,
3871                 .timeout_ticks    = dlb2_eventdev_timeout_ticks,
3872                 .dump             = dlb2_eventdev_dump,
3873                 .xstats_get       = dlb2_eventdev_xstats_get,
3874                 .xstats_get_names = dlb2_eventdev_xstats_get_names,
3875                 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
3876                 .xstats_reset       = dlb2_eventdev_xstats_reset,
3877                 .dev_selftest     = test_dlb2_eventdev,
3878         };
3879
3880         /* Install the fast-path enqueue/dequeue function pointers */
3881
3882         dev->dev_ops = &dlb2_eventdev_entry_ops;
3883         dev->enqueue = dlb2_event_enqueue;
3884         dev->enqueue_burst = dlb2_event_enqueue_burst;
3885         dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
3886         dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
3887
3888         dlb2 = dev->data->dev_private;
3889         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
3890                 dev->dequeue = dlb2_event_dequeue_sparse;
3891                 dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
3892         } else {
3893                 dev->dequeue = dlb2_event_dequeue;
3894                 dev->dequeue_burst = dlb2_event_dequeue_burst;
3895         }
3896 }
3897
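     /* Primary-process probe: open the device through the interface layer,
      * query the hardware revision and available resources, initialize the
      * hardware, xstats, and per-port defaults, then install the eventdev
      * entry points and queue depth thresholds.
      */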
3898 int
3899 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
3900                             const char *name,
3901                             struct dlb2_devargs *dlb2_args)
3902 {
3903         struct dlb2_eventdev *dlb2;
3904         int err, i;
3905
3906         dlb2 = dev->data->dev_private;
3907
3908         dlb2->event_dev = dev; /* backlink */
3909
3910         evdev_dlb2_default_info.driver_name = name;
3911
3912         dlb2->max_num_events_override = dlb2_args->max_num_events;
3913         dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
3914         dlb2->qm_instance.cos_id = dlb2_args->cos_id;
3915
3916         err = dlb2_iface_open(&dlb2->qm_instance, name);
3917         if (err < 0) {
3918                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3919                              err);
3920                 return err;
3921         }
3922
3923         err = dlb2_iface_get_device_version(&dlb2->qm_instance,
3924                                             &dlb2->revision);
3925         if (err < 0) {
3926                 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
3927                              err);
3928                 return err;
3929         }
3930
3931         err = dlb2_hw_query_resources(dlb2);
3932         if (err) {
3933                 DLB2_LOG_ERR("get resources err=%d for %s\n",
3934                              err, name);
3935                 return err;
3936         }
3937
3938         dlb2_iface_hardware_init(&dlb2->qm_instance);
3939
3940         err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
3941         if (err < 0) {
3942                 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
3943                              err);
3944                 return err;
3945         }
3946
3947         /* Complete xstats runtime initialization */
3948         err = dlb2_xstats_init(dlb2);
3949         if (err) {
3950                 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
3951                 return err;
3952         }
3953
3954         /* Initialize each port's token pop mode */
3955         for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
3956                 dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
3957
3958         rte_spinlock_init(&dlb2->qm_instance.resource_lock);
3959
3960         dlb2_iface_low_level_io_init();
3961
3962         dlb2_entry_points_init(dev);
3963
3964         dlb2_init_queue_depth_thresholds(dlb2,
3965                                          dlb2_args->qid_depth_thresholds.val);
3966
3967         return 0;
3968 }
3969
3970 int
3971 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
3972                               const char *name)
3973 {
3974         struct dlb2_eventdev *dlb2;
3975         int err;
3976
3977         dlb2 = dev->data->dev_private;
3978
3979         evdev_dlb2_default_info.driver_name = name;
3980
3981         err = dlb2_iface_open(&dlb2->qm_instance, name);
3982         if (err < 0) {
3983                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3984                              err);
3985                 return err;
3986         }
3987
3988         err = dlb2_hw_query_resources(dlb2);
3989         if (err) {
3990                 DLB2_LOG_ERR("get resources err=%d for %s\n",
3991                              err, name);
3992                 return err;
3993         }
3994
3995         dlb2_iface_low_level_io_init();
3996
3997         dlb2_entry_points_init(dev);
3998
3999         return 0;
4000 }
4001
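     /* Parse the comma-separated devargs string. Unrecognized keys cause the
      * whole list to be ignored with an informational log; recognized keys
      * are applied to dlb2_args. V2 and V2.5 use different handlers for the
      * qid_depth_thresh key, and num_dir_credits is only processed for V2.
      */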
4002 int
4003 dlb2_parse_params(const char *params,
4004                   const char *name,
4005                   struct dlb2_devargs *dlb2_args,
4006                   uint8_t version)
4007 {
4008         int ret = 0;
4009         static const char * const args[] = { NUMA_NODE_ARG,
4010                                              DLB2_MAX_NUM_EVENTS,
4011                                              DLB2_NUM_DIR_CREDITS,
4012                                              DEV_ID_ARG,
4013                                              DLB2_QID_DEPTH_THRESH_ARG,
4014                                              DLB2_COS_ARG,
4015                                              NULL };
4016
4017         if (params != NULL && params[0] != '\0') {
4018                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
4019
4020                 if (kvlist == NULL) {
4021                         RTE_LOG(INFO, PMD,
4022                                 "Ignoring unsupported parameters when creating device '%s'\n",
4023                                 name);
4024                 } else {
4025                         ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
4026                                                  set_numa_node,
4027                                                  &dlb2_args->socket_id);
4028                         if (ret != 0) {
4029                                 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
4030                                              name);
4031                                 rte_kvargs_free(kvlist);
4032                                 return ret;
4033                         }
4034
4035                         ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
4036                                                  set_max_num_events,
4037                                                  &dlb2_args->max_num_events);
4038                         if (ret != 0) {
4039                                 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
4040                                              name);
4041                                 rte_kvargs_free(kvlist);
4042                                 return ret;
4043                         }
4044
4045                         if (version == DLB2_HW_V2) {
4046                                 ret = rte_kvargs_process(kvlist,
4047                                         DLB2_NUM_DIR_CREDITS,
4048                                         set_num_dir_credits,
4049                                         &dlb2_args->num_dir_credits_override);
4050                                 if (ret != 0) {
4051                                         DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
4052                                                      name);
4053                                         rte_kvargs_free(kvlist);
4054                                         return ret;
4055                                 }
4056                         }
4057                         ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
4058                                                  set_dev_id,
4059                                                  &dlb2_args->dev_id);
4060                         if (ret != 0) {
4061                                 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
4062                                              name);
4063                                 rte_kvargs_free(kvlist);
4064                                 return ret;
4065                         }
4066
4067                         if (version == DLB2_HW_V2) {
4068                                 ret = rte_kvargs_process(
4069                                         kvlist,
4070                                         DLB2_QID_DEPTH_THRESH_ARG,
4071                                         set_qid_depth_thresh,
4072                                         &dlb2_args->qid_depth_thresholds);
4073                         } else {
4074                                 ret = rte_kvargs_process(
4075                                         kvlist,
4076                                         DLB2_QID_DEPTH_THRESH_ARG,
4077                                         set_qid_depth_thresh_v2_5,
4078                                         &dlb2_args->qid_depth_thresholds);
4079                         }
4080                         if (ret != 0) {
4081                                 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
4082                                              name);
4083                                 rte_kvargs_free(kvlist);
4084                                 return ret;
4085                         }
4086
4087                         ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
4088                                                  set_cos,
4089                                                  &dlb2_args->cos_id);
4090                         if (ret != 0) {
4091                                 DLB2_LOG_ERR("%s: Error parsing cos parameter",
4092                                              name);
4093                                 rte_kvargs_free(kvlist);
4094                                 return ret;
4095                         }
4096
4097                         rte_kvargs_free(kvlist);
4098                 }
4099         }
4100         return ret;
4101 }
4102 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);