event/dlb2: add delayed token pop logic
[dpdk.git] / drivers / event / dlb2 / dlb2.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <nmmintrin.h>
8 #include <pthread.h>
9 #include <stdint.h>
10 #include <stdbool.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/mman.h>
14 #include <sys/fcntl.h>
15
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
24 #include <rte_io.h>
25 #include <rte_kvargs.h>
26 #include <rte_log.h>
27 #include <rte_malloc.h>
28 #include <rte_mbuf.h>
29 #include <rte_power_intrinsics.h>
30 #include <rte_prefetch.h>
31 #include <rte_ring.h>
32 #include <rte_string_fns.h>
33
34 #include "dlb2_priv.h"
35 #include "dlb2_iface.h"
36 #include "dlb2_inline_fns.h"
37
38 /*
39  * Resources exposed to eventdev. Some values are overridden at runtime with
40  * values returned by the DLB2 kernel driver.
41  */
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
44 #endif
45 static struct rte_event_dev_info evdev_dlb2_default_info = {
46         .driver_name = "", /* probe will set */
47         .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
48         .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
50         .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 #else
52         .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
53 #endif
54         .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
55         .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
56         .max_event_priority_levels = DLB2_QID_PRIORITIES,
57         .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
58         .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
59         .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
60         .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
61         .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
62         .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
63         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
64                           RTE_EVENT_DEV_CAP_EVENT_QOS |
65                           RTE_EVENT_DEV_CAP_BURST_MODE |
66                           RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
67                           RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
68                           RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
69 };
70
71 struct process_local_port_data
72 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
73
74 static void
75 dlb2_free_qe_mem(struct dlb2_port *qm_port)
76 {
77         if (qm_port == NULL)
78                 return;
79
80         rte_free(qm_port->qe4);
81         qm_port->qe4 = NULL;
82
83         rte_free(qm_port->int_arm_qe);
84         qm_port->int_arm_qe = NULL;
85
86         rte_free(qm_port->consume_qe);
87         qm_port->consume_qe = NULL;
88
89         rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
90         dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
91 }
92
93 /* override defaults with value(s) provided on command line */
94 static void
95 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
96                                  int *qid_depth_thresholds)
97 {
98         int q;
99
100         for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
101                 if (qid_depth_thresholds[q] != 0)
102                         dlb2->ev_queues[q].depth_threshold =
103                                 qid_depth_thresholds[q];
104         }
105 }
106
107 static int
108 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
109 {
110         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
111         struct dlb2_hw_resource_info *dlb2_info = &handle->info;
112         int ret;
113
114         /* Query driver resources provisioned for this device */
115
116         ret = dlb2_iface_get_num_resources(handle,
117                                            &dlb2->hw_rsrc_query_results);
118         if (ret) {
119                 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
120                 return ret;
121         }
122
123         /* Complete filling in device resource info returned to evdev app,
124          * overriding any default values.
125          * The capabilities (CAPs) were set at compile time.
126          */
127
128         evdev_dlb2_default_info.max_event_queues =
129                 dlb2->hw_rsrc_query_results.num_ldb_queues;
130
131         evdev_dlb2_default_info.max_event_ports =
132                 dlb2->hw_rsrc_query_results.num_ldb_ports;
133
134         evdev_dlb2_default_info.max_num_events =
135                 dlb2->hw_rsrc_query_results.num_ldb_credits;
136
137         /* Save off values used when creating the scheduling domain. */
138
139         handle->info.num_sched_domains =
140                 dlb2->hw_rsrc_query_results.num_sched_domains;
141
142         handle->info.hw_rsrc_max.nb_events_limit =
143                 dlb2->hw_rsrc_query_results.num_ldb_credits;
144
145         handle->info.hw_rsrc_max.num_queues =
146                 dlb2->hw_rsrc_query_results.num_ldb_queues +
147                 dlb2->hw_rsrc_query_results.num_dir_ports;
148
149         handle->info.hw_rsrc_max.num_ldb_queues =
150                 dlb2->hw_rsrc_query_results.num_ldb_queues;
151
152         handle->info.hw_rsrc_max.num_ldb_ports =
153                 dlb2->hw_rsrc_query_results.num_ldb_ports;
154
155         handle->info.hw_rsrc_max.num_dir_ports =
156                 dlb2->hw_rsrc_query_results.num_dir_ports;
157
158         handle->info.hw_rsrc_max.reorder_window_size =
159                 dlb2->hw_rsrc_query_results.num_hist_list_entries;
160
161         rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
162
163         return 0;
164 }
165
166 #define DLB2_BASE_10 10
167
168 static int
169 dlb2_string_to_int(int *result, const char *str)
170 {
171         long ret;
172         char *endptr;
173
174         if (str == NULL || result == NULL)
175                 return -EINVAL;
176
177         errno = 0;
178         ret = strtol(str, &endptr, DLB2_BASE_10);
179         if (errno)
180                 return -errno;
181
182 /* long and int may have different widths on some architectures */
183         if (ret < INT_MIN || ret > INT_MAX || endptr == str)
184                 return -EINVAL;
185
186         *result = ret;
187         return 0;
188 }
189
190 static int
191 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
192 {
193         int *socket_id = opaque;
194         int ret;
195
196         ret = dlb2_string_to_int(socket_id, value);
197         if (ret < 0)
198                 return ret;
199
200         if (*socket_id >= RTE_MAX_NUMA_NODES)
201                 return -EINVAL;
202         return 0;
203 }
204
205 static int
206 set_max_num_events(const char *key __rte_unused,
207                    const char *value,
208                    void *opaque)
209 {
210         int *max_num_events = opaque;
211         int ret;
212
213         if (value == NULL || opaque == NULL) {
214                 DLB2_LOG_ERR("NULL pointer\n");
215                 return -EINVAL;
216         }
217
218         ret = dlb2_string_to_int(max_num_events, value);
219         if (ret < 0)
220                 return ret;
221
222         if (*max_num_events < 0 || *max_num_events >
223                         DLB2_MAX_NUM_LDB_CREDITS) {
224                 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
225                              DLB2_MAX_NUM_LDB_CREDITS);
226                 return -EINVAL;
227         }
228
229         return 0;
230 }
231
232 static int
233 set_num_dir_credits(const char *key __rte_unused,
234                     const char *value,
235                     void *opaque)
236 {
237         int *num_dir_credits = opaque;
238         int ret;
239
240         if (value == NULL || opaque == NULL) {
241                 DLB2_LOG_ERR("NULL pointer\n");
242                 return -EINVAL;
243         }
244
245         ret = dlb2_string_to_int(num_dir_credits, value);
246         if (ret < 0)
247                 return ret;
248
249         if (*num_dir_credits < 0 ||
250             *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
251                 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
252                              DLB2_MAX_NUM_DIR_CREDITS);
253                 return -EINVAL;
254         }
255
256         return 0;
257 }
258
259 static int
260 set_dev_id(const char *key __rte_unused,
261            const char *value,
262            void *opaque)
263 {
264         int *dev_id = opaque;
265         int ret;
266
267         if (value == NULL || opaque == NULL) {
268                 DLB2_LOG_ERR("NULL pointer\n");
269                 return -EINVAL;
270         }
271
272         ret = dlb2_string_to_int(dev_id, value);
273         if (ret < 0)
274                 return ret;
275
276         return 0;
277 }
278
279 static int
280 set_cos(const char *key __rte_unused,
281         const char *value,
282         void *opaque)
283 {
284         enum dlb2_cos *cos_id = opaque;
285         int x = 0;
286         int ret;
287
288         if (value == NULL || opaque == NULL) {
289                 DLB2_LOG_ERR("NULL pointer\n");
290                 return -EINVAL;
291         }
292
293         ret = dlb2_string_to_int(&x, value);
294         if (ret < 0)
295                 return ret;
296
297         if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
298                 DLB2_LOG_ERR(
299                         "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
300                         x);
301                 return -EINVAL;
302         }
303
304         *cos_id = x;
305
306         return 0;
307 }
308
309
310 static int
311 set_qid_depth_thresh(const char *key __rte_unused,
312                      const char *value,
313                      void *opaque)
314 {
315         struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
316         int first, last, thresh, i;
317
318         if (value == NULL || opaque == NULL) {
319                 DLB2_LOG_ERR("NULL pointer\n");
320                 return -EINVAL;
321         }
322
323         /* command line override may take one of the following 3 forms:
324          * qid_depth_thresh=all:<threshold_value> ... all queues
325          * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
326          * qid_depth_thresh=qid:<threshold_value> ... just one queue
327          */
328         if (sscanf(value, "all:%d", &thresh) == 1) {
329                 first = 0;
330                 last = DLB2_MAX_NUM_QUEUES - 1;
331         } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
332                 /* we have everything we need */
333         } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
334                 last = first;
335         } else {
336                 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
337                 return -EINVAL;
338         }
339
340         if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
341                 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
342                 return -EINVAL;
343         }
344
345         if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
346                 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
347                              DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
348                 return -EINVAL;
349         }
350
351         for (i = first; i <= last; i++)
352                 qid_thresh->val[i] = thresh; /* indexed by qid */
353
354         return 0;
355 }
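/*
 * Illustrative usage (a sketch, not taken from this file): given the three
 * accepted forms parsed above, the "qid_depth_thresh" devarg could be given
 * as, for example,
 *
 *     qid_depth_thresh=all:128     threshold of 128 for every queue
 *     qid_depth_thresh=2-5:64      threshold of 64 for queues 2 through 5
 *     qid_depth_thresh=7:32        threshold of 32 for queue 7 only
 *
 * The threshold values here are hypothetical; any value up to
 * DLB2_MAX_QUEUE_DEPTH_THRESHOLD parses the same way.
 */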
356
357 static void
358 dlb2_eventdev_info_get(struct rte_eventdev *dev,
359                        struct rte_event_dev_info *dev_info)
360 {
361         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
362         int ret;
363
364         ret = dlb2_hw_query_resources(dlb2);
365         if (ret) {
366                 const struct rte_eventdev_data *data = dev->data;
367
368                 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
369                              ret, data->dev_id);
370                 /* fn is void, so fall through and return values set up in
371                  * probe
372                  */
373         }
374
375         /* Add num resources currently owned by this domain.
376          * These would become available if the scheduling domain were reset due
377          * to the application recalling eventdev_configure to *reconfigure* the
378          * domain.
379          */
380         evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
381         evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
382         evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
383
384         evdev_dlb2_default_info.max_event_queues =
385                 RTE_MIN(evdev_dlb2_default_info.max_event_queues,
386                         RTE_EVENT_MAX_QUEUES_PER_DEV);
387
388         evdev_dlb2_default_info.max_num_events =
389                 RTE_MIN(evdev_dlb2_default_info.max_num_events,
390                         dlb2->max_num_events_override);
391
392         *dev_info = evdev_dlb2_default_info;
393 }
394
395 static int
396 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
397                             const struct dlb2_hw_rsrcs *resources_asked)
398 {
399         int ret = 0;
400         struct dlb2_create_sched_domain_args *cfg;
401
402         if (resources_asked == NULL) {
403                 DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
404                 ret = -EINVAL;
405                 goto error_exit;
406         }
407
408         /* Map generic qm resources to dlb2 resources */
409         cfg = &handle->cfg.resources;
410
411         /* DIR ports and queues */
412
413         cfg->num_dir_ports = resources_asked->num_dir_ports;
414
415         cfg->num_dir_credits = resources_asked->num_dir_credits;
416
417         /* LDB queues */
418
419         cfg->num_ldb_queues = resources_asked->num_ldb_queues;
420
421         /* LDB ports */
422
423         cfg->cos_strict = 0; /* Best effort */
424         cfg->num_cos_ldb_ports[0] = 0;
425         cfg->num_cos_ldb_ports[1] = 0;
426         cfg->num_cos_ldb_ports[2] = 0;
427         cfg->num_cos_ldb_ports[3] = 0;
428
429         switch (handle->cos_id) {
430         case DLB2_COS_0:
431                 cfg->num_ldb_ports = 0; /* no don't care ports */
432                 cfg->num_cos_ldb_ports[0] =
433                         resources_asked->num_ldb_ports;
434                 break;
435         case DLB2_COS_1:
436                 cfg->num_ldb_ports = 0; /* no don't care ports */
437                 cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
438                 break;
439         case DLB2_COS_2:
440                 cfg->num_ldb_ports = 0; /* no don't care ports */
441                 cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
442                 break;
443         case DLB2_COS_3:
444                 cfg->num_ldb_ports = 0; /* no don't care ports */
445                 cfg->num_cos_ldb_ports[3] =
446                         resources_asked->num_ldb_ports;
447                 break;
448         case DLB2_COS_DEFAULT:
449                 /* all ldb ports are don't care ports from a cos perspective */
450                 cfg->num_ldb_ports =
451                         resources_asked->num_ldb_ports;
452                 break;
453         }
454
455         cfg->num_ldb_credits =
456                 resources_asked->num_ldb_credits;
457
458         cfg->num_atomic_inflights =
459                 DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
460                 cfg->num_ldb_queues;
461
462         cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
463                 DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
464
465         DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
466                      cfg->num_ldb_queues,
467                      resources_asked->num_ldb_ports,
468                      cfg->num_dir_ports,
469                      cfg->num_atomic_inflights,
470                      cfg->num_hist_list_entries,
471                      cfg->num_ldb_credits,
472                      cfg->num_dir_credits);
473
474         /* Configure the QM */
475
476         ret = dlb2_iface_sched_domain_create(handle, cfg);
477         if (ret < 0) {
478                 DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
479                              ret,
480                              dlb2_error_strings[cfg->response.status]);
481
482                 goto error_exit;
483         }
484
485         handle->domain_id = cfg->response.id;
486         handle->cfg.configured = true;
487
488 error_exit:
489
490         return ret;
491 }
492
493 static void
494 dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
495 {
496         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
497         enum dlb2_configuration_state config_state;
498         int i, j;
499
500         dlb2_iface_domain_reset(dlb2);
501
502         /* Free all dynamically allocated port memory */
503         for (i = 0; i < dlb2->num_ports; i++)
504                 dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
505
506         /* If reconfiguring, mark the device's queues and ports as "previously
507          * configured." If the user doesn't reconfigure them, the PMD will
508          * reapply their previous configuration when the device is started.
509          */
510         config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
511                 DLB2_NOT_CONFIGURED;
512
513         for (i = 0; i < dlb2->num_ports; i++) {
514                 dlb2->ev_ports[i].qm_port.config_state = config_state;
515                 /* Reset setup_done so ports can be reconfigured */
516                 dlb2->ev_ports[i].setup_done = false;
517                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
518                         dlb2->ev_ports[i].link[j].mapped = false;
519         }
520
521         for (i = 0; i < dlb2->num_queues; i++)
522                 dlb2->ev_queues[i].qm_queue.config_state = config_state;
523
524         for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
525                 dlb2->ev_queues[i].setup_done = false;
526
527         dlb2->num_ports = 0;
528         dlb2->num_ldb_ports = 0;
529         dlb2->num_dir_ports = 0;
530         dlb2->num_queues = 0;
531         dlb2->num_ldb_queues = 0;
532         dlb2->num_dir_queues = 0;
533         dlb2->configured = false;
534 }
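/*
 * Reconfiguration flow, summarized (descriptive note, not original code): when
 * the application calls rte_event_dev_configure() on an already-configured
 * device, dlb2_eventdev_configure() below invokes this reset with
 * reconfig == true, re-queries the hardware resources, and creates a fresh
 * scheduling domain. Queues and ports left in DLB2_PREV_CONFIGURED have their
 * previous configuration reapplied when the device is started again.
 */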
535
536 /* Note: 1 QM instance per QM device, QM instance/device == event device */
537 static int
538 dlb2_eventdev_configure(const struct rte_eventdev *dev)
539 {
540         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
541         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
542         struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
543         const struct rte_eventdev_data *data = dev->data;
544         const struct rte_event_dev_config *config = &data->dev_conf;
545         int ret;
546
547         /* If this eventdev is already configured, we must release the current
548          * scheduling domain before attempting to configure a new one.
549          */
550         if (dlb2->configured) {
551                 dlb2_hw_reset_sched_domain(dev, true);
552
553                 ret = dlb2_hw_query_resources(dlb2);
554                 if (ret) {
555                         DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
556                                      ret, data->dev_id);
557                         return ret;
558                 }
559         }
560
561         if (config->nb_event_queues > rsrcs->num_queues) {
562                 DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
563                              config->nb_event_queues,
564                              rsrcs->num_queues);
565                 return -EINVAL;
566         }
567         if (config->nb_event_ports > (rsrcs->num_ldb_ports
568                         + rsrcs->num_dir_ports)) {
569                 DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
570                              config->nb_event_ports,
571                              (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
572                 return -EINVAL;
573         }
574         if (config->nb_events_limit > rsrcs->nb_events_limit) {
575                 DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
576                              config->nb_events_limit,
577                              rsrcs->nb_events_limit);
578                 return -EINVAL;
579         }
580
581         if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
582                 dlb2->global_dequeue_wait = false;
583         else {
584                 uint32_t timeout32;
585
586                 dlb2->global_dequeue_wait = true;
587
588                 /* note size mismatch of timeout vals in eventdev lib. */
589                 timeout32 = config->dequeue_timeout_ns;
590
591                 dlb2->global_dequeue_wait_ticks =
592                         timeout32 * (rte_get_timer_hz() / 1E9);
593         }
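        /*
         * Worked example (illustrative numbers): with a 2 GHz timer
         * (rte_get_timer_hz() == 2e9) and dequeue_timeout_ns == 1000, the
         * computation above yields 1000 * (2e9 / 1e9) = 2000 wait ticks.
         */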
594
595         /* Does this platform support umonitor/umwait? */
596         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
597                 if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
598                     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
599                         DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
600                                      RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
601                         return -EINVAL;
602                 }
603                 dlb2->umwait_allowed = true;
604         }
605
606         rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
607         rsrcs->num_ldb_ports  = config->nb_event_ports - rsrcs->num_dir_ports;
608         /* 1 dir queue per dir port */
609         rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
610
611         /* Scale down nb_events_limit by 4 for directed credits, since there
612          * are 4x as many load-balanced credits.
613          */
614         rsrcs->num_ldb_credits = 0;
615         rsrcs->num_dir_credits = 0;
616
617         if (rsrcs->num_ldb_queues)
618                 rsrcs->num_ldb_credits = config->nb_events_limit;
619         if (rsrcs->num_dir_ports)
620                 rsrcs->num_dir_credits = config->nb_events_limit / 4;
621         if (dlb2->num_dir_credits_override != -1)
622                 rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
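        /*
         * Worked example (illustrative): with nb_events_limit == 2048 and both
         * LDB queues and DIR ports requested, the pools above become
         * num_ldb_credits == 2048 and num_dir_credits == 2048 / 4 == 512,
         * unless num_dir_credits_override (!= -1) replaces the latter.
         */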
623
624         if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
625                 DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
626                 return -ENODEV;
627         }
628
629         dlb2->new_event_limit = config->nb_events_limit;
630         __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
631
632         /* Save number of ports/queues for this event dev */
633         dlb2->num_ports = config->nb_event_ports;
634         dlb2->num_queues = config->nb_event_queues;
635         dlb2->num_dir_ports = rsrcs->num_dir_ports;
636         dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
637         dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
638         dlb2->num_dir_queues = dlb2->num_dir_ports;
639         dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
640         dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
641         dlb2->dir_credit_pool = rsrcs->num_dir_credits;
642         dlb2->max_dir_credits = rsrcs->num_dir_credits;
643
644         dlb2->configured = true;
645
646         return 0;
647 }
648
649 static void
650 dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
651                                     uint8_t port_id,
652                                     struct rte_event_port_conf *port_conf)
653 {
654         RTE_SET_USED(port_id);
655         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
656
657         port_conf->new_event_threshold = dlb2->new_event_limit;
658         port_conf->dequeue_depth = 32;
659         port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
660         port_conf->event_port_cfg = 0;
661 }
662
663 static void
664 dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
665                                      uint8_t queue_id,
666                                      struct rte_event_queue_conf *queue_conf)
667 {
668         RTE_SET_USED(dev);
669         RTE_SET_USED(queue_id);
670
671         queue_conf->nb_atomic_flows = 1024;
672         queue_conf->nb_atomic_order_sequences = 64;
673         queue_conf->event_queue_cfg = 0;
674         queue_conf->priority = 0;
675 }
676
677 static int32_t
678 dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
679 {
680         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
681         struct dlb2_get_sn_allocation_args cfg;
682         int ret;
683
684         cfg.group = group;
685
686         ret = dlb2_iface_get_sn_allocation(handle, &cfg);
687         if (ret < 0) {
688                 DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
689                              ret, dlb2_error_strings[cfg.response.status]);
690                 return ret;
691         }
692
693         return cfg.response.id;
694 }
695
696 static int
697 dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
698 {
699         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
700         struct dlb2_set_sn_allocation_args cfg;
701         int ret;
702
703         cfg.num = num;
704         cfg.group = group;
705
706         ret = dlb2_iface_set_sn_allocation(handle, &cfg);
707         if (ret < 0) {
708                 DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
709                              ret, dlb2_error_strings[cfg.response.status]);
710                 return ret;
711         }
712
713         return ret;
714 }
715
716 static int32_t
717 dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
718 {
719         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
720         struct dlb2_get_sn_occupancy_args cfg;
721         int ret;
722
723         cfg.group = group;
724
725         ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
726         if (ret < 0) {
727                 DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
728                              ret, dlb2_error_strings[cfg.response.status]);
729                 return ret;
730         }
731
732         return cfg.response.id;
733 }
734
735 /* Query the current sequence number allocations and, if they conflict with the
736  * requested LDB queue configuration, attempt to re-allocate sequence numbers.
737  * This is best-effort; if it fails, the PMD still attempts to configure the
738  * load-balanced queue, and that attempt then fails with an error.
739  */
740 static void
741 dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
742                            const struct rte_event_queue_conf *queue_conf)
743 {
744         int grp_occupancy[DLB2_NUM_SN_GROUPS];
745         int grp_alloc[DLB2_NUM_SN_GROUPS];
746         int i, sequence_numbers;
747
748         sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
749
750         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
751                 int total_slots;
752
753                 grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
754                 if (grp_alloc[i] < 0)
755                         return;
756
757                 total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
758
759                 grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
760                 if (grp_occupancy[i] < 0)
761                         return;
762
763                 /* DLB has at least one available slot for the requested
764                  * sequence numbers, so no further configuration required.
765                  */
766                 if (grp_alloc[i] == sequence_numbers &&
767                     grp_occupancy[i] < total_slots)
768                         return;
769         }
770
771         /* None of the sequence number groups are configured for the requested
772          * sequence numbers, so we have to reconfigure one of them. This is
773          * only possible if a group is not in use.
774          */
775         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
776                 if (grp_occupancy[i] == 0)
777                         break;
778         }
779
780         if (i == DLB2_NUM_SN_GROUPS) {
781                 DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
782                        __func__, sequence_numbers);
783                 return;
784         }
785
786         /* Attempt to configure slot i with the requested number of sequence
787          * numbers. Ignore the return value -- if this fails, the error will be
788          * caught during subsequent queue configuration.
789          */
790         dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
791 }
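/*
 * For illustration of the allocation logic above (numbers hypothetical): if a
 * sequence-number group is currently configured for 64 SNs per queue, it
 * exposes DLB2_MAX_LDB_SN_ALLOC / 64 slots; a queue requesting 64 SNs can use
 * that group only while the group's occupancy is below that slot count,
 * otherwise an unused group is reconfigured via dlb2_set_sn_allocation().
 */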
792
793 static int32_t
794 dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
795                          struct dlb2_eventdev_queue *ev_queue,
796                          const struct rte_event_queue_conf *evq_conf)
797 {
798         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
799         struct dlb2_queue *queue = &ev_queue->qm_queue;
800         struct dlb2_create_ldb_queue_args cfg;
801         int32_t ret;
802         uint32_t qm_qid;
803         int sched_type = -1;
804
805         if (evq_conf == NULL)
806                 return -EINVAL;
807
808         if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
809                 if (evq_conf->nb_atomic_order_sequences != 0)
810                         sched_type = RTE_SCHED_TYPE_ORDERED;
811                 else
812                         sched_type = RTE_SCHED_TYPE_PARALLEL;
813         } else
814                 sched_type = evq_conf->schedule_type;
815
816         cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
817         cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
818         cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
819
820         if (sched_type != RTE_SCHED_TYPE_ORDERED) {
821                 cfg.num_sequence_numbers = 0;
822                 cfg.num_qid_inflights = 2048;
823         }
824
825         /* The application should set this to the number of hardware flows it
826          * wants, not the overall number of flows it will use. E.g. if the app
827          * uses 64 flows and sets the compression level to 64, it gets at best
828          * 64 unique hashed flows in hardware.
829          */
830         switch (evq_conf->nb_atomic_flows) {
831         /* Valid DLB2 compression levels */
832         case 64:
833         case 128:
834         case 256:
835         case 512:
836         case (1 * 1024): /* 1K */
837         case (2 * 1024): /* 2K */
838         case (4 * 1024): /* 4K */
839         case (64 * 1024): /* 64K */
840                 cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
841                 break;
842         default:
843                 /* Invalid compression level */
844                 cfg.lock_id_comp_level = 0; /* no compression */
845         }
846
847         if (ev_queue->depth_threshold == 0) {
848                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
849                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
850         } else
851                 cfg.depth_threshold = ev_queue->depth_threshold;
852
853         ret = dlb2_iface_ldb_queue_create(handle, &cfg);
854         if (ret < 0) {
855                 DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
856                              ret, dlb2_error_strings[cfg.response.status]);
857                 return -EINVAL;
858         }
859
860         qm_qid = cfg.response.id;
861
862         /* Save off queue config for debug, resource lookups, and reconfig */
863         queue->num_qid_inflights = cfg.num_qid_inflights;
864         queue->num_atm_inflights = cfg.num_atomic_inflights;
865
866         queue->sched_type = sched_type;
867         queue->config_state = DLB2_CONFIGURED;
868
869         DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
870                      qm_qid,
871                      cfg.num_atomic_inflights,
872                      cfg.num_sequence_numbers,
873                      cfg.num_qid_inflights);
874
875         return qm_qid;
876 }
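/*
 * Application-side sketch (assumed usage, not taken from this file): selecting
 * one of the supported lock ID compression levels through the standard
 * eventdev queue configuration.
 *
 *     struct rte_event_queue_conf conf = {
 *             .schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *             .nb_atomic_flows = 1024,   -- one of the levels listed above
 *     };
 *     rte_event_queue_setup(dev_id, queue_id, &conf);
 *
 * Any other nb_atomic_flows value falls back to lock_id_comp_level == 0
 * (no compression).
 */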
877
878 static int
879 dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
880                               struct dlb2_eventdev_queue *ev_queue,
881                               const struct rte_event_queue_conf *queue_conf)
882 {
883         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
884         int32_t qm_qid;
885
886         if (queue_conf->nb_atomic_order_sequences)
887                 dlb2_program_sn_allocation(dlb2, queue_conf);
888
889         qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
890         if (qm_qid < 0) {
891                 DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
892
893                 return qm_qid;
894         }
895
896         dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
897
898         ev_queue->qm_queue.id = qm_qid;
899
900         return 0;
901 }
902
903 static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
904 {
905         int i, num = 0;
906
907         for (i = 0; i < dlb2->num_queues; i++) {
908                 if (dlb2->ev_queues[i].setup_done &&
909                     dlb2->ev_queues[i].qm_queue.is_directed)
910                         num++;
911         }
912
913         return num;
914 }
915
916 static void
917 dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
918                          struct dlb2_eventdev_queue *ev_queue)
919 {
920         struct dlb2_eventdev_port *ev_port;
921         int i, j;
922
923         for (i = 0; i < dlb2->num_ports; i++) {
924                 ev_port = &dlb2->ev_ports[i];
925
926                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
927                         if (!ev_port->link[j].valid ||
928                             ev_port->link[j].queue_id != ev_queue->id)
929                                 continue;
930
931                         ev_port->link[j].valid = false;
932                         ev_port->num_links--;
933                 }
934         }
935
936         ev_queue->num_links = 0;
937 }
938
939 static int
940 dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
941                           uint8_t ev_qid,
942                           const struct rte_event_queue_conf *queue_conf)
943 {
944         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
945         struct dlb2_eventdev_queue *ev_queue;
946         int ret;
947
948         if (queue_conf == NULL)
949                 return -EINVAL;
950
951         if (ev_qid >= dlb2->num_queues)
952                 return -EINVAL;
953
954         ev_queue = &dlb2->ev_queues[ev_qid];
955
956         ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
957                 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
958         ev_queue->id = ev_qid;
959         ev_queue->conf = *queue_conf;
960
961         if (!ev_queue->qm_queue.is_directed) {
962                 ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
963         } else {
964                 /* The directed queue isn't setup until link time, at which
965                  * point we know its directed port ID. Directed queue setup
966                  * will only fail if this queue is already setup or there are
967                  * no directed queues left to configure.
968                  */
969                 ret = 0;
970
971                 ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
972
973                 if (ev_queue->setup_done ||
974                     dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
975                         ret = -EINVAL;
976         }
977
978         /* Tear down pre-existing port->queue links */
979         if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
980                 dlb2_queue_link_teardown(dlb2, ev_queue);
981
982         if (!ret)
983                 ev_queue->setup_done = true;
984
985         return ret;
986 }
987
988 static int
989 dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
990 {
991         struct dlb2_cq_pop_qe *qe;
992
993         qe = rte_zmalloc(mz_name,
994                         DLB2_NUM_QES_PER_CACHE_LINE *
995                                 sizeof(struct dlb2_cq_pop_qe),
996                         RTE_CACHE_LINE_SIZE);
997
998         if (qe == NULL) {
999                 DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
1000                 return -ENOMEM;
1001         }
1002         qm_port->consume_qe = qe;
1003
1004         qe->qe_valid = 0;
1005         qe->qe_frag = 0;
1006         qe->qe_comp = 0;
1007         qe->cq_token = 1;
1008         /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
1009          * and so on.
1010          */
1011         qe->tokens = 0; /* set at run time */
1012         qe->meas_lat = 0;
1013         qe->no_dec = 0;
1014         /* Completion IDs are disabled */
1015         qe->cmp_id = 0;
1016
1017         return 0;
1018 }
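/*
 * Illustrative note on the 0-based tokens field above: to pop N CQ tokens at
 * run time, qe->tokens is written with N - 1 before the pop is issued
 * (e.g. tokens == 3 pops 4 tokens).
 */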
1019
1020 static int
1021 dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
1022 {
1023         struct dlb2_enqueue_qe *qe;
1024
1025         qe = rte_zmalloc(mz_name,
1026                         DLB2_NUM_QES_PER_CACHE_LINE *
1027                                 sizeof(struct dlb2_enqueue_qe),
1028                         RTE_CACHE_LINE_SIZE);
1029
1030         if (qe == NULL) {
1031                 DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
1032                 return -ENOMEM;
1033         }
1034         qm_port->int_arm_qe = qe;
1035
1036         /* V2 - INT ARM is CQ_TOKEN + FRAG */
1037         qe->qe_valid = 0;
1038         qe->qe_frag = 1;
1039         qe->qe_comp = 0;
1040         qe->cq_token = 1;
1041         qe->meas_lat = 0;
1042         qe->no_dec = 0;
1043         /* Completion IDs are disabled */
1044         qe->cmp_id = 0;
1045
1046         return 0;
1047 }
1048
1049 static int
1050 dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
1051 {
1052         int ret, sz;
1053
1054         sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
1055
1056         qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
1057
1058         if (qm_port->qe4 == NULL) {
1059                 DLB2_LOG_ERR("dlb2: no qe4 memory\n");
1060                 ret = -ENOMEM;
1061                 goto error_exit;
1062         }
1063
1064         ret = dlb2_init_int_arm_qe(qm_port, mz_name);
1065         if (ret < 0) {
1066                 DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
1067                 goto error_exit;
1068         }
1069
1070         ret = dlb2_init_consume_qe(qm_port, mz_name);
1071         if (ret < 0) {
1072                 DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
1073                 goto error_exit;
1074         }
1075
1076         return 0;
1077
1078 error_exit:
1079
1080         dlb2_free_qe_mem(qm_port);
1081
1082         return ret;
1083 }
1084
1085 static inline uint16_t
1086 dlb2_event_enqueue_delayed(void *event_port,
1087                            const struct rte_event events[]);
1088
1089 static inline uint16_t
1090 dlb2_event_enqueue_burst_delayed(void *event_port,
1091                                  const struct rte_event events[],
1092                                  uint16_t num);
1093
1094 static inline uint16_t
1095 dlb2_event_enqueue_new_burst_delayed(void *event_port,
1096                                      const struct rte_event events[],
1097                                      uint16_t num);
1098
1099 static inline uint16_t
1100 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
1101                                          const struct rte_event events[],
1102                                          uint16_t num);
1103
1104 static int
1105 dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
1106                         struct dlb2_eventdev_port *ev_port,
1107                         uint32_t dequeue_depth,
1108                         uint32_t enqueue_depth)
1109 {
1110         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1111         struct dlb2_create_ldb_port_args cfg = { {0} };
1112         int ret;
1113         struct dlb2_port *qm_port = NULL;
1114         char mz_name[RTE_MEMZONE_NAMESIZE];
1115         uint32_t qm_port_id;
1116         uint16_t ldb_credit_high_watermark;
1117         uint16_t dir_credit_high_watermark;
1118
1119         if (handle == NULL)
1120                 return -EINVAL;
1121
1122         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1123                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
1124                              DLB2_MIN_CQ_DEPTH);
1125                 return -EINVAL;
1126         }
1127
1128         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1129                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1130                              DLB2_MIN_ENQUEUE_DEPTH);
1131                 return -EINVAL;
1132         }
1133
1134         rte_spinlock_lock(&handle->resource_lock);
1135
1136         /* We round up to the next power of 2 if necessary */
1137         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1138         cfg.cq_depth_threshold = 1;
1139
1140         cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
1141
1142         if (handle->cos_id == DLB2_COS_DEFAULT)
1143                 cfg.cos_id = 0;
1144         else
1145                 cfg.cos_id = handle->cos_id;
1146
1147         cfg.cos_strict = 0;
1148
1149         /* User controls the LDB high watermark via enqueue depth. The DIR high
1150          * watermark is equal, unless the directed credit pool is too small.
1151          */
1152         ldb_credit_high_watermark = enqueue_depth;
1153
1154         /* If there are no directed ports, the kernel driver will ignore this
1155          * port's directed credit settings. Don't use enqueue_depth if it would
1156          * require more directed credits than are available.
1157          */
1158         dir_credit_high_watermark =
1159                 RTE_MIN(enqueue_depth,
1160                         handle->cfg.num_dir_credits / dlb2->num_ports);
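        /*
         * Worked example (illustrative numbers): with enqueue_depth == 64,
         * num_dir_credits == 1024 and num_ports == 64, the directed watermark
         * becomes RTE_MIN(64, 1024 / 64) == 16, while the LDB watermark stays
         * at the full enqueue depth of 64.
         */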
1161
1162         /* Per QM values */
1163
1164         ret = dlb2_iface_ldb_port_create(handle, &cfg,  dlb2->poll_mode);
1165         if (ret < 0) {
1166                 DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
1167                              ret, dlb2_error_strings[cfg.response.status]);
1168                 goto error_exit;
1169         }
1170
1171         qm_port_id = cfg.response.id;
1172
1173         DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
1174                      ev_port->id, qm_port_id);
1175
1176         qm_port = &ev_port->qm_port;
1177         qm_port->ev_port = ev_port; /* back ptr */
1178         qm_port->dlb2 = dlb2; /* back ptr */
1179         /*
1180          * Allocate and init local qe struct(s).
1181          * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
1182          */
1183
1184         snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
1185                  ev_port->id);
1186
1187         ret = dlb2_init_qe_mem(qm_port, mz_name);
1188         if (ret < 0) {
1189                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1190                 goto error_exit;
1191         }
1192
1193         qm_port->id = qm_port_id;
1194
1195         qm_port->cached_ldb_credits = 0;
1196         qm_port->cached_dir_credits = 0;
1197         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1198          * the effective depth is smaller.
1199          */
1200         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1201         qm_port->cq_idx = 0;
1202         qm_port->cq_idx_unmasked = 0;
1203
1204         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1205                 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1206         else
1207                 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1208
1209         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1210         /* starting value of gen bit - it toggles at wrap time */
1211         qm_port->gen_bit = 1;
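        /*
         * Worked example (illustrative): for dequeue_depth == 32 in sparse CQ
         * poll mode, cq_depth == 32, cq_depth_mask == 32 * 4 - 1 == 127 and
         * gen_bit_shift == __builtin_popcount(127) == 7; in non-sparse mode
         * the mask would be 31 and the shift 5.
         */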
1212
1213         qm_port->int_armed = false;
1214
1215         /* Save off for later use in info and lookup APIs. */
1216         qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
1217
1218         qm_port->dequeue_depth = dequeue_depth;
1219         qm_port->token_pop_thresh = dequeue_depth;
1220
1221         /* The default enqueue functions do not include delayed-pop support for
1222          * performance reasons.
1223          */
1224         if (qm_port->token_pop_mode == DELAYED_POP) {
1225                 dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
1226                 dlb2->event_dev->enqueue_burst =
1227                         dlb2_event_enqueue_burst_delayed;
1228                 dlb2->event_dev->enqueue_new_burst =
1229                         dlb2_event_enqueue_new_burst_delayed;
1230                 dlb2->event_dev->enqueue_forward_burst =
1231                         dlb2_event_enqueue_forward_burst_delayed;
1232         }
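        /*
         * Descriptive note: with DELAYED_POP selected, the CQ token return is
         * handled from the enqueue path (hence the *_delayed enqueue variants
         * installed above), tracked via owed_tokens and token_pop_thresh,
         * rather than being issued automatically at dequeue time as in
         * AUTO_POP mode.
         */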
1233
1234         qm_port->owed_tokens = 0;
1235         qm_port->issued_releases = 0;
1236
1237         /* Save config message too. */
1238         rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
1239
1240         /* update state */
1241         qm_port->state = PORT_STARTED; /* enabled at create time */
1242         qm_port->config_state = DLB2_CONFIGURED;
1243
1244         qm_port->dir_credits = dir_credit_high_watermark;
1245         qm_port->ldb_credits = ldb_credit_high_watermark;
1246         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1247         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1248
1249         DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1250                      qm_port_id,
1251                      dequeue_depth,
1252                      qm_port->ldb_credits,
1253                      qm_port->dir_credits);
1254
1255         rte_spinlock_unlock(&handle->resource_lock);
1256
1257         return 0;
1258
1259 error_exit:
1260
1261         if (qm_port)
1262                 dlb2_free_qe_mem(qm_port);
1263
1264         rte_spinlock_unlock(&handle->resource_lock);
1265
1266         DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
1267
1268         return ret;
1269 }
1270
1271 static void
1272 dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
1273                         struct dlb2_eventdev_port *ev_port)
1274 {
1275         struct dlb2_eventdev_queue *ev_queue;
1276         int i;
1277
1278         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1279                 if (!ev_port->link[i].valid)
1280                         continue;
1281
1282                 ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
1283
1284                 ev_port->link[i].valid = false;
1285                 ev_port->num_links--;
1286                 ev_queue->num_links--;
1287         }
1288 }
1289
1290 static int
1291 dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
1292                         struct dlb2_eventdev_port *ev_port,
1293                         uint32_t dequeue_depth,
1294                         uint32_t enqueue_depth)
1295 {
1296         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1297         struct dlb2_create_dir_port_args cfg = { {0} };
1298         int ret;
1299         struct dlb2_port *qm_port = NULL;
1300         char mz_name[RTE_MEMZONE_NAMESIZE];
1301         uint32_t qm_port_id;
1302         uint16_t ldb_credit_high_watermark;
1303         uint16_t dir_credit_high_watermark;
1304
1305         if (dlb2 == NULL || handle == NULL)
1306                 return -EINVAL;
1307
1308         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1309                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
1310                              DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
1311                 return -EINVAL;
1312         }
1313
1314         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1315                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1316                              DLB2_MIN_ENQUEUE_DEPTH);
1317                 return -EINVAL;
1318         }
1319
1320         rte_spinlock_lock(&handle->resource_lock);
1321
1322         /* Directed queues are configured at link time. */
1323         cfg.queue_id = -1;
1324
1325         /* We round up to the next power of 2 if necessary */
1326         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1327         cfg.cq_depth_threshold = 1;
1328
1329         /* User controls the LDB high watermark via enqueue depth. The DIR high
1330          * watermark is equal, unless the directed credit pool is too small.
1331          */
1332         ldb_credit_high_watermark = enqueue_depth;
1333
1334         /* Don't use enqueue_depth if it would require more directed credits
1335          * than are available.
1336          */
1337         dir_credit_high_watermark =
1338                 RTE_MIN(enqueue_depth,
1339                         handle->cfg.num_dir_credits / dlb2->num_ports);
1340
1341         /* Per QM values */
1342
1343         ret = dlb2_iface_dir_port_create(handle, &cfg,  dlb2->poll_mode);
1344         if (ret < 0) {
1345                 DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
1346                              ret, dlb2_error_strings[cfg.response.status]);
1347                 goto error_exit;
1348         }
1349
1350         qm_port_id = cfg.response.id;
1351
1352         DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
1353                      ev_port->id, qm_port_id);
1354
1355         qm_port = &ev_port->qm_port;
1356         qm_port->ev_port = ev_port; /* back ptr */
1357         qm_port->dlb2 = dlb2;  /* back ptr */
1358
1359         /*
1360          * Init local qe struct(s).
1361          * Note: MOVDIR64 requires the enqueue QE to be aligned
1362          */
1363
1364         snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
1365                  ev_port->id);
1366
1367         ret = dlb2_init_qe_mem(qm_port, mz_name);
1368
1369         if (ret < 0) {
1370                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1371                 goto error_exit;
1372         }
1373
1374         qm_port->id = qm_port_id;
1375
1376         qm_port->cached_ldb_credits = 0;
1377         qm_port->cached_dir_credits = 0;
1378         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1379          * the effective depth is smaller.
1380          */
1381         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1382         qm_port->cq_idx = 0;
1383         qm_port->cq_idx_unmasked = 0;
1384
1385         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1386                 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1387         else
1388                 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1389
1390         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1391         /* starting value of gen bit - it toggles at wrap time */
1392         qm_port->gen_bit = 1;
1393
1394         qm_port->int_armed = false;
1395
1396         /* Save off for later use in info and lookup APIs. */
1397         qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
1398
1399         qm_port->dequeue_depth = dequeue_depth;
1400
1401         /* Directed ports are auto-pop, by default. */
1402         qm_port->token_pop_mode = AUTO_POP;
1403         qm_port->owed_tokens = 0;
1404         qm_port->issued_releases = 0;
1405
1406         /* Save config message too. */
1407         rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
1408
1409         /* update state */
1410         qm_port->state = PORT_STARTED; /* enabled at create time */
1411         qm_port->config_state = DLB2_CONFIGURED;
1412
1413         qm_port->dir_credits = dir_credit_high_watermark;
1414         qm_port->ldb_credits = ldb_credit_high_watermark;
1415         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1416         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1417
1418         DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
1419                      qm_port_id,
1420                      dequeue_depth,
1421                      dir_credit_high_watermark,
1422                      ldb_credit_high_watermark);
1423
1424         rte_spinlock_unlock(&handle->resource_lock);
1425
1426         return 0;
1427
1428 error_exit:
1429
1430         if (qm_port)
1431                 dlb2_free_qe_mem(qm_port);
1432
1433         rte_spinlock_unlock(&handle->resource_lock);
1434
1435         DLB2_LOG_ERR("dlb2: create dir port failed!\n");
1436
1437         return ret;
1438 }
1439
1440 static int
1441 dlb2_eventdev_port_setup(struct rte_eventdev *dev,
1442                          uint8_t ev_port_id,
1443                          const struct rte_event_port_conf *port_conf)
1444 {
1445         struct dlb2_eventdev *dlb2;
1446         struct dlb2_eventdev_port *ev_port;
1447         int ret;
1448
1449         if (dev == NULL || port_conf == NULL) {
1450                 DLB2_LOG_ERR("Null parameter\n");
1451                 return -EINVAL;
1452         }
1453
1454         dlb2 = dlb2_pmd_priv(dev);
1455
1456         if (ev_port_id >= DLB2_MAX_NUM_PORTS)
1457                 return -EINVAL;
1458
1459         if (port_conf->dequeue_depth >
1460                 evdev_dlb2_default_info.max_event_port_dequeue_depth ||
1461             port_conf->enqueue_depth >
1462                 evdev_dlb2_default_info.max_event_port_enqueue_depth)
1463                 return -EINVAL;
1464
1465         ev_port = &dlb2->ev_ports[ev_port_id];
1466         /* configured? */
1467         if (ev_port->setup_done) {
1468                 DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
1469                 return -EINVAL;
1470         }
1471
1472         ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1473                 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1474
1475         if (!ev_port->qm_port.is_directed) {
1476                 ret = dlb2_hw_create_ldb_port(dlb2,
1477                                               ev_port,
1478                                               port_conf->dequeue_depth,
1479                                               port_conf->enqueue_depth);
1480                 if (ret < 0) {
1481                         DLB2_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1482                                      ev_port_id);
1483
1484                         return ret;
1485                 }
1486         } else {
1487                 ret = dlb2_hw_create_dir_port(dlb2,
1488                                               ev_port,
1489                                               port_conf->dequeue_depth,
1490                                               port_conf->enqueue_depth);
1491                 if (ret < 0) {
1492                         DLB2_LOG_ERR("Failed to create the DIR port\n");
1493                         return ret;
1494                 }
1495         }
1496
1497         /* Save off port config for reconfig */
1498         ev_port->conf = *port_conf;
1499
1500         ev_port->id = ev_port_id;
1501         ev_port->enq_configured = true;
1502         ev_port->setup_done = true;
1503         ev_port->inflight_max = port_conf->new_event_threshold;
1504         ev_port->implicit_release = !(port_conf->event_port_cfg &
1505                   RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1506         ev_port->outstanding_releases = 0;
1507         ev_port->inflight_credits = 0;
1508         ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
1509         ev_port->dlb2 = dlb2; /* reverse link */
1510
1511         /* Tear down pre-existing port->queue links */
1512         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1513                 dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
1514
1515         dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
1516
1517         return 0;
1518 }
1519
1520 static int16_t
1521 dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
1522                             uint32_t qm_port_id,
1523                             uint16_t qm_qid,
1524                             uint8_t priority)
1525 {
1526         struct dlb2_map_qid_args cfg;
1527         int32_t ret;
1528
1529         if (handle == NULL)
1530                 return -EINVAL;
1531
1532         /* Build message */
1533         cfg.port_id = qm_port_id;
1534         cfg.qid = qm_qid;
1535         cfg.priority = EV_TO_DLB2_PRIO(priority);
1536
1537         ret = dlb2_iface_map_qid(handle, &cfg);
1538         if (ret < 0) {
1539                 DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
1540                              ret, dlb2_error_strings[cfg.response.status]);
1541                 DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1542                              handle->domain_id, cfg.port_id,
1543                              cfg.qid,
1544                              cfg.priority);
1545         } else {
1546                 DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
1547                              qm_qid, qm_port_id);
1548         }
1549
1550         return ret;
1551 }
1552
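/* Link a load-balanced queue to a port at the given priority. The port's
 * link[] array is scanned for a matching (but unmapped) entry or the first
 * free slot; the QID->CQ map is then requested from the driver and the slot
 * is marked mapped on success.
 */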
1553 static int
1554 dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
1555                           struct dlb2_eventdev_port *ev_port,
1556                           struct dlb2_eventdev_queue *ev_queue,
1557                           uint8_t priority)
1558 {
1559         int first_avail = -1;
1560         int ret, i;
1561
1562         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1563                 if (ev_port->link[i].valid) {
1564                         if (ev_port->link[i].queue_id == ev_queue->id &&
1565                             ev_port->link[i].priority == priority) {
1566                                 if (ev_port->link[i].mapped)
1567                                         return 0; /* already mapped */
1568                                 first_avail = i;
1569                         }
1570                 } else if (first_avail == -1)
1571                         first_avail = i;
1572         }
1573         if (first_avail == -1) {
1574                 DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
1575                              ev_port->qm_port.id);
1576                 return -EINVAL;
1577         }
1578
1579         ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
1580                                           ev_port->qm_port.id,
1581                                           ev_queue->qm_queue.id,
1582                                           priority);
1583
1584         if (!ret)
1585                 ev_port->link[first_avail].mapped = true;
1586
1587         return ret;
1588 }
1589
1590 static int32_t
1591 dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
1592                          struct dlb2_eventdev_queue *ev_queue,
1593                          int32_t qm_port_id)
1594 {
1595         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1596         struct dlb2_create_dir_queue_args cfg;
1597         int32_t ret;
1598
1599         /* The directed port is always configured before its queue */
1600         cfg.port_id = qm_port_id;
1601
1602         if (ev_queue->depth_threshold == 0) {
1603                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1604                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1605         } else
1606                 cfg.depth_threshold = ev_queue->depth_threshold;
1607
1608         ret = dlb2_iface_dir_queue_create(handle, &cfg);
1609         if (ret < 0) {
1610                 DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
1611                              ret, dlb2_error_strings[cfg.response.status]);
1612                 return -EINVAL;
1613         }
1614
1615         return cfg.response.id;
1616 }
1617
1618 static int
1619 dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
1620                               struct dlb2_eventdev_queue *ev_queue,
1621                               struct dlb2_eventdev_port *ev_port)
1622 {
1623         int32_t qm_qid;
1624
1625         qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
1626
1627         if (qm_qid < 0) {
1628                 DLB2_LOG_ERR("Failed to create the DIR queue\n");
1629                 return qm_qid;
1630         }
1631
1632         dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1633
1634         ev_queue->qm_queue.id = qm_qid;
1635
1636         return 0;
1637 }
1638
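/* Perform the hardware side of a port->queue link. Links are deferred while
 * the device is stopped; once it is running, directed queues are created
 * against their port here and load-balanced queues are mapped to the port's
 * CQ.
 */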
1639 static int
1640 dlb2_do_port_link(struct rte_eventdev *dev,
1641                   struct dlb2_eventdev_queue *ev_queue,
1642                   struct dlb2_eventdev_port *ev_port,
1643                   uint8_t prio)
1644 {
1645         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1646         int err;
1647
1648         /* Don't link until start time. */
1649         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1650                 return 0;
1651
1652         if (ev_queue->qm_queue.is_directed)
1653                 err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
1654         else
1655                 err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
1656
1657         if (err) {
1658                 DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1659                              ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1660                              ev_queue->id, ev_port->id);
1661
1662                 rte_errno = err;
1663                 return -1;
1664         }
1665
1666         return 0;
1667 }
1668
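/* Validate a requested port->queue link: the queue must be configured, its
 * type (LDB vs. DIR) must match the port's, there must be a free link slot,
 * and directed ports and queues may carry at most one link.
 */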
1669 static int
1670 dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
1671                         uint8_t queue_id,
1672                         bool link_exists,
1673                         int index)
1674 {
1675         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
1676         struct dlb2_eventdev_queue *ev_queue;
1677         bool port_is_dir, queue_is_dir;
1678
1679         if (queue_id > dlb2->num_queues) {
1680                 rte_errno = -EINVAL;
1681                 return -1;
1682         }
1683
1684         ev_queue = &dlb2->ev_queues[queue_id];
1685
1686         if (!ev_queue->setup_done &&
1687             ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
1688                 rte_errno = -EINVAL;
1689                 return -1;
1690         }
1691
1692         port_is_dir = ev_port->qm_port.is_directed;
1693         queue_is_dir = ev_queue->qm_queue.is_directed;
1694
1695         if (port_is_dir != queue_is_dir) {
1696                 DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
1697                              queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1698                              port_is_dir ? "DIR" : "LDB", ev_port->id);
1699
1700                 rte_errno = -EINVAL;
1701                 return -1;
1702         }
1703
1704         /* Check if there is space for the requested link */
1705         if (!link_exists && index == -1) {
1706                 DLB2_LOG_ERR("no space for new link\n");
1707                 rte_errno = -ENOSPC;
1708                 return -1;
1709         }
1710
1711         /* Check if the directed port is already linked */
1712         if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1713             !link_exists) {
1714                 DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1715                              ev_port->id);
1716                 rte_errno = -EINVAL;
1717                 return -1;
1718         }
1719
1720         /* Check if the directed queue is already linked */
1721         if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1722             !link_exists) {
1723                 DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1724                              ev_queue->id);
1725                 rte_errno = -EINVAL;
1726                 return -1;
1727         }
1728
1729         return 0;
1730 }
1731
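/* eventdev port_link entry point. Each requested link is recorded in the
 * port's link[] array; the hardware mapping is applied immediately if the
 * device is running, or deferred until dev_start otherwise (see
 * dlb2_do_port_link). Returns the number of links successfully recorded.
 */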
1732 static int
1733 dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1734                         const uint8_t queues[], const uint8_t priorities[],
1735                         uint16_t nb_links)
1736
1737 {
1738         struct dlb2_eventdev_port *ev_port = event_port;
1739         struct dlb2_eventdev *dlb2;
1740         int i, j;
1741
1742         RTE_SET_USED(dev);
1743
1744         if (ev_port == NULL) {
1745                 DLB2_LOG_ERR("dlb2: evport is NULL\n");
1746                 rte_errno = -EINVAL;
1747                 return 0;
1748         }
1749
1750         if (!ev_port->setup_done &&
1751             ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
1752                 DLB2_LOG_ERR("dlb2: evport not setup\n");
1753                 rte_errno = -EINVAL;
1754                 return 0;
1755         }
1756
1757         /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
1758          * queues pointer.
1759          */
1760         if (nb_links == 0) {
1761                 DLB2_LOG_DBG("dlb2: nb_links is 0\n");
1762                 return 0; /* Ignore and return success */
1763         }
1764
1765         dlb2 = ev_port->dlb2;
1766
1767         DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
1768                      nb_links,
1769                      ev_port->qm_port.is_directed ? "DIR" : "LDB",
1770                      ev_port->id);
1771
1772         for (i = 0; i < nb_links; i++) {
1773                 struct dlb2_eventdev_queue *ev_queue;
1774                 uint8_t queue_id, prio;
1775                 bool found = false;
1776                 int index = -1;
1777
1778                 queue_id = queues[i];
1779                 prio = priorities[i];
1780
1781                 /* Check if the link already exists. */
1782                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1783                         if (ev_port->link[j].valid) {
1784                                 if (ev_port->link[j].queue_id == queue_id) {
1785                                         found = true;
1786                                         index = j;
1787                                         break;
1788                                 }
1789                         } else if (index == -1) {
1790                                 index = j;
1791                         }
1792
1793                 /* No free link slot and no existing entry: cannot link */
1794                 if (index == -1)
1795                         break;
1796
1797                 /* Check if already linked at the requested priority */
1798                 if (found && ev_port->link[j].priority == prio)
1799                         continue;
1800
1801                 if (dlb2_validate_port_link(ev_port, queue_id, found, index))
1802                         break; /* return index of offending queue */
1803
1804                 ev_queue = &dlb2->ev_queues[queue_id];
1805
1806                 if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
1807                         break; /* return index of offending queue */
1808
1809                 ev_queue->num_links++;
1810
1811                 ev_port->link[index].queue_id = queue_id;
1812                 ev_port->link[index].priority = prio;
1813                 ev_port->link[index].valid = true;
1814                 /* If the entry already existed, this was only a priority change */
1815                 if (!found)
1816                         ev_port->num_links++;
1817         }
1818         return i;
1819 }
1820
1821 static int16_t
1822 dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
1823                                 uint32_t qm_port_id,
1824                                 uint16_t qm_qid)
1825 {
1826         struct dlb2_unmap_qid_args cfg;
1827         int32_t ret;
1828
1829         if (handle == NULL)
1830                 return -EINVAL;
1831
1832         cfg.port_id = qm_port_id;
1833         cfg.qid = qm_qid;
1834
1835         ret = dlb2_iface_unmap_qid(handle, &cfg);
1836         if (ret < 0)
1837                 DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
1838                              ret, dlb2_error_strings[cfg.response.status]);
1839
1840         return ret;
1841 }
1842
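/* Unmap a load-balanced queue from a port. A missing mapping is not treated
 * as an error, since the eventdev unlink API may request unlinks for queues
 * that were never linked to this port.
 */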
1843 static int
1844 dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
1845                             struct dlb2_eventdev_port *ev_port,
1846                             struct dlb2_eventdev_queue *ev_queue)
1847 {
1848         int ret, i;
1849
1850         /* Don't unlink until start time. */
1851         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1852                 return 0;
1853
1854         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1855                 if (ev_port->link[i].valid &&
1856                     ev_port->link[i].queue_id == ev_queue->id)
1857                         break; /* found */
1858         }
1859
1860         /* This is expected with the eventdev API, which blindly
1861          * attempts to unmap all queues.
1862          */
1863         if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1864                 DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
1865                              ev_queue->qm_queue.id,
1866                              ev_port->qm_port.id);
1867                 return 0;
1868         }
1869
1870         ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
1871                                               ev_port->qm_port.id,
1872                                               ev_queue->qm_queue.id);
1873         if (!ret)
1874                 ev_port->link[i].mapped = false;
1875
1876         return ret;
1877 }
1878
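/* eventdev port_unlink entry point. Directed ports ignore unlink requests
 * (their single link is fixed); for load-balanced ports each valid link is
 * unmapped in hardware and removed from the port's link[] array.
 */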
1879 static int
1880 dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
1881                           uint8_t queues[], uint16_t nb_unlinks)
1882 {
1883         struct dlb2_eventdev_port *ev_port = event_port;
1884         struct dlb2_eventdev *dlb2;
1885         int i;
1886
1887         RTE_SET_USED(dev);
1888
1889         if (!ev_port->setup_done) {
1890                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1891                              ev_port->id);
1892                 rte_errno = -EINVAL;
1893                 return 0;
1894         }
1895
1896         if (queues == NULL || nb_unlinks == 0) {
1897                 DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
1898                 return 0; /* Ignore and return success */
1899         }
1900
1901         if (ev_port->qm_port.is_directed) {
1902                 DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
1903                              ev_port->id);
1904                 rte_errno = 0;
1905                 return nb_unlinks; /* as if success */
1906         }
1907
1908         dlb2 = ev_port->dlb2;
1909
1910         for (i = 0; i < nb_unlinks; i++) {
1911                 struct dlb2_eventdev_queue *ev_queue;
1912                 int ret, j;
1913
1914                 if (queues[i] >= dlb2->num_queues) {
1915                         DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
1916                         rte_errno = -EINVAL;
1917                         return i; /* return index of offending queue */
1918                 }
1919
1920                 ev_queue = &dlb2->ev_queues[queues[i]];
1921
1922                 /* Does a link exist? */
1923                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1924                         if (ev_port->link[j].queue_id == queues[i] &&
1925                             ev_port->link[j].valid)
1926                                 break;
1927
1928                 if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1929                         continue;
1930
1931                 ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
1932                 if (ret) {
1933                         DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
1934                                      ret, ev_port->id, queues[i]);
1935                         rte_errno = -ENOENT;
1936                         return i; /* return index of offending queue */
1937                 }
1938
1939                 ev_port->link[j].valid = false;
1940                 ev_port->num_links--;
1941                 ev_queue->num_links--;
1942         }
1943
1944         return nb_unlinks;
1945 }
1946
1947 static int
1948 dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
1949                                        void *event_port)
1950 {
1951         struct dlb2_eventdev_port *ev_port = event_port;
1952         struct dlb2_eventdev *dlb2;
1953         struct dlb2_hw_dev *handle;
1954         struct dlb2_pending_port_unmaps_args cfg;
1955         int ret;
1956
1957         RTE_SET_USED(dev);
1958
1959         if (!ev_port->setup_done) {
1960                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1961                              ev_port->id);
1962                 rte_errno = -EINVAL;
1963                 return 0;
1964         }
1965
1966         cfg.port_id = ev_port->qm_port.id;
1967         dlb2 = ev_port->dlb2;
1968         handle = &dlb2->qm_instance;
1969         ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
1970
1971         if (ret < 0) {
1972                 DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
1973                              ret, dlb2_error_strings[cfg.response.status]);
1974                 return ret;
1975         }
1976
1977         return cfg.response.id;
1978 }
1979
1980 static int
1981 dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
1982 {
1983         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1984         int ret, i;
1985
1986         /* If an event queue or port was previously configured, but hasn't been
1987          * reconfigured, reapply its original configuration.
1988          */
1989         for (i = 0; i < dlb2->num_queues; i++) {
1990                 struct dlb2_eventdev_queue *ev_queue;
1991
1992                 ev_queue = &dlb2->ev_queues[i];
1993
1994                 if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
1995                         continue;
1996
1997                 ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
1998                 if (ret < 0) {
1999                         DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d\n", i);
2000                         return ret;
2001                 }
2002         }
2003
2004         for (i = 0; i < dlb2->num_ports; i++) {
2005                 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2006
2007                 if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
2008                         continue;
2009
2010                 ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
2011                 if (ret < 0) {
2012                         DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d\n",
2013                                      i);
2014                         return ret;
2015                 }
2016         }
2017
2018         return 0;
2019 }
2020
2021 static int
2022 dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
2023 {
2024         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2025         int i;
2026
2027         /* Perform requested port->queue links */
2028         for (i = 0; i < dlb2->num_ports; i++) {
2029                 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2030                 int j;
2031
2032                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
2033                         struct dlb2_eventdev_queue *ev_queue;
2034                         uint8_t prio, queue_id;
2035
2036                         if (!ev_port->link[j].valid)
2037                                 continue;
2038
2039                         prio = ev_port->link[j].priority;
2040                         queue_id = ev_port->link[j].queue_id;
2041
2042                         if (dlb2_validate_port_link(ev_port, queue_id, true, j))
2043                                 return -EINVAL;
2044
2045                         ev_queue = &dlb2->ev_queues[queue_id];
2046
2047                         if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
2048                                 return -EINVAL;
2049                 }
2050         }
2051
2052         return 0;
2053 }
2054
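/* eventdev start: transition the run state under the resource lock, reapply
 * any previously-configured queues and ports, apply the port->queue links
 * that were deferred while stopped, verify that every port is set up and
 * every queue is linked, then start the scheduling domain.
 */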
2055 static int
2056 dlb2_eventdev_start(struct rte_eventdev *dev)
2057 {
2058         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2059         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
2060         struct dlb2_start_domain_args cfg;
2061         int ret, i;
2062
2063         rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
2064         if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
2065                 DLB2_LOG_ERR("bad state %d for dev_start\n",
2066                              (int)dlb2->run_state);
2067                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2068                 return -EINVAL;
2069         }
2070         dlb2->run_state = DLB2_RUN_STATE_STARTING;
2071         rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2072
2073         /* If the device was configured more than once, some event ports and/or
2074          * queues may need to be reconfigured.
2075          */
2076         ret = dlb2_eventdev_reapply_configuration(dev);
2077         if (ret)
2078                 return ret;
2079
2080         /* The DLB PMD delays port links until the device is started. */
2081         ret = dlb2_eventdev_apply_port_links(dev);
2082         if (ret)
2083                 return ret;
2084
2085         for (i = 0; i < dlb2->num_ports; i++) {
2086                 if (!dlb2->ev_ports[i].setup_done) {
2087                         DLB2_LOG_ERR("dlb2: port %d not setup\n", i);
2088                         return -ESTALE;
2089                 }
2090         }
2091
2092         for (i = 0; i < dlb2->num_queues; i++) {
2093                 if (dlb2->ev_queues[i].num_links == 0) {
2094                         DLB2_LOG_ERR("dlb2: queue %d is not linked\n", i);
2095                         return -ENOLINK;
2096                 }
2097         }
2098
2099         ret = dlb2_iface_sched_domain_start(handle, &cfg);
2100         if (ret < 0) {
2101                 DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
2102                              ret, dlb2_error_strings[cfg.response.status]);
2103                 return ret;
2104         }
2105
2106         dlb2->run_state = DLB2_RUN_STATE_STARTED;
2107         DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
2108
2109         return 0;
2110 }
2111
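/* Per-port-type map from event op (NEW/FORWARD/RELEASE) to the HCW command
 * byte. For directed ports, FORWARD is issued as a plain NEW enqueue and
 * RELEASE becomes a no-op.
 */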
2112 static uint8_t cmd_byte_map[DLB2_NUM_PORT_TYPES][DLB2_NUM_HW_SCHED_TYPES] = {
2113         {
2114                 /* Load-balanced cmd bytes */
2115                 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2116                 [RTE_EVENT_OP_FORWARD] = DLB2_FWD_CMD_BYTE,
2117                 [RTE_EVENT_OP_RELEASE] = DLB2_COMP_CMD_BYTE,
2118         },
2119         {
2120                 /* Directed cmd bytes */
2121                 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2122                 [RTE_EVENT_OP_FORWARD] = DLB2_NEW_CMD_BYTE,
2123                 [RTE_EVENT_OP_RELEASE] = DLB2_NOOP_CMD_BYTE,
2124         },
2125 };
2126
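/* Acquire up to DLB2_SW_CREDIT_BATCH_SZ hardware credits from the shared
 * LDB or DIR credit pool using a compare-and-swap, so per-port credit caches
 * can be replenished without a lock. Returns the number of credits acquired,
 * or 0 if the pool is empty or the CAS loses a race.
 */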
2127 static inline uint32_t
2128 dlb2_port_credits_get(struct dlb2_port *qm_port,
2129                       enum dlb2_hw_queue_types type)
2130 {
2131         uint32_t credits = *qm_port->credit_pool[type];
2132         uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2133
2134         if (unlikely(credits < batch_size))
2135                 batch_size = credits;
2136
2137         if (likely(credits &&
2138                    __atomic_compare_exchange_n(
2139                         qm_port->credit_pool[type],
2140                         &credits, credits - batch_size, false,
2141                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
2142                 return batch_size;
2143         else
2144                 return 0;
2145 }
2146
2147 static inline void
2148 dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
2149                           struct dlb2_eventdev_port *ev_port)
2150 {
2151         uint16_t quanta = ev_port->credit_update_quanta;
2152
2153         if (ev_port->inflight_credits >= quanta * 2) {
2154                 /* Replenish credits, saving one quanta for enqueues */
2155                 uint16_t val = ev_port->inflight_credits - quanta;
2156
2157                 __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
2158                 ev_port->inflight_credits -= val;
2159         }
2160 }
2161
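/* Check that a software credit is available for a NEW event. If the port's
 * local inflight credits are exhausted, a quantum of credits is reserved
 * from the device-wide inflight counter, respecting both the port's
 * new_event_threshold and the device's new_event_limit.
 */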
2162 static inline int
2163 dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
2164                               struct dlb2_eventdev_port *ev_port)
2165 {
2166         uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
2167                                                 __ATOMIC_SEQ_CST);
2168         const int num = 1;
2169
2170         if (unlikely(ev_port->inflight_max < sw_inflights)) {
2171                 DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2172                 rte_errno = -ENOSPC;
2173                 return 1;
2174         }
2175
2176         if (ev_port->inflight_credits < num) {
2177                 /* check if event enqueue brings ev_port over max threshold */
2178                 uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2179
2180                 if (sw_inflights + credit_update_quanta >
2181                                 dlb2->new_event_limit) {
2182                         DLB2_INC_STAT(
2183                         ev_port->stats.traffic.tx_nospc_new_event_limit,
2184                         1);
2185                         rte_errno = -ENOSPC;
2186                         return 1;
2187                 }
2188
2189                 __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
2190                                    __ATOMIC_SEQ_CST);
2191                 ev_port->inflight_credits += (credit_update_quanta);
2192
2193                 if (ev_port->inflight_credits < num) {
2194                         DLB2_INC_STAT(
2195                         ev_port->stats.traffic.tx_nospc_inflight_credits,
2196                         1);
2197                         rte_errno = -ENOSPC;
2198                         return 1;
2199                 }
2200         }
2201
2202         return 0;
2203 }
2204
2205 static inline int
2206 dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
2207 {
2208         if (unlikely(qm_port->cached_ldb_credits == 0)) {
2209                 qm_port->cached_ldb_credits =
2210                         dlb2_port_credits_get(qm_port,
2211                                               DLB2_LDB_QUEUE);
2212                 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2213                         DLB2_INC_STAT(
2214                         qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2215                         1);
2216                         DLB2_LOG_DBG("ldb credits exhausted\n");
2217                         return 1; /* credits exhausted */
2218                 }
2219         }
2220
2221         return 0;
2222 }
2223
2224 static inline int
2225 dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
2226 {
2227         if (unlikely(qm_port->cached_dir_credits == 0)) {
2228                 qm_port->cached_dir_credits =
2229                         dlb2_port_credits_get(qm_port,
2230                                               DLB2_DIR_QUEUE);
2231                 if (unlikely(qm_port->cached_dir_credits == 0)) {
2232                         DLB2_INC_STAT(
2233                         qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2234                         1);
2235                         DLB2_LOG_DBG("dir credits exhausted\n");
2236                         return 1; /* credits exhausted */
2237                 }
2238         }
2239
2240         return 0;
2241 }
2242
2243 static __rte_always_inline void
2244 dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
2245               struct process_local_port_data *port_data)
2246 {
2247         dlb2_movdir64b(port_data->pp_addr, qe4);
2248 }
2249
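/* Immediately return 'num' CQ tokens to the device by writing a CQ pop HCW
 * to the port's producer port; the owed_tokens bookkeeping is cleared since
 * the tokens have now been returned.
 */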
2250 static inline int
2251 dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
2252 {
2253         struct process_local_port_data *port_data;
2254         struct dlb2_cq_pop_qe *qe;
2255
2256         RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);
2257
2258         qe = qm_port->consume_qe;
2259
2260         qe->tokens = num - 1;
2261
2262         /* No store fence needed since no pointer is being sent, and CQ token
2263          * pops can be safely reordered with other HCWs.
2264          */
2265         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2266
2267         dlb2_movntdq_single(port_data->pp_addr, qe);
2268
2269         DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
2270
2271         qm_port->owed_tokens = 0;
2272
2273         return 0;
2274 }
2275
2276 static inline void
2277 dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2278                    bool do_sfence,
2279                    struct process_local_port_data *port_data)
2280 {
2281         /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2282          * application writes complete before enqueueing the QE.
2283          */
2284         if (do_sfence)
2285                 rte_wmb();
2286
2287         dlb2_pp_write(qm_port->qe4, port_data);
2288 }
2289
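/* Build a delayed token pop HCW in slot 'idx' of the port's QE cache line.
 * The pop returns all currently owed CQ tokens and is enqueued alongside the
 * application's QEs, rather than being issued separately on the dequeue
 * path.
 */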
2290 static inline void
2291 dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2292 {
2293         struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2294         int num = qm_port->owed_tokens;
2295
2296         qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2297         qe[idx].tokens = num - 1;
2298
2299         qm_port->owed_tokens = 0;
2300 }
2301
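/* Convert up to four rte_events into hardware control words (HCWs) in the
 * port's cache-line-sized QE buffer. The four-event case packs the QE
 * metadata with SSE inserts; smaller bursts fall back to a scalar loop.
 */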
2302 static inline void
2303 dlb2_event_build_hcws(struct dlb2_port *qm_port,
2304                       const struct rte_event ev[],
2305                       int num,
2306                       uint8_t *sched_type,
2307                       uint8_t *queue_id)
2308 {
2309         struct dlb2_enqueue_qe *qe;
2310         uint16_t sched_word[4];
2311         __m128i sse_qe[2];
2312         int i;
2313
2314         qe = qm_port->qe4;
2315
2316         sse_qe[0] = _mm_setzero_si128();
2317         sse_qe[1] = _mm_setzero_si128();
2318
2319         switch (num) {
2320         case 4:
2321                 /* Construct the metadata portion of two HCWs in one 128b SSE
2322                  * register. HCW metadata is constructed in the SSE registers
2323                  * like so:
2324                  * sse_qe[0][63:0]:   qe[0]'s metadata
2325                  * sse_qe[0][127:64]: qe[1]'s metadata
2326                  * sse_qe[1][63:0]:   qe[2]'s metadata
2327                  * sse_qe[1][127:64]: qe[3]'s metadata
2328                  */
2329
2330                 /* Convert the event operation into a command byte and store it
2331                  * in the metadata:
2332                  * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
2333                  * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2334                  * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
2335                  * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2336                  */
2337 #define DLB2_QE_CMD_BYTE 7
2338                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2339                                 cmd_byte_map[qm_port->is_directed][ev[0].op],
2340                                 DLB2_QE_CMD_BYTE);
2341                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2342                                 cmd_byte_map[qm_port->is_directed][ev[1].op],
2343                                 DLB2_QE_CMD_BYTE + 8);
2344                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2345                                 cmd_byte_map[qm_port->is_directed][ev[2].op],
2346                                 DLB2_QE_CMD_BYTE);
2347                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2348                                 cmd_byte_map[qm_port->is_directed][ev[3].op],
2349                                 DLB2_QE_CMD_BYTE + 8);
2350
2351                 /* Store priority, scheduling type, and queue ID in the sched
2352                  * word array because these values are re-used when the
2353                  * destination is a directed queue.
2354                  */
2355                 sched_word[0] = EV_TO_DLB2_PRIO(ev[0].priority) << 10 |
2356                                 sched_type[0] << 8 |
2357                                 queue_id[0];
2358                 sched_word[1] = EV_TO_DLB2_PRIO(ev[1].priority) << 10 |
2359                                 sched_type[1] << 8 |
2360                                 queue_id[1];
2361                 sched_word[2] = EV_TO_DLB2_PRIO(ev[2].priority) << 10 |
2362                                 sched_type[2] << 8 |
2363                                 queue_id[2];
2364                 sched_word[3] = EV_TO_DLB2_PRIO(ev[3].priority) << 10 |
2365                                 sched_type[3] << 8 |
2366                                 queue_id[3];
2367
2368                 /* Store the event priority, scheduling type, and queue ID in
2369                  * the metadata:
2370                  * sse_qe[0][31:16] = sched_word[0]
2371                  * sse_qe[0][95:80] = sched_word[1]
2372                  * sse_qe[1][31:16] = sched_word[2]
2373                  * sse_qe[1][95:80] = sched_word[3]
2374                  */
2375 #define DLB2_QE_QID_SCHED_WORD 1
2376                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2377                                              sched_word[0],
2378                                              DLB2_QE_QID_SCHED_WORD);
2379                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2380                                              sched_word[1],
2381                                              DLB2_QE_QID_SCHED_WORD + 4);
2382                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2383                                              sched_word[2],
2384                                              DLB2_QE_QID_SCHED_WORD);
2385                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2386                                              sched_word[3],
2387                                              DLB2_QE_QID_SCHED_WORD + 4);
2388
2389                 /* If the destination is a load-balanced queue, store the lock
2390                  * ID. If it is a directed queue, DLB places this field in
2391                  * bytes 10-11 of the received QE, so we format it accordingly:
2392                  * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
2393                  * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2394                  * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
2395                  * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2396                  */
2397 #define DLB2_QE_LOCK_ID_WORD 2
2398                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2399                                 (sched_type[0] == DLB2_SCHED_DIRECTED) ?
2400                                         sched_word[0] : ev[0].flow_id,
2401                                 DLB2_QE_LOCK_ID_WORD);
2402                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2403                                 (sched_type[1] == DLB2_SCHED_DIRECTED) ?
2404                                         sched_word[1] : ev[1].flow_id,
2405                                 DLB2_QE_LOCK_ID_WORD + 4);
2406                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2407                                 (sched_type[2] == DLB2_SCHED_DIRECTED) ?
2408                                         sched_word[2] : ev[2].flow_id,
2409                                 DLB2_QE_LOCK_ID_WORD);
2410                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2411                                 (sched_type[3] == DLB2_SCHED_DIRECTED) ?
2412                                         sched_word[3] : ev[3].flow_id,
2413                                 DLB2_QE_LOCK_ID_WORD + 4);
2414
2415                 /* Store the event type and sub event type in the metadata:
2416                  * sse_qe[0][15:0]  = flow_id[0]
2417                  * sse_qe[0][79:64] = flow_id[1]
2418                  * sse_qe[1][15:0]  = flow_id[2]
2419                  * sse_qe[1][79:64] = flow_id[3]
2420                  */
2421 #define DLB2_QE_EV_TYPE_WORD 0
2422                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2423                                              ev[0].sub_event_type << 8 |
2424                                                 ev[0].event_type,
2425                                              DLB2_QE_EV_TYPE_WORD);
2426                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2427                                              ev[1].sub_event_type << 8 |
2428                                                 ev[1].event_type,
2429                                              DLB2_QE_EV_TYPE_WORD + 4);
2430                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2431                                              ev[2].sub_event_type << 8 |
2432                                                 ev[2].event_type,
2433                                              DLB2_QE_EV_TYPE_WORD);
2434                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2435                                              ev[3].sub_event_type << 8 |
2436                                                 ev[3].event_type,
2437                                              DLB2_QE_EV_TYPE_WORD + 4);
2438
2439                 /* Store the metadata to memory (use the double-precision
2440                  * _mm_storeh_pd because there is no integer function for
2441                  * storing the upper 64b):
2442                  * qe[0] metadata = sse_qe[0][63:0]
2443                  * qe[1] metadata = sse_qe[0][127:64]
2444                  * qe[2] metadata = sse_qe[1][63:0]
2445                  * qe[3] metadata = sse_qe[1][127:64]
2446                  */
2447                 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2448                 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2449                               (__m128d)sse_qe[0]);
2450                 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2451                 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2452                               (__m128d)sse_qe[1]);
2453
2454                 qe[0].data = ev[0].u64;
2455                 qe[1].data = ev[1].u64;
2456                 qe[2].data = ev[2].u64;
2457                 qe[3].data = ev[3].u64;
2458
2459                 break;
2460         case 3:
2461         case 2:
2462         case 1:
2463                 for (i = 0; i < num; i++) {
2464                         qe[i].cmd_byte =
2465                                 cmd_byte_map[qm_port->is_directed][ev[i].op];
2466                         qe[i].sched_type = sched_type[i];
2467                         qe[i].data = ev[i].u64;
2468                         qe[i].qid = queue_id[i];
2469                         qe[i].priority = EV_TO_DLB2_PRIO(ev[i].priority);
2470                         qe[i].lock_id = ev[i].flow_id;
2471                         if (sched_type[i] == DLB2_SCHED_DIRECTED) {
2472                                 struct dlb2_msg_info *info =
2473                                         (struct dlb2_msg_info *)&qe[i].lock_id;
2474
2475                                 info->qid = queue_id[i];
2476                                 info->sched_type = DLB2_SCHED_DIRECTED;
2477                                 info->priority = qe[i].priority;
2478                         }
2479                         qe[i].u.event_type.major = ev[i].event_type;
2480                         qe[i].u.event_type.sub = ev[i].sub_event_type;
2481                 }
2482                 break;
2483         case 0:
2484                 break;
2485         }
2486 }
2487
2488 static inline int
2489 dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2490                         struct dlb2_port *qm_port,
2491                         const struct rte_event ev[],
2492                         uint8_t *sched_type,
2493                         uint8_t *queue_id)
2494 {
2495         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2496         struct dlb2_eventdev_queue *ev_queue;
2497         uint16_t *cached_credits = NULL;
2498         struct dlb2_queue *qm_queue;
2499
2500         ev_queue = &dlb2->ev_queues[ev->queue_id];
2501         qm_queue = &ev_queue->qm_queue;
2502         *queue_id = qm_queue->id;
2503
2504         /* Ignore sched_type and hardware credits on release events */
2505         if (ev->op == RTE_EVENT_OP_RELEASE)
2506                 goto op_check;
2507
2508         if (!qm_queue->is_directed) {
2509                 /* Load balanced destination queue */
2510
2511                 if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2512                         rte_errno = -ENOSPC;
2513                         return 1;
2514                 }
2515                 cached_credits = &qm_port->cached_ldb_credits;
2516
2517                 switch (ev->sched_type) {
2518                 case RTE_SCHED_TYPE_ORDERED:
2519                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2520                         if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2521                                 DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
2522                                              *queue_id);
2523                                 rte_errno = -EINVAL;
2524                                 return 1;
2525                         }
2526                         *sched_type = DLB2_SCHED_ORDERED;
2527                         break;
2528                 case RTE_SCHED_TYPE_ATOMIC:
2529                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2530                         *sched_type = DLB2_SCHED_ATOMIC;
2531                         break;
2532                 case RTE_SCHED_TYPE_PARALLEL:
2533                         DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2534                         if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2535                                 *sched_type = DLB2_SCHED_ORDERED;
2536                         else
2537                                 *sched_type = DLB2_SCHED_UNORDERED;
2538                         break;
2539                 default:
2540                         DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2541                         DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2542                         rte_errno = -EINVAL;
2543                         return 1;
2544                 }
2545         } else {
2546                 /* Directed destination queue */
2547
2548                 if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2549                         rte_errno = -ENOSPC;
2550                         return 1;
2551                 }
2552                 cached_credits = &qm_port->cached_dir_credits;
2553
2554                 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2555
2556                 *sched_type = DLB2_SCHED_DIRECTED;
2557         }
2558
2559 op_check:
2560         switch (ev->op) {
2561         case RTE_EVENT_OP_NEW:
2562                 /* Check that a sw credit is available */
2563                 if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2564                         rte_errno = -ENOSPC;
2565                         return 1;
2566                 }
2567                 ev_port->inflight_credits--;
2568                 (*cached_credits)--;
2569                 break;
2570         case RTE_EVENT_OP_FORWARD:
2571                 /* Check for outstanding_releases underflow. If this occurs,
2572                  * the application is not using the EVENT_OPs correctly; for
2573                  * example, forwarding or releasing events that were not
2574                  * dequeued.
2575                  */
2576                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2577                 ev_port->outstanding_releases--;
2578                 qm_port->issued_releases++;
2579                 (*cached_credits)--;
2580                 break;
2581         case RTE_EVENT_OP_RELEASE:
2582                 ev_port->inflight_credits++;
2583                 /* Check for outstanding_releases underflow. If this occurs,
2584                  * the application is not using the EVENT_OPs correctly; for
2585                  * example, forwarding or releasing events that were not
2586                  * dequeued.
2587                  */
2588                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2589                 ev_port->outstanding_releases--;
2590                 qm_port->issued_releases++;
2591
2592                 /* Replenish s/w credits if enough are cached */
2593                 dlb2_replenish_sw_credits(dlb2, ev_port);
2594                 break;
2595         }
2596
2597         DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2598         DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2599
2600 #ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
2601         if (ev->op != RTE_EVENT_OP_RELEASE) {
2602                 DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
2603                 DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
2604         }
2605 #endif
2606
2607         return 0;
2608 }
2609
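/* Common enqueue path. Events are processed in cache-line-sized groups of
 * four QEs. When 'use_delayed' is set and the port is in DELAYED_POP mode,
 * a token pop HCW is inserted into the group once issued_releases crosses
 * the port's token_pop_thresh, so CQ tokens are returned as part of the
 * enqueue rather than on the dequeue path. The pop QE is not counted in the
 * returned enqueue total.
 */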
2610 static inline uint16_t
2611 __dlb2_event_enqueue_burst(void *event_port,
2612                            const struct rte_event events[],
2613                            uint16_t num,
2614                            bool use_delayed)
2615 {
2616         struct dlb2_eventdev_port *ev_port = event_port;
2617         struct dlb2_port *qm_port = &ev_port->qm_port;
2618         struct process_local_port_data *port_data;
2619         int i;
2620
2621         RTE_ASSERT(ev_port->enq_configured);
2622         RTE_ASSERT(events != NULL);
2623
2624         i = 0;
2625
2626         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2627
2628         while (i < num) {
2629                 uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
2630                 uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
2631                 int pop_offs = 0;
2632                 int j = 0;
2633
2634                 memset(qm_port->qe4,
2635                        0,
2636                        DLB2_NUM_QES_PER_CACHE_LINE *
2637                        sizeof(struct dlb2_enqueue_qe));
2638
2639                 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2640                         const struct rte_event *ev = &events[i + j];
2641                         int16_t thresh = qm_port->token_pop_thresh;
2642
2643                         if (use_delayed &&
2644                             qm_port->token_pop_mode == DELAYED_POP &&
2645                             (ev->op == RTE_EVENT_OP_FORWARD ||
2646                              ev->op == RTE_EVENT_OP_RELEASE) &&
2647                             qm_port->issued_releases >= thresh - 1) {
2648                                 /* Insert the token pop QE and break out. This
2649                                  * may result in a partial HCW, but that is
2650                                  * simpler than supporting arbitrary QE
2651                                  * insertion.
2652                                  */
2653                                 dlb2_construct_token_pop_qe(qm_port, j);
2654
2655                                 /* Reset the releases for the next QE batch */
2656                                 qm_port->issued_releases -= thresh;
2657
2658                                 pop_offs = 1;
2659                                 j++;
2660                                 break;
2661                         }
2662
2663                         if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
2664                                                     &sched_types[j],
2665                                                     &queue_ids[j]))
2666                                 break;
2667                 }
2668
2669                 if (j == 0)
2670                         break;
2671
2672                 dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
2673                                       sched_types, queue_ids);
2674
2675                 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2676
2677                 /* Don't include the token pop QE in the enqueue count */
2678                 i += j - pop_offs;
2679
2680                 /* Don't interpret j < DLB2_NUM_... as out-of-credits if
2681                  * pop_offs != 0
2682                  */
2683                 if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2684                         break;
2685         }
2686
2687         return i;
2688 }
2689
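/* Thin wrappers around __dlb2_event_enqueue_burst(). The *_delayed variants
 * enable delayed token pop insertion and are intended for ports using the
 * DELAYED_POP token pop mode.
 */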
2690 static uint16_t
2691 dlb2_event_enqueue_burst(void *event_port,
2692                              const struct rte_event events[],
2693                              uint16_t num)
2694 {
2695         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2696 }
2697
2698 static uint16_t
2699 dlb2_event_enqueue_burst_delayed(void *event_port,
2700                                      const struct rte_event events[],
2701                                      uint16_t num)
2702 {
2703         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2704 }
2705
2706 static inline uint16_t
2707 dlb2_event_enqueue(void *event_port,
2708                    const struct rte_event events[])
2709 {
2710         return __dlb2_event_enqueue_burst(event_port, events, 1, false);
2711 }
2712
2713 static inline uint16_t
2714 dlb2_event_enqueue_delayed(void *event_port,
2715                            const struct rte_event events[])
2716 {
2717         return __dlb2_event_enqueue_burst(event_port, events, 1, true);
2718 }
2719
2720 static uint16_t
2721 dlb2_event_enqueue_new_burst(void *event_port,
2722                              const struct rte_event events[],
2723                              uint16_t num)
2724 {
2725         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2726 }
2727
2728 static uint16_t
2729 dlb2_event_enqueue_new_burst_delayed(void *event_port,
2730                                      const struct rte_event events[],
2731                                      uint16_t num)
2732 {
2733         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2734 }
2735
2736 static uint16_t
2737 dlb2_event_enqueue_forward_burst(void *event_port,
2738                                  const struct rte_event events[],
2739                                  uint16_t num)
2740 {
2741         return __dlb2_event_enqueue_burst(event_port, events, num, false);
2742 }
2743
2744 static uint16_t
2745 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
2746                                          const struct rte_event events[],
2747                                          uint16_t num)
2748 {
2749         return __dlb2_event_enqueue_burst(event_port, events, num, true);
2750 }
2751
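/* Bulk-release 'n' previously dequeued events on a port. Load-balanced
 * ports enqueue completion HCWs, inserting a delayed token pop QE when the
 * pop threshold is reached; directed ports only need the software credit
 * accounting at the end.
 */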
2752 static void
2753 dlb2_event_release(struct dlb2_eventdev *dlb2,
2754                    uint8_t port_id,
2755                    int n)
2756 {
2757         struct process_local_port_data *port_data;
2758         struct dlb2_eventdev_port *ev_port;
2759         struct dlb2_port *qm_port;
2760         int i;
2761
2762         if (port_id > dlb2->num_ports) {
2763                 DLB2_LOG_ERR("Invalid port id %d in dlb2_event_release\n",
2764                              port_id);
2765                 rte_errno = -EINVAL;
2766                 return;
2767         }
2768
2769         ev_port = &dlb2->ev_ports[port_id];
2770         qm_port = &ev_port->qm_port;
2771         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2772
2773         i = 0;
2774
2775         if (qm_port->is_directed) {
2776                 i = n;
2777                 goto sw_credit_update;
2778         }
2779
2780         while (i < n) {
2781                 int pop_offs = 0;
2782                 int j = 0;
2783
2784                 /* Zero-out QEs */
2785                 qm_port->qe4[0].cmd_byte = 0;
2786                 qm_port->qe4[1].cmd_byte = 0;
2787                 qm_port->qe4[2].cmd_byte = 0;
2788                 qm_port->qe4[3].cmd_byte = 0;
2789
2790                 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
2791                         int16_t thresh = qm_port->token_pop_thresh;
2792
2793                         if (qm_port->token_pop_mode == DELAYED_POP &&
2794                             qm_port->issued_releases >= thresh - 1) {
2795                                 /* Insert the token pop QE */
2796                                 dlb2_construct_token_pop_qe(qm_port, j);
2797
2798                                 /* Reset the releases for the next QE batch */
2799                                 qm_port->issued_releases -= thresh;
2800
2801                                 pop_offs = 1;
2802                                 j++;
2803                                 break;
2804                         }
2805
2806                         qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
2807                         qm_port->issued_releases++;
2808                 }
2809
2810                 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2811
2812                 /* Don't include the token pop QE in the release count */
2813                 i += j - pop_offs;
2814         }
2815
2816 sw_credit_update:
2817         /* each release returns one credit */
2818         if (!ev_port->outstanding_releases) {
2819                 DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
2820                              __func__);
2821                 return;
2822         }
2823         ev_port->outstanding_releases -= i;
2824         ev_port->inflight_credits += i;
2825
2826         /* Replenish s/w credits if enough releases are performed */
2827         dlb2_replenish_sw_credits(dlb2, ev_port);
2828 }
2829
2830 static inline void
2831 dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
2832 {
2833         uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2834
2835         /* increment port credits, and return to pool if exceeds threshold */
2836         if (!qm_port->is_directed) {
2837                 qm_port->cached_ldb_credits += num;
2838                 if (qm_port->cached_ldb_credits >= 2 * batch_size) {
2839                         __atomic_fetch_add(
2840                                 qm_port->credit_pool[DLB2_LDB_QUEUE],
2841                                 batch_size, __ATOMIC_SEQ_CST);
2842                         qm_port->cached_ldb_credits -= batch_size;
2843                 }
2844         } else {
2845                 qm_port->cached_dir_credits += num;
2846                 if (qm_port->cached_dir_credits >= 2 * batch_size) {
2847                         __atomic_fetch_add(
2848                                 qm_port->credit_pool[DLB2_DIR_QUEUE],
2849                                 batch_size, __ATOMIC_SEQ_CST);
2850                         qm_port->cached_dir_credits -= batch_size;
2851                 }
2852         }
2853 }
2854
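/* Wait for a CQ entry to arrive or for the dequeue timeout to expire.
 * Returns 1 if the timeout has expired. If umwait is allowed, the CQ entry's
 * gen-bit word is monitored with rte_power_monitor(); otherwise the thread
 * busy-waits for at most one poll interval before the caller rechecks the
 * CQ.
 */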
2855 static inline int
2856 dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
2857                   struct dlb2_eventdev_port *ev_port,
2858                   struct dlb2_port *qm_port,
2859                   uint64_t timeout,
2860                   uint64_t start_ticks)
2861 {
2862         struct process_local_port_data *port_data;
2863         uint64_t elapsed_ticks;
2864
2865         port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2866
2867         elapsed_ticks = rte_get_timer_cycles() - start_ticks;
2868
2869         /* Wait/poll time expired */
2870         if (elapsed_ticks >= timeout) {
2871                 return 1;
2872         } else if (dlb2->umwait_allowed) {
2873                 volatile struct dlb2_dequeue_qe *cq_base;
2874                 union {
2875                         uint64_t raw_qe[2];
2876                         struct dlb2_dequeue_qe qe;
2877                 } qe_mask;
2878                 uint64_t expected_value;
2879                 volatile uint64_t *monitor_addr;
2880
2881                 qe_mask.qe.cq_gen = 1; /* set mask */
2882
2883                 cq_base = port_data->cq_base;
2884                 monitor_addr = (volatile uint64_t *)(volatile void *)
2885                         &cq_base[qm_port->cq_idx];
2886                 monitor_addr++; /* cq_gen bit is in second 64bit location */
2887
2888                 if (qm_port->gen_bit)
2889                         expected_value = qe_mask.raw_qe[1];
2890                 else
2891                         expected_value = 0;
2892
2893                 rte_power_monitor(monitor_addr, expected_value,
2894                                   qe_mask.raw_qe[1], timeout + start_ticks,
2895                                   sizeof(uint64_t));
2896
2897                 DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
2898         } else {
2899                 uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
2900                 uint64_t curr_ticks = rte_get_timer_cycles();
2901                 uint64_t init_ticks = curr_ticks;
2902
2903                 while ((curr_ticks - start_ticks < timeout) &&
2904                        (curr_ticks - init_ticks < poll_interval))
2905                         curr_ticks = rte_get_timer_cycles();
2906         }
2907
2908         return 0;
2909 }
2910
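/*
 * Illustrative sketch (not part of the driver) of the bounded busy-poll
 * fallback in dlb2_dequeue_wait(): spin until either the caller's overall
 * timeout has elapsed (return 1, matching the check at the top of the next
 * dlb2_dequeue_wait() call) or one poll interval has passed (return 0 so the
 * CQ is re-checked). The "example_" name is hypothetical; the driver's poll
 * interval is the build-time RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL used above.
 */
static inline int
example_bounded_poll(uint64_t start_ticks, uint64_t timeout,
                     uint64_t poll_interval)
{
        uint64_t init_ticks = rte_get_timer_cycles();
        uint64_t curr_ticks = init_ticks;

        while ((curr_ticks - start_ticks < timeout) &&
               (curr_ticks - init_ticks < poll_interval))
                curr_ticks = rte_get_timer_cycles();

        return (curr_ticks - start_ticks >= timeout) ? 1 : 0;
}
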
2911 static inline int
2912 dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
2913                          struct dlb2_port *qm_port,
2914                          struct rte_event *events,
2915                          struct dlb2_dequeue_qe *qes,
2916                          int cnt)
2917 {
2918         uint8_t *qid_mappings = qm_port->qid_mappings;
2919         int i, num, evq_id;
2920
2921         for (i = 0, num = 0; i < cnt; i++) {
2922                 struct dlb2_dequeue_qe *qe = &qes[i];
2923                 int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
2924                         [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2925                         [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2926                         [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2927                         [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2928                 };
2929
2930                 /* Fill in event information.
2931                  * Note that the flow_id must be embedded in the data by
2932                  * the application, e.g. in the mbuf RSS hash field if the
2933                  * data buffer is an mbuf.
2934                  */
2935                 if (unlikely(qe->error)) {
2936                         DLB2_LOG_ERR("QE error bit ON\n");
2937                         DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
2938                         dlb2_consume_qe_immediate(qm_port, 1);
2939                         continue; /* Ignore */
2940                 }
2941
2942                 events[num].u64 = qe->data;
2943                 events[num].flow_id = qe->flow_id;
2944                 events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
2945                 events[num].event_type = qe->u.event_type.major;
2946                 events[num].sub_event_type = qe->u.event_type.sub;
2947                 events[num].sched_type = sched_type_map[qe->sched_type];
2948                 events[num].impl_opaque = qe->qid_depth;
2949
2950                 /* qid not preserved for directed queues */
2951                 if (qm_port->is_directed)
2952                         evq_id = ev_port->link[0].queue_id;
2953                 else
2954                         evq_id = qid_mappings[qe->qid];
2955
2956                 events[num].queue_id = evq_id;
2957                 DLB2_INC_STAT(
2958                         ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
2959                         1);
2960                 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
2961                 num++;
2962         }
2963
2964         DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
2965
2966         return num;
2967 }
2968
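/*
 * Illustrative sketch (not part of the driver): how an application consuming
 * events from this PMD might read the fields filled in above. impl_opaque
 * carries the queue depth level sampled by hardware (qe->qid_depth), and u64
 * holds the application data (e.g. an mbuf pointer). The "example_" name is
 * hypothetical.
 */
static inline void
example_read_dequeued_events(const struct rte_event *events, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                uint8_t depth_level = events[i].impl_opaque;
                uint8_t evq = events[i].queue_id;

                RTE_SET_USED(depth_level);
                RTE_SET_USED(evq);
                /* events[i].u64 is the enqueued data, untouched by the PMD */
        }
}
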
2969 static inline int
2970 dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
2971                               struct dlb2_port *qm_port,
2972                               struct rte_event *events,
2973                               struct dlb2_dequeue_qe *qes)
2974 {
2975         int sched_type_map[] = {
2976                 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2977                 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2978                 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2979                 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2980         };
2981         const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
2982         uint8_t *qid_mappings = qm_port->qid_mappings;
2983         __m128i sse_evt[2];
2984
2985         /* In the unlikely case that any of the QE error bits are set, process
2986          * them one at a time.
2987          */
2988         if (unlikely(qes[0].error || qes[1].error ||
2989                      qes[2].error || qes[3].error))
2990                 return dlb2_process_dequeue_qes(ev_port, qm_port, events,
2991                                                  qes, num_events);
2992
2993         events[0].u64 = qes[0].data;
2994         events[1].u64 = qes[1].data;
2995         events[2].u64 = qes[2].data;
2996         events[3].u64 = qes[3].data;
2997
2998         /* Construct the metadata portion of two struct rte_events
2999          * in one 128b SSE register. Event metadata is constructed in the SSE
3000          * registers like so:
3001          * sse_evt[0][63:0]:   event[0]'s metadata
3002          * sse_evt[0][127:64]: event[1]'s metadata
3003          * sse_evt[1][63:0]:   event[2]'s metadata
3004          * sse_evt[1][127:64]: event[3]'s metadata
3005          */
3006         sse_evt[0] = _mm_setzero_si128();
3007         sse_evt[1] = _mm_setzero_si128();
3008
3009         /* Convert the hardware queue ID to an event queue ID and store it in
3010          * the metadata:
3011          * sse_evt[0][47:40]   = qid_mappings[qes[0].qid]
3012          * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
3013          * sse_evt[1][47:40]   = qid_mappings[qes[2].qid]
3014          * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
3015          */
3016 #define DLB_EVENT_QUEUE_ID_BYTE 5
3017         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3018                                      qid_mappings[qes[0].qid],
3019                                      DLB_EVENT_QUEUE_ID_BYTE);
3020         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3021                                      qid_mappings[qes[1].qid],
3022                                      DLB_EVENT_QUEUE_ID_BYTE + 8);
3023         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3024                                      qid_mappings[qes[2].qid],
3025                                      DLB_EVENT_QUEUE_ID_BYTE);
3026         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3027                                      qid_mappings[qes[3].qid],
3028                                      DLB_EVENT_QUEUE_ID_BYTE + 8);
3029
3030         /* Convert the hardware priority to an event priority and store it in
3031          * the metadata, while also returning the queue depth status
3032          * value captured by the hardware, storing it in impl_opaque, which can
3033          * be read by the application but not modified:
3034          * sse_evt[0][55:48]   = DLB2_TO_EV_PRIO(qes[0].priority)
3035          * sse_evt[0][63:56]   = qes[0].qid_depth
3036          * sse_evt[0][119:112] = DLB2_TO_EV_PRIO(qes[1].priority)
3037          * sse_evt[0][127:120] = qes[1].qid_depth
3038          * sse_evt[1][55:48]   = DLB2_TO_EV_PRIO(qes[2].priority)
3039          * sse_evt[1][63:56]   = qes[2].qid_depth
3040          * sse_evt[1][119:112] = DLB2_TO_EV_PRIO(qes[3].priority)
3041          * sse_evt[1][127:120] = qes[3].qid_depth
3042          */
3043 #define DLB_EVENT_PRIO_IMPL_OPAQUE_WORD 3
3044 #define DLB_BYTE_SHIFT 8
3045         sse_evt[0] =
3046                 _mm_insert_epi16(sse_evt[0],
3047                         DLB2_TO_EV_PRIO((uint8_t)qes[0].priority) |
3048                         (qes[0].qid_depth << DLB_BYTE_SHIFT),
3049                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3050         sse_evt[0] =
3051                 _mm_insert_epi16(sse_evt[0],
3052                         DLB2_TO_EV_PRIO((uint8_t)qes[1].priority) |
3053                         (qes[1].qid_depth << DLB_BYTE_SHIFT),
3054                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3055         sse_evt[1] =
3056                 _mm_insert_epi16(sse_evt[1],
3057                         DLB2_TO_EV_PRIO((uint8_t)qes[2].priority) |
3058                         (qes[2].qid_depth << DLB_BYTE_SHIFT),
3059                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3060         sse_evt[1] =
3061                 _mm_insert_epi16(sse_evt[1],
3062                         DLB2_TO_EV_PRIO((uint8_t)qes[3].priority) |
3063                         (qes[3].qid_depth << DLB_BYTE_SHIFT),
3064                         DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3065
3066         /* Write the event type, sub event type, and flow_id to the event
3067          * metadata.
3068          * sse_evt[0][31:0]   = qes[0].flow_id |
3069          *                      qes[0].u.event_type.major << 28 |
3070          *                      qes[0].u.event_type.sub << 20;
3071          * sse_evt[0][95:64]  = qes[1].flow_id |
3072          *                      qes[1].u.event_type.major << 28 |
3073          *                      qes[1].u.event_type.sub << 20;
3074          * sse_evt[1][31:0]   = qes[2].flow_id |
3075          *                      qes[2].u.event_type.major << 28 |
3076          *                      qes[2].u.event_type.sub << 20;
3077          * sse_evt[1][95:64]  = qes[3].flow_id |
3078          *                      qes[3].u.event_type.major << 28 |
3079          *                      qes[3].u.event_type.sub << 20;
3080          */
3081 #define DLB_EVENT_EV_TYPE_DW 0
3082 #define DLB_EVENT_EV_TYPE_SHIFT 28
3083 #define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
3084         sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3085                         qes[0].flow_id |
3086                         qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3087                         qes[0].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3088                         DLB_EVENT_EV_TYPE_DW);
3089         sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3090                         qes[1].flow_id |
3091                         qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3092                         qes[1].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3093                         DLB_EVENT_EV_TYPE_DW + 2);
3094         sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3095                         qes[2].flow_id |
3096                         qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3097                         qes[2].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3098                         DLB_EVENT_EV_TYPE_DW);
3099         sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3100                         qes[3].flow_id |
3101                         qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT  |
3102                         qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3103                         DLB_EVENT_EV_TYPE_DW + 2);
3104
3105         /* Write the sched type to the event metadata. 'op' and 'rsvd' are not
3106          * set:
3107          * sse_evt[0][39:32]  = sched_type_map[qes[0].sched_type] << 6
3108          * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
3109          * sse_evt[1][39:32]  = sched_type_map[qes[2].sched_type] << 6
3110          * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
3111          */
3112 #define DLB_EVENT_SCHED_TYPE_BYTE 4
3113 #define DLB_EVENT_SCHED_TYPE_SHIFT 6
3114         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3115                 sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3116                 DLB_EVENT_SCHED_TYPE_BYTE);
3117         sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3118                 sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3119                 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3120         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3121                 sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3122                 DLB_EVENT_SCHED_TYPE_BYTE);
3123         sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3124                 sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3125                 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3126
3127         /* Store the metadata to the event (use the double-precision
3128          * _mm_storeh_pd because there is no integer function for storing the
3129          * upper 64b):
3130          * events[0].event = sse_evt[0][63:0]
3131          * events[1].event = sse_evt[0][127:64]
3132          * events[2].event = sse_evt[1][63:0]
3133          * events[3].event = sse_evt[1][127:64]
3134          */
3135         _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3136         _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3137         _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3138         _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3139
3140         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3141         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3142         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3143         DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3144
3145         DLB2_INC_STAT(
3146                 ev_port->stats.queue[events[0].queue_id].
3147                         qid_depth[qes[0].qid_depth],
3148                 1);
3149         DLB2_INC_STAT(
3150                 ev_port->stats.queue[events[1].queue_id].
3151                         qid_depth[qes[1].qid_depth],
3152                 1);
3153         DLB2_INC_STAT(
3154                 ev_port->stats.queue[events[2].queue_id].
3155                         qid_depth[qes[2].qid_depth],
3156                 1);
3157         DLB2_INC_STAT(
3158                 ev_port->stats.queue[events[3].queue_id].
3159                         qid_depth[qes[3].qid_depth],
3160                 1);
3161
3162         DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3163
3164         return num_events;
3165 }
3166
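/*
 * Illustrative sketch (not part of the driver): a scalar reference for the
 * metadata layout the SSE path above assembles, useful when cross-checking
 * the byte/word insert offsets. The "example_" name is hypothetical; prio is
 * assumed to already be converted with DLB2_TO_EV_PRIO(), and 'op'/'rsvd'
 * (bits 37:32) are left zero exactly as in the SSE path.
 */
static inline uint64_t
example_build_event_metadata(uint32_t flow_id, uint8_t ev_type,
                             uint8_t sub_ev_type, uint8_t sched_type,
                             uint8_t queue_id, uint8_t prio, uint8_t qid_depth)
{
        uint64_t md = 0;

        md |= (uint64_t)(flow_id & 0xFFFFF);      /* [19:0]  flow_id */
        md |= (uint64_t)sub_ev_type << 20;        /* [27:20] sub_event_type */
        md |= (uint64_t)(ev_type & 0xF) << 28;    /* [31:28] event_type */
        md |= (uint64_t)(sched_type & 0x3) << 38; /* [39:38] sched_type */
        md |= (uint64_t)queue_id << 40;           /* [47:40] queue_id */
        md |= (uint64_t)prio << 48;               /* [55:48] priority */
        md |= (uint64_t)qid_depth << 56;          /* [63:56] impl_opaque */

        return md;
}
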
3167 static __rte_always_inline int
3168 dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3169 {
3170         volatile struct dlb2_dequeue_qe *cq_addr;
3171         uint8_t xor_mask[2] = {0x0F, 0x00};
3172         const uint8_t and_mask = 0x0F;
3173         __m128i *qes = (__m128i *)qe;
3174         uint8_t gen_bits, gen_bit;
3175         uintptr_t addr[4];
3176         uint16_t idx;
3177
3178         cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3179
3180         idx = qm_port->cq_idx;
3181
3182         /* Load the next 4 QEs */
3183         addr[0] = (uintptr_t)&cq_addr[idx];
3184         addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
3185         addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
3186         addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3187
3188         /* Prefetch the next batch of QEs (all CQs span at least 8 cache lines) */
3189         rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3190         rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3191         rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3192         rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3193
3194         /* Correct the xor_mask for wrap-around QEs */
3195         gen_bit = qm_port->gen_bit;
3196         xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
3197         xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
3198         xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3199
3200         /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3201          * valid, then QEs[0:N-1] are too.
3202          */
3203         qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3204         rte_compiler_barrier();
3205         qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3206         rte_compiler_barrier();
3207         qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3208         rte_compiler_barrier();
3209         qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3210
3211         /* Extract and combine the gen bits */
3212         gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3213                    ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3214                    ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3215                    ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3216
3217         /* XOR the combined bits such that a 1 represents a valid QE */
3218         gen_bits ^= xor_mask[gen_bit];
3219
3220         /* Mask off gen bits we don't care about */
3221         gen_bits &= and_mask;
3222
3223         return __builtin_popcount(gen_bits);
3224 }
3225
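/*
 * Illustrative sketch (not part of the driver) of the gen-bit validity check
 * above: each QE's gen bit is compared with the value expected for the
 * current pass over the CQ. Because the device writes QEs in order, the valid
 * QEs always form a prefix, so a popcount of the per-slot match bits equals
 * the number of consumable QEs. The "example_" name is hypothetical.
 */
static inline int
example_count_valid_qes(const uint8_t gen_bit_of_qe[4],
                        const uint8_t expected_gen_bit[4])
{
        uint8_t match_bits = 0;
        int i;

        for (i = 0; i < 4; i++)
                match_bits |= (uint8_t)
                        ((gen_bit_of_qe[i] == expected_gen_bit[i]) << i);

        return __builtin_popcount(match_bits);
}
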
3226 static inline void
3227 dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
3228 {
3229         uint16_t idx = qm_port->cq_idx_unmasked + cnt;
3230
3231         qm_port->cq_idx_unmasked = idx;
3232         qm_port->cq_idx = idx & qm_port->cq_depth_mask;
3233         qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
3234 }
3235
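/*
 * Illustrative sketch (not part of the driver): with a power-of-two CQ depth,
 * the unmasked index divided by the depth counts completed passes over the
 * ring, and the low bit of that count (inverted, since the first pass expects
 * gen = 1) is the expected gen bit. This is the same arithmetic as
 * dlb2_inc_cq_idx() above, assuming gen_bit_shift equals log2 of the CQ
 * depth. The "example_" name is hypothetical.
 */
static inline uint8_t
example_expected_gen_bit(uint16_t unmasked_idx, uint16_t cq_depth)
{
        uint16_t passes = unmasked_idx / cq_depth; /* depth is a power of 2 */

        return (uint8_t)(~passes & 0x1);
}
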
3236 static inline int16_t
3237 dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
3238                        struct dlb2_eventdev_port *ev_port,
3239                        struct rte_event *events,
3240                        uint16_t max_num,
3241                        uint64_t dequeue_timeout_ticks)
3242 {
3243         uint64_t timeout;
3244         uint64_t start_ticks = 0ULL;
3245         struct dlb2_port *qm_port;
3246         int num = 0;
3247
3248         qm_port = &ev_port->qm_port;
3249
3250         /* We have a special implementation for waiting. Wait can be:
3251          * 1) no waiting at all
3252          * 2) busy poll only
3253          * 3) wait for an interrupt; return to the caller once the
3254          * wakeup/poll time has expired
3255          * 4) umonitor/umwait repeatedly up to the poll time
3256          */
3257
3258         /* If configured for per-dequeue wait, use the wait value provided
3259          * to this API. Otherwise use the global value set at eventdev
3260          * configuration time.
3261          */
3262         if (!dlb2->global_dequeue_wait)
3263                 timeout = dequeue_timeout_ticks;
3264         else
3265                 timeout = dlb2->global_dequeue_wait_ticks;
3266
3267         start_ticks = rte_get_timer_cycles();
3268
3269         while (num < max_num) {
3270                 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3271                 int num_avail;
3272
3273                 /* Copy up to 4 QEs from the current cache line into qes */
3274                 num_avail = dlb2_recv_qe_sparse(qm_port, qes);
3275
3276                 /* But don't process more than the user requested */
3277                 num_avail = RTE_MIN(num_avail, max_num - num);
3278
3279                 dlb2_inc_cq_idx(qm_port, num_avail << 2);
3280
3281                 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3282                         num += dlb2_process_dequeue_four_qes(ev_port,
3283                                                               qm_port,
3284                                                               &events[num],
3285                                                               &qes[0]);
3286                 else if (num_avail)
3287                         num += dlb2_process_dequeue_qes(ev_port,
3288                                                          qm_port,
3289                                                          &events[num],
3290                                                          &qes[0],
3291                                                          num_avail);
3292                 else if ((timeout == 0) || (num > 0))
3293                         /* Not waiting in any form, or 1+ events received? */
3294                         break;
3295                 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3296                                            timeout, start_ticks))
3297                         break;
3298         }
3299
3300         qm_port->owed_tokens += num;
3301
3302         if (num) {
3303                 if (qm_port->token_pop_mode == AUTO_POP)
3304                         dlb2_consume_qe_immediate(qm_port, num);
3305
3306                 ev_port->outstanding_releases += num;
3307
3308                 dlb2_port_credits_inc(qm_port, num);
3309         }
3310
3311         return num;
3312 }
3313
3314 static __rte_always_inline int
3315 dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
3316              uint8_t *offset)
3317 {
3318         uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
3319                                    {0x00, 0x01, 0x03, 0x07} };
3320         uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
3321         volatile struct dlb2_dequeue_qe *cq_addr;
3322         __m128i *qes = (__m128i *)qe;
3323         uint64_t *cache_line_base;
3324         uint8_t gen_bits;
3325
3326         cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3327         cq_addr = &cq_addr[qm_port->cq_idx];
3328
3329         cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
3330         *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
3331
3332         /* Load the next CQ cache line from memory. Pack these reads as tight
3333          * as possible to reduce the chance that DLB invalidates the line while
3334          * the CPU is reading it. Read the cache line backwards to ensure that
3335          * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
3336          *
3337          * (Valid QEs start at &qe[offset])
3338          */
3339         qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
3340         qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
3341         qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
3342         qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
3343
3344         /* Evict the cache line ASAP */
3345         rte_cldemote(cache_line_base);
3346
3347         /* Extract and combine the gen bits */
3348         gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3349                    ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3350                    ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3351                    ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3352
3353         /* XOR the combined bits such that a 1 represents a valid QE */
3354         gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
3355
3356         /* Mask off gen bits we don't care about */
3357         gen_bits &= and_mask[*offset];
3358
3359         return __builtin_popcount(gen_bits);
3360 }
3361
3362 static inline int16_t
3363 dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
3364                 struct dlb2_eventdev_port *ev_port,
3365                 struct rte_event *events,
3366                 uint16_t max_num,
3367                 uint64_t dequeue_timeout_ticks)
3368 {
3369         uint64_t timeout;
3370         uint64_t start_ticks = 0ULL;
3371         struct dlb2_port *qm_port;
3372         int num = 0;
3373
3374         qm_port = &ev_port->qm_port;
3375
3376         /* We have a special implementation for waiting. Wait can be:
3377          * 1) no waiting at all
3378          * 2) busy poll only
3379          * 3) wait for an interrupt; return to the caller once the
3380          * wakeup/poll time has expired
3381          * 4) umonitor/umwait repeatedly up to the poll time
3382          */
3383
3384         /* If configured for per-dequeue wait, use the wait value provided
3385          * to this API. Otherwise use the global value set at eventdev
3386          * configuration time.
3387          */
3388         if (!dlb2->global_dequeue_wait)
3389                 timeout = dequeue_timeout_ticks;
3390         else
3391                 timeout = dlb2->global_dequeue_wait_ticks;
3392
3393         start_ticks = rte_get_timer_cycles();
3394
3395         while (num < max_num) {
3396                 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3397                 uint8_t offset;
3398                 int num_avail;
3399
3400                 /* Copy up to 4 QEs from the current cache line into qes */
3401                 num_avail = dlb2_recv_qe(qm_port, qes, &offset);
3402
3403                 /* But don't process more than the user requested */
3404                 num_avail = RTE_MIN(num_avail, max_num - num);
3405
3406                 dlb2_inc_cq_idx(qm_port, num_avail);
3407
3408                 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3409                         num += dlb2_process_dequeue_four_qes(ev_port,
3410                                                              qm_port,
3411                                                              &events[num],
3412                                                              &qes[offset]);
3413                 else if (num_avail)
3414                         num += dlb2_process_dequeue_qes(ev_port,
3415                                                         qm_port,
3416                                                         &events[num],
3417                                                         &qes[offset],
3418                                                         num_avail);
3419                 else if ((timeout == 0) || (num > 0))
3420                         /* Not waiting in any form, or 1+ events received? */
3421                         break;
3422                 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3423                                            timeout, start_ticks))
3424                         break;
3425         }
3426
3427         qm_port->owed_tokens += num;
3428
3429         if (num) {
3430                 if (qm_port->token_pop_mode == AUTO_POP)
3431                         dlb2_consume_qe_immediate(qm_port, num);
3432
3433                 ev_port->outstanding_releases += num;
3434
3435                 dlb2_port_credits_inc(qm_port, num);
3436         }
3437
3438         return num;
3439 }
3440
3441 static uint16_t
3442 dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3443                          uint64_t wait)
3444 {
3445         struct dlb2_eventdev_port *ev_port = event_port;
3446         struct dlb2_port *qm_port = &ev_port->qm_port;
3447         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3448         uint16_t cnt;
3449
3450         RTE_ASSERT(ev_port->setup_done);
3451         RTE_ASSERT(ev != NULL);
3452
3453         if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3454                 uint16_t out_rels = ev_port->outstanding_releases;
3455
3456                 dlb2_event_release(dlb2, ev_port->id, out_rels);
3457
3458                 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3459         }
3460
3461         if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3462                 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3463
3464         cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
3465
3466         DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3467         DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3468
3469         return cnt;
3470 }
3471
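/*
 * Illustrative sketch (not part of the driver): a minimal worker iteration as
 * this PMD expects to be driven. Each dequeued event is processed and then
 * explicitly released (or forwarded); with implicit release enabled on the
 * port, anything still outstanding is released automatically at the top of
 * the next dequeue via the dlb2_event_release() call above. The "example_"
 * name and the dev/port ids are hypothetical.
 */
static inline void
example_worker_iteration(uint8_t dev_id, uint8_t port_id)
{
        struct rte_event ev[32];
        uint16_t i, n;

        n = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), 0);

        for (i = 0; i < n; i++) {
                /* ... process ev[i] ... */
                ev[i].op = RTE_EVENT_OP_RELEASE;
        }

        if (n)
                rte_event_enqueue_burst(dev_id, port_id, ev, n);
}
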
3472 static uint16_t
3473 dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3474 {
3475         return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
3476 }
3477
3478 static uint16_t
3479 dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3480                                 uint16_t num, uint64_t wait)
3481 {
3482         struct dlb2_eventdev_port *ev_port = event_port;
3483         struct dlb2_port *qm_port = &ev_port->qm_port;
3484         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3485         uint16_t cnt;
3486
3487         RTE_ASSERT(ev_port->setup_done);
3488         RTE_ASSERT(ev != NULL);
3489
3490         if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3491                 uint16_t out_rels = ev_port->outstanding_releases;
3492
3493                 dlb2_event_release(dlb2, ev_port->id, out_rels);
3494
3495                 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3496         }
3497
3498         if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3499                 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3500
3501         cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
3502
3503         DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3504         DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3505         return cnt;
3506 }
3507
3508 static uint16_t
3509 dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
3510                           uint64_t wait)
3511 {
3512         return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3513 }
3514
3515 static void
3516 dlb2_flush_port(struct rte_eventdev *dev, int port_id)
3517 {
3518         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3519         eventdev_stop_flush_t flush;
3520         struct rte_event ev;
3521         uint8_t dev_id;
3522         void *arg;
3523         int i;
3524
3525         flush = dev->dev_ops->dev_stop_flush;
3526         dev_id = dev->data->dev_id;
3527         arg = dev->data->dev_stop_flush_arg;
3528
3529         while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
3530                 if (flush)
3531                         flush(dev_id, ev, arg);
3532
3533                 if (dlb2->ev_ports[port_id].qm_port.is_directed)
3534                         continue;
3535
3536                 ev.op = RTE_EVENT_OP_RELEASE;
3537
3538                 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3539         }
3540
3541         /* Enqueue any additional outstanding releases */
3542         ev.op = RTE_EVENT_OP_RELEASE;
3543
3544         for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
3545                 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3546 }
3547
3548 static uint32_t
3549 dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
3550                          struct dlb2_eventdev_queue *queue)
3551 {
3552         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3553         struct dlb2_get_ldb_queue_depth_args cfg;
3554         int ret;
3555
3556         cfg.queue_id = queue->qm_queue.id;
3557
3558         ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
3559         if (ret < 0) {
3560                 DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
3561                              ret, dlb2_error_strings[cfg.response.status]);
3562                 return ret;
3563         }
3564
3565         return cfg.response.id;
3566 }
3567
3568 static uint32_t
3569 dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
3570                          struct dlb2_eventdev_queue *queue)
3571 {
3572         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3573         struct dlb2_get_dir_queue_depth_args cfg;
3574         int ret;
3575
3576         cfg.queue_id = queue->qm_queue.id;
3577
3578         ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
3579         if (ret < 0) {
3580                 DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
3581                              ret, dlb2_error_strings[cfg.response.status]);
3582                 return ret;
3583         }
3584
3585         return cfg.response.id;
3586 }
3587
3588 uint32_t
3589 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
3590                      struct dlb2_eventdev_queue *queue)
3591 {
3592         if (queue->qm_queue.is_directed)
3593                 return dlb2_get_dir_queue_depth(dlb2, queue);
3594         else
3595                 return dlb2_get_ldb_queue_depth(dlb2, queue);
3596 }
3597
3598 static bool
3599 dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
3600                     struct dlb2_eventdev_queue *queue)
3601 {
3602         return dlb2_get_queue_depth(dlb2, queue) == 0;
3603 }
3604
3605 static bool
3606 dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
3607 {
3608         int i;
3609
3610         for (i = 0; i < dlb2->num_queues; i++) {
3611                 if (dlb2->ev_queues[i].num_links == 0)
3612                         continue;
3613                 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3614                         return false;
3615         }
3616
3617         return true;
3618 }
3619
3620 static bool
3621 dlb2_queues_empty(struct dlb2_eventdev *dlb2)
3622 {
3623         int i;
3624
3625         for (i = 0; i < dlb2->num_queues; i++) {
3626                 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3627                         return false;
3628         }
3629
3630         return true;
3631 }
3632
3633 static void
3634 dlb2_drain(struct rte_eventdev *dev)
3635 {
3636         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3637         struct dlb2_eventdev_port *ev_port = NULL;
3638         uint8_t dev_id;
3639         int i;
3640
3641         dev_id = dev->data->dev_id;
3642
3643         while (!dlb2_linked_queues_empty(dlb2)) {
3644                 /* Flush all the ev_ports, which will drain all their connected
3645                  * queues.
3646                  */
3647                 for (i = 0; i < dlb2->num_ports; i++)
3648                         dlb2_flush_port(dev, i);
3649         }
3650
3651         /* The queues are empty, but there may be events left in the ports. */
3652         for (i = 0; i < dlb2->num_ports; i++)
3653                 dlb2_flush_port(dev, i);
3654
3655         /* If the domain's queues are empty, we're done. */
3656         if (dlb2_queues_empty(dlb2))
3657                 return;
3658
3659         /* Else, there must be at least one unlinked load-balanced queue.
3660          * Select a load-balanced port with which to drain the unlinked
3661          * queue(s).
3662          */
3663         for (i = 0; i < dlb2->num_ports; i++) {
3664                 ev_port = &dlb2->ev_ports[i];
3665
3666                 if (!ev_port->qm_port.is_directed)
3667                         break;
3668         }
3669
3670         if (i == dlb2->num_ports) {
3671                 DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
3672                 return;
3673         }
3674
3675         rte_errno = 0;
3676         rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
3677
3678         if (rte_errno) {
3679                 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
3680                              ev_port->id);
3681                 return;
3682         }
3683
3684         for (i = 0; i < dlb2->num_queues; i++) {
3685                 uint8_t qid, prio;
3686                 int ret;
3687
3688                 if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3689                         continue;
3690
3691                 qid = i;
3692                 prio = 0;
3693
3694                 /* Link the ev_port to the queue */
3695                 ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
3696                 if (ret != 1) {
3697                         DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
3698                                      ev_port->id, qid);
3699                         return;
3700                 }
3701
3702                 /* Flush the queue */
3703                 while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3704                         dlb2_flush_port(dev, ev_port->id);
3705
3706                 /* Drain any extant events in the ev_port. */
3707                 dlb2_flush_port(dev, ev_port->id);
3708
3709                 /* Unlink the ev_port from the queue */
3710                 ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
3711                 if (ret != 1) {
3712                         DLB2_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
3713                                      ev_port->id, qid);
3714                         return;
3715                 }
3716         }
3717 }
3718
3719 static void
3720 dlb2_eventdev_stop(struct rte_eventdev *dev)
3721 {
3722         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3723
3724         rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
3725
3726         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
3727                 DLB2_LOG_DBG("Internal error: already stopped\n");
3728                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3729                 return;
3730         } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
3731                 DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
3732                              (int)dlb2->run_state);
3733                 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3734                 return;
3735         }
3736
3737         dlb2->run_state = DLB2_RUN_STATE_STOPPING;
3738
3739         rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3740
3741         dlb2_drain(dev);
3742
3743         dlb2->run_state = DLB2_RUN_STATE_STOPPED;
3744 }
3745
3746 static int
3747 dlb2_eventdev_close(struct rte_eventdev *dev)
3748 {
3749         dlb2_hw_reset_sched_domain(dev, false);
3750
3751         return 0;
3752 }
3753
3754 static void
3755 dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
3756 {
3757         RTE_SET_USED(dev);
3758         RTE_SET_USED(id);
3759
3760         /* This function intentionally left blank. */
3761 }
3762
3763 static void
3764 dlb2_eventdev_port_release(void *port)
3765 {
3766         struct dlb2_eventdev_port *ev_port = port;
3767         struct dlb2_port *qm_port;
3768
3769         if (ev_port) {
3770                 qm_port = &ev_port->qm_port;
3771                 if (qm_port->config_state == DLB2_CONFIGURED)
3772                         dlb2_free_qe_mem(qm_port);
3773         }
3774 }
3775
3776 static int
3777 dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
3778                             uint64_t *timeout_ticks)
3779 {
3780         RTE_SET_USED(dev);
3781         uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
3782
3783         *timeout_ticks = ns * cycles_per_ns;
3784
3785         return 0;
3786 }
3787
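/*
 * Worked example for the conversion above (illustrative only): with a 2 GHz
 * timer, cycles_per_ns == 2, so a 1000 ns dequeue timeout becomes 2000 ticks.
 * Note the integer truncation: a 2.3 GHz timer also yields cycles_per_ns == 2,
 * slightly shortening the effective timeout.
 */
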
3788 static void
3789 dlb2_entry_points_init(struct rte_eventdev *dev)
3790 {
3791         struct dlb2_eventdev *dlb2;
3792
3793         /* Expose PMD's eventdev interface */
3794         static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
3795                 .dev_infos_get    = dlb2_eventdev_info_get,
3796                 .dev_configure    = dlb2_eventdev_configure,
3797                 .dev_start        = dlb2_eventdev_start,
3798                 .dev_stop         = dlb2_eventdev_stop,
3799                 .dev_close        = dlb2_eventdev_close,
3800                 .queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
3801                 .queue_setup      = dlb2_eventdev_queue_setup,
3802                 .queue_release    = dlb2_eventdev_queue_release,
3803                 .port_def_conf    = dlb2_eventdev_port_default_conf_get,
3804                 .port_setup       = dlb2_eventdev_port_setup,
3805                 .port_release     = dlb2_eventdev_port_release,
3806                 .port_link        = dlb2_eventdev_port_link,
3807                 .port_unlink      = dlb2_eventdev_port_unlink,
3808                 .port_unlinks_in_progress =
3809                                     dlb2_eventdev_port_unlinks_in_progress,
3810                 .timeout_ticks    = dlb2_eventdev_timeout_ticks,
3811                 .dump             = dlb2_eventdev_dump,
3812                 .xstats_get       = dlb2_eventdev_xstats_get,
3813                 .xstats_get_names = dlb2_eventdev_xstats_get_names,
3814                 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
3815                 .xstats_reset       = dlb2_eventdev_xstats_reset,
3816                 .dev_selftest     = test_dlb2_eventdev,
3817         };
3818
3821         dev->dev_ops = &dlb2_eventdev_entry_ops;
3822         dev->enqueue = dlb2_event_enqueue;
3823         dev->enqueue_burst = dlb2_event_enqueue_burst;
3824         dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
3825         dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
3826
3827         dlb2 = dev->data->dev_private;
3828         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
3829                 dev->dequeue = dlb2_event_dequeue_sparse;
3830                 dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
3831         } else {
3832                 dev->dequeue = dlb2_event_dequeue;
3833                 dev->dequeue_burst = dlb2_event_dequeue_burst;
3834         }
3835 }
3836
3837 int
3838 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
3839                             const char *name,
3840                             struct dlb2_devargs *dlb2_args)
3841 {
3842         struct dlb2_eventdev *dlb2;
3843         int err, i;
3844
3845         dlb2 = dev->data->dev_private;
3846
3847         dlb2->event_dev = dev; /* backlink */
3848
3849         evdev_dlb2_default_info.driver_name = name;
3850
3851         dlb2->max_num_events_override = dlb2_args->max_num_events;
3852         dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
3853         dlb2->qm_instance.cos_id = dlb2_args->cos_id;
3854
3855         err = dlb2_iface_open(&dlb2->qm_instance, name);
3856         if (err < 0) {
3857                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3858                              err);
3859                 return err;
3860         }
3861
3862         err = dlb2_iface_get_device_version(&dlb2->qm_instance,
3863                                             &dlb2->revision);
3864         if (err < 0) {
3865                 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
3866                              err);
3867                 return err;
3868         }
3869
3870         err = dlb2_hw_query_resources(dlb2);
3871         if (err) {
3872                 DLB2_LOG_ERR("get resources err=%d for %s\n",
3873                              err, name);
3874                 return err;
3875         }
3876
3877         dlb2_iface_hardware_init(&dlb2->qm_instance);
3878
3879         err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
3880         if (err < 0) {
3881                 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
3882                              err);
3883                 return err;
3884         }
3885
3886         /* Complete xstats runtime initialization */
3887         err = dlb2_xstats_init(dlb2);
3888         if (err) {
3889                 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
3890                 return err;
3891         }
3892
3893         /* Initialize each port's token pop mode */
3894         for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
3895                 dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
3896
3897         rte_spinlock_init(&dlb2->qm_instance.resource_lock);
3898
3899         dlb2_iface_low_level_io_init();
3900
3901         dlb2_entry_points_init(dev);
3902
3903         dlb2_init_queue_depth_thresholds(dlb2,
3904                                          dlb2_args->qid_depth_thresholds.val);
3905
3906         return 0;
3907 }
3908
3909 int
3910 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
3911                               const char *name)
3912 {
3913         struct dlb2_eventdev *dlb2;
3914         int err;
3915
3916         dlb2 = dev->data->dev_private;
3917
3918         evdev_dlb2_default_info.driver_name = name;
3919
3920         err = dlb2_iface_open(&dlb2->qm_instance, name);
3921         if (err < 0) {
3922                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3923                              err);
3924                 return err;
3925         }
3926
3927         err = dlb2_hw_query_resources(dlb2);
3928         if (err) {
3929                 DLB2_LOG_ERR("get resources err=%d for %s\n",
3930                              err, name);
3931                 return err;
3932         }
3933
3934         dlb2_iface_low_level_io_init();
3935
3936         dlb2_entry_points_init(dev);
3937
3938         return 0;
3939 }
3940
3941 int
3942 dlb2_parse_params(const char *params,
3943                   const char *name,
3944                   struct dlb2_devargs *dlb2_args)
3945 {
3946         int ret = 0;
3947         static const char * const args[] = { NUMA_NODE_ARG,
3948                                              DLB2_MAX_NUM_EVENTS,
3949                                              DLB2_NUM_DIR_CREDITS,
3950                                              DEV_ID_ARG,
3951                                              DLB2_QID_DEPTH_THRESH_ARG,
3952                                              DLB2_COS_ARG,
3953                                              NULL };
3954
3955         if (params != NULL && params[0] != '\0') {
3956                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
3957
3958                 if (kvlist == NULL) {
3959                         RTE_LOG(INFO, PMD,
3960                                 "Ignoring unsupported parameters when creating device '%s'\n",
3961                                 name);
3962                 } else {
3963                         ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
3964                                                  set_numa_node,
3965                                                  &dlb2_args->socket_id);
3966                         if (ret != 0) {
3967                                 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
3968                                              name);
3969                                 rte_kvargs_free(kvlist);
3970                                 return ret;
3971                         }
3972
3973                         ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
3974                                                  set_max_num_events,
3975                                                  &dlb2_args->max_num_events);
3976                         if (ret != 0) {
3977                                 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
3978                                              name);
3979                                 rte_kvargs_free(kvlist);
3980                                 return ret;
3981                         }
3982
3983                         ret = rte_kvargs_process(kvlist,
3984                                         DLB2_NUM_DIR_CREDITS,
3985                                         set_num_dir_credits,
3986                                         &dlb2_args->num_dir_credits_override);
3987                         if (ret != 0) {
3988                                 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
3989                                              name);
3990                                 rte_kvargs_free(kvlist);
3991                                 return ret;
3992                         }
3993
3994                         ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
3995                                                  set_dev_id,
3996                                                  &dlb2_args->dev_id);
3997                         if (ret != 0) {
3998                                 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
3999                                              name);
4000                                 rte_kvargs_free(kvlist);
4001                                 return ret;
4002                         }
4003
4004                         ret = rte_kvargs_process(
4005                                         kvlist,
4006                                         DLB2_QID_DEPTH_THRESH_ARG,
4007                                         set_qid_depth_thresh,
4008                                         &dlb2_args->qid_depth_thresholds);
4009                         if (ret != 0) {
4010                                 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
4011                                              name);
4012                                 rte_kvargs_free(kvlist);
4013                                 return ret;
4014                         }
4015
4016                         ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
4017                                                  set_cos,
4018                                                  &dlb2_args->cos_id);
4019                         if (ret != 0) {
4020                                 DLB2_LOG_ERR("%s: Error parsing cos parameter",
4021                                              name);
4022                                 rte_kvargs_free(kvlist);
4023                                 return ret;
4024                         }
4025
4026                         rte_kvargs_free(kvlist);
4027                 }
4028         }
4029         return ret;
4030 }
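
/*
 * Illustrative usage (not part of the driver): the keys accepted above are
 * supplied as comma-separated devargs appended to the device on the EAL
 * command line, for example (assuming the key strings are "max_num_events"
 * and "num_dir_credits", and using a placeholder PCI address):
 *
 *   --allow <pci-bdf>,max_num_events=4096,num_dir_credits=64
 *
 * If rte_kvargs_parse() rejects the string, the "Ignoring unsupported
 * parameters" path above is taken and the built-in defaults are kept.
 */
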
4031 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);