event/dlb2: add port unlink and unlinks in progress
[dpdk.git] / drivers / event / dlb2 / dlb2.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <nmmintrin.h>
8 #include <pthread.h>
9 #include <stdint.h>
10 #include <stdbool.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/mman.h>
14 #include <sys/fcntl.h>
15
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
24 #include <rte_io.h>
25 #include <rte_kvargs.h>
26 #include <rte_log.h>
27 #include <rte_malloc.h>
28 #include <rte_mbuf.h>
29 #include <rte_prefetch.h>
30 #include <rte_ring.h>
31 #include <rte_string_fns.h>
32
33 #include "dlb2_priv.h"
34 #include "dlb2_iface.h"
35 #include "dlb2_inline_fns.h"
36
37 /*
38  * Resources exposed to eventdev. Some values are overridden at runtime using
39  * values returned by the DLB2 kernel driver.
40  */
41 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
42 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
43 #endif
44 static struct rte_event_dev_info evdev_dlb2_default_info = {
45         .driver_name = "", /* probe will set */
46         .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
47         .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
48 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
49         .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
50 #else
51         .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
52 #endif
53         .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
54         .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
55         .max_event_priority_levels = DLB2_QID_PRIORITIES,
56         .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
57         .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
58         .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
59         .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
60         .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
61         .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
62         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
63                           RTE_EVENT_DEV_CAP_EVENT_QOS |
64                           RTE_EVENT_DEV_CAP_BURST_MODE |
65                           RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
66                           RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
67                           RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
68 };
69
70 struct process_local_port_data
71 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
72
73 /*
74  * DUMMY - added so that xstats path will compile/link.
75  * Will be replaced by real version in a subsequent
76  * patch.
77  */
78 uint32_t
79 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
80                      struct dlb2_eventdev_queue *queue)
81 {
82         RTE_SET_USED(dlb2);
83         RTE_SET_USED(queue);
84
85         return 0;
86 }
87
88 static void
89 dlb2_free_qe_mem(struct dlb2_port *qm_port)
90 {
91         if (qm_port == NULL)
92                 return;
93
94         rte_free(qm_port->qe4);
95         qm_port->qe4 = NULL;
96
97         rte_free(qm_port->int_arm_qe);
98         qm_port->int_arm_qe = NULL;
99
100         rte_free(qm_port->consume_qe);
101         qm_port->consume_qe = NULL;
102
103         rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
104         dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
105 }
106
107 /* override defaults with value(s) provided on command line */
108 static void
109 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
110                                  int *qid_depth_thresholds)
111 {
112         int q;
113
114         for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
115                 if (qid_depth_thresholds[q] != 0)
116                         dlb2->ev_queues[q].depth_threshold =
117                                 qid_depth_thresholds[q];
118         }
119 }
120
121 static int
122 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
123 {
124         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
125         struct dlb2_hw_resource_info *dlb2_info = &handle->info;
126         int ret;
127
128         /* Query driver resources provisioned for this device */
129
130         ret = dlb2_iface_get_num_resources(handle,
131                                            &dlb2->hw_rsrc_query_results);
132         if (ret) {
133                 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
134                 return ret;
135         }
136
137         /* Complete filling in device resource info returned to evdev app,
138          * overriding any default values.
139          * The capabilities (CAPs) were set at compile time.
140          */
141
142         evdev_dlb2_default_info.max_event_queues =
143                 dlb2->hw_rsrc_query_results.num_ldb_queues;
144
145         evdev_dlb2_default_info.max_event_ports =
146                 dlb2->hw_rsrc_query_results.num_ldb_ports;
147
148         evdev_dlb2_default_info.max_num_events =
149                 dlb2->hw_rsrc_query_results.num_ldb_credits;
150
151         /* Save off values used when creating the scheduling domain. */
152
153         handle->info.num_sched_domains =
154                 dlb2->hw_rsrc_query_results.num_sched_domains;
155
156         handle->info.hw_rsrc_max.nb_events_limit =
157                 dlb2->hw_rsrc_query_results.num_ldb_credits;
158
159         handle->info.hw_rsrc_max.num_queues =
160                 dlb2->hw_rsrc_query_results.num_ldb_queues +
161                 dlb2->hw_rsrc_query_results.num_dir_ports;
162
163         handle->info.hw_rsrc_max.num_ldb_queues =
164                 dlb2->hw_rsrc_query_results.num_ldb_queues;
165
166         handle->info.hw_rsrc_max.num_ldb_ports =
167                 dlb2->hw_rsrc_query_results.num_ldb_ports;
168
169         handle->info.hw_rsrc_max.num_dir_ports =
170                 dlb2->hw_rsrc_query_results.num_dir_ports;
171
172         handle->info.hw_rsrc_max.reorder_window_size =
173                 dlb2->hw_rsrc_query_results.num_hist_list_entries;
174
175         rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
176
177         return 0;
178 }
179
180 #define DLB2_BASE_10 10
181
182 static int
183 dlb2_string_to_int(int *result, const char *str)
184 {
185         long ret;
186         char *endptr;
187
188         if (str == NULL || result == NULL)
189                 return -EINVAL;
190
191         errno = 0;
192         ret = strtol(str, &endptr, DLB2_BASE_10);
193         if (errno)
194                 return -errno;
195
196         /* long and int may have different widths on some architectures */
197         if (ret < INT_MIN || ret > INT_MAX || endptr == str)
198                 return -EINVAL;
199
200         *result = ret;
201         return 0;
202 }
203
204 static int
205 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
206 {
207         int *socket_id = opaque;
208         int ret;
209
210         ret = dlb2_string_to_int(socket_id, value);
211         if (ret < 0)
212                 return ret;
213
214         if (*socket_id > RTE_MAX_NUMA_NODES)
215                 return -EINVAL;
216         return 0;
217 }
218
219 static int
220 set_max_num_events(const char *key __rte_unused,
221                    const char *value,
222                    void *opaque)
223 {
224         int *max_num_events = opaque;
225         int ret;
226
227         if (value == NULL || opaque == NULL) {
228                 DLB2_LOG_ERR("NULL pointer\n");
229                 return -EINVAL;
230         }
231
232         ret = dlb2_string_to_int(max_num_events, value);
233         if (ret < 0)
234                 return ret;
235
236         if (*max_num_events < 0 || *max_num_events >
237                         DLB2_MAX_NUM_LDB_CREDITS) {
238                 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
239                              DLB2_MAX_NUM_LDB_CREDITS);
240                 return -EINVAL;
241         }
242
243         return 0;
244 }
245
246 static int
247 set_num_dir_credits(const char *key __rte_unused,
248                     const char *value,
249                     void *opaque)
250 {
251         int *num_dir_credits = opaque;
252         int ret;
253
254         if (value == NULL || opaque == NULL) {
255                 DLB2_LOG_ERR("NULL pointer\n");
256                 return -EINVAL;
257         }
258
259         ret = dlb2_string_to_int(num_dir_credits, value);
260         if (ret < 0)
261                 return ret;
262
263         if (*num_dir_credits < 0 ||
264             *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
265                 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
266                              DLB2_MAX_NUM_DIR_CREDITS);
267                 return -EINVAL;
268         }
269
270         return 0;
271 }
272
273 static int
274 set_dev_id(const char *key __rte_unused,
275            const char *value,
276            void *opaque)
277 {
278         int *dev_id = opaque;
279         int ret;
280
281         if (value == NULL || opaque == NULL) {
282                 DLB2_LOG_ERR("NULL pointer\n");
283                 return -EINVAL;
284         }
285
286         ret = dlb2_string_to_int(dev_id, value);
287         if (ret < 0)
288                 return ret;
289
290         return 0;
291 }
292
293 static int
294 set_cos(const char *key __rte_unused,
295         const char *value,
296         void *opaque)
297 {
298         enum dlb2_cos *cos_id = opaque;
299         int x = 0;
300         int ret;
301
302         if (value == NULL || opaque == NULL) {
303                 DLB2_LOG_ERR("NULL pointer\n");
304                 return -EINVAL;
305         }
306
307         ret = dlb2_string_to_int(&x, value);
308         if (ret < 0)
309                 return ret;
310
311         if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
312                 DLB2_LOG_ERR(
313                         "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
314                         x);
315                 return -EINVAL;
316         }
317
318         *cos_id = x;
319
320         return 0;
321 }
322
323
324 static int
325 set_qid_depth_thresh(const char *key __rte_unused,
326                      const char *value,
327                      void *opaque)
328 {
329         struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
330         int first, last, thresh, i;
331
332         if (value == NULL || opaque == NULL) {
333                 DLB2_LOG_ERR("NULL pointer\n");
334                 return -EINVAL;
335         }
336
337         /* command line override may take one of the following 3 forms:
338          * qid_depth_thresh=all:<threshold_value> ... all queues
339          * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
340          * qid_depth_thresh=qid:<threshold_value> ... just one queue
341          */
342         if (sscanf(value, "all:%d", &thresh) == 1) {
343                 first = 0;
344                 last = DLB2_MAX_NUM_QUEUES - 1;
345         } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
346                 /* we have everything we need */
347         } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
348                 last = first;
349         } else {
350                 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
351                 return -EINVAL;
352         }
353
354         if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
355                 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
356                 return -EINVAL;
357         }
358
359         if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
360                 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
361                              DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
362                 return -EINVAL;
363         }
364
365         for (i = first; i <= last; i++)
366                 qid_thresh->val[i] = thresh; /* indexed by qid */
367
368         return 0;
369 }
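/*
 * Illustrative devargs sketch (not part of the driver): the parsers above are
 * invoked through rte_kvargs, so a device argument string such as
 * "max_num_events=1024,num_dir_credits=512,qid_depth_thresh=all:256" would,
 * assuming the key names registered elsewhere in this PMD match, exercise
 * set_max_num_events(), set_num_dir_credits() and set_qid_depth_thresh()
 * respectively. The qid_depth_thresh value may also target a single queue
 * ("5:128") or a range of queues ("0-3:64"), as parsed above.
 */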
370
371 static void
372 dlb2_eventdev_info_get(struct rte_eventdev *dev,
373                        struct rte_event_dev_info *dev_info)
374 {
375         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
376         int ret;
377
378         ret = dlb2_hw_query_resources(dlb2);
379         if (ret) {
380                 const struct rte_eventdev_data *data = dev->data;
381
382                 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
383                              ret, data->dev_id);
384                 /* fn is void, so fall through and return values set up in
385                  * probe
386                  */
387         }
388
389         /* Add num resources currently owned by this domain.
390          * These would become available if the scheduling domain were reset due
391          * to the application recalling eventdev_configure to *reconfigure* the
392          * domain.
393          */
394         evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
395         evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
396         evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
397
398         evdev_dlb2_default_info.max_event_queues =
399                 RTE_MIN(evdev_dlb2_default_info.max_event_queues,
400                         RTE_EVENT_MAX_QUEUES_PER_DEV);
401
402         evdev_dlb2_default_info.max_num_events =
403                 RTE_MIN(evdev_dlb2_default_info.max_num_events,
404                         dlb2->max_num_events_override);
405
406         *dev_info = evdev_dlb2_default_info;
407 }
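/*
 * Minimal application-side sketch (assumption: dev_id 0 refers to a probed
 * DLB2 device):
 *
 *     struct rte_event_dev_info info;
 *
 *     if (rte_event_dev_info_get(0, &info) == 0)
 *         printf("queues=%d ports=%d events=%d\n",
 *                info.max_event_queues, info.max_event_ports,
 *                (int)info.max_num_events);
 *
 * The reported values reflect the resources returned by
 * dlb2_hw_query_resources() plus any resources currently owned by this
 * scheduling domain.
 */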
408
409 static int
410 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
411                             const struct dlb2_hw_rsrcs *resources_asked)
412 {
413         int ret = 0;
414         struct dlb2_create_sched_domain_args *cfg;
415
416         if (resources_asked == NULL) {
417                 DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
418                 ret = -EINVAL;
419                 goto error_exit;
420         }
421
422         /* Map generic qm resources to dlb2 resources */
423         cfg = &handle->cfg.resources;
424
425         /* DIR ports and queues */
426
427         cfg->num_dir_ports = resources_asked->num_dir_ports;
428
429         cfg->num_dir_credits = resources_asked->num_dir_credits;
430
431         /* LDB queues */
432
433         cfg->num_ldb_queues = resources_asked->num_ldb_queues;
434
435         /* LDB ports */
436
437         cfg->cos_strict = 0; /* Best effort */
438         cfg->num_cos_ldb_ports[0] = 0;
439         cfg->num_cos_ldb_ports[1] = 0;
440         cfg->num_cos_ldb_ports[2] = 0;
441         cfg->num_cos_ldb_ports[3] = 0;
442
443         switch (handle->cos_id) {
444         case DLB2_COS_0:
445                 cfg->num_ldb_ports = 0; /* no don't care ports */
446                 cfg->num_cos_ldb_ports[0] =
447                         resources_asked->num_ldb_ports;
448                 break;
449         case DLB2_COS_1:
450                 cfg->num_ldb_ports = 0; /* no don't care ports */
451                 cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
452                 break;
453         case DLB2_COS_2:
454                 cfg->num_ldb_ports = 0; /* no don't care ports */
455                 cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
456                 break;
457         case DLB2_COS_3:
458                 cfg->num_ldb_ports = 0; /* no don't care ports */
459                 cfg->num_cos_ldb_ports[3] =
460                         resources_asked->num_ldb_ports;
461                 break;
462         case DLB2_COS_DEFAULT:
463                 /* all ldb ports are don't care ports from a cos perspective */
464                 cfg->num_ldb_ports =
465                         resources_asked->num_ldb_ports;
466                 break;
467         }
468
469         cfg->num_ldb_credits =
470                 resources_asked->num_ldb_credits;
471
472         cfg->num_atomic_inflights =
473                 DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
474                 cfg->num_ldb_queues;
475
476         cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
477                 DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
478
479         DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
480                      cfg->num_ldb_queues,
481                      resources_asked->num_ldb_ports,
482                      cfg->num_dir_ports,
483                      cfg->num_atomic_inflights,
484                      cfg->num_hist_list_entries,
485                      cfg->num_ldb_credits,
486                      cfg->num_dir_credits);
487
488         /* Configure the QM */
489
490         ret = dlb2_iface_sched_domain_create(handle, cfg);
491         if (ret < 0) {
492                 DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
493                              ret,
494                              dlb2_error_strings[cfg->response.status]);
495
496                 goto error_exit;
497         }
498
499         handle->domain_id = cfg->response.id;
500         handle->cfg.configured = true;
501
502 error_exit:
503
504         return ret;
505 }
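/*
 * Example of the class-of-service mapping above (illustrative only): with
 * handle->cos_id == DLB2_COS_1 and 8 load-balanced ports requested, the
 * domain is created with num_ldb_ports = 0 and num_cos_ldb_ports[] =
 * {0, 8, 0, 0}, i.e. every LDB port is pinned to CoS 1. With
 * DLB2_COS_DEFAULT, all 8 ports are "don't care" ports and the hardware is
 * free to place them in any class of service.
 */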
506
507 static void
508 dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
509 {
510         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
511         enum dlb2_configuration_state config_state;
512         int i, j;
513
514         dlb2_iface_domain_reset(dlb2);
515
516         /* Free all dynamically allocated port memory */
517         for (i = 0; i < dlb2->num_ports; i++)
518                 dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
519
520         /* If reconfiguring, mark the device's queues and ports as "previously
521          * configured." If the user doesn't reconfigure them, the PMD will
522          * reapply their previous configuration when the device is started.
523          */
524         config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
525                 DLB2_NOT_CONFIGURED;
526
527         for (i = 0; i < dlb2->num_ports; i++) {
528                 dlb2->ev_ports[i].qm_port.config_state = config_state;
529                 /* Reset setup_done so ports can be reconfigured */
530                 dlb2->ev_ports[i].setup_done = false;
531                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
532                         dlb2->ev_ports[i].link[j].mapped = false;
533         }
534
535         for (i = 0; i < dlb2->num_queues; i++)
536                 dlb2->ev_queues[i].qm_queue.config_state = config_state;
537
538         for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
539                 dlb2->ev_queues[i].setup_done = false;
540
541         dlb2->num_ports = 0;
542         dlb2->num_ldb_ports = 0;
543         dlb2->num_dir_ports = 0;
544         dlb2->num_queues = 0;
545         dlb2->num_ldb_queues = 0;
546         dlb2->num_dir_queues = 0;
547         dlb2->configured = false;
548 }
549
550 /* Note: 1 QM instance per QM device, QM instance/device == event device */
551 static int
552 dlb2_eventdev_configure(const struct rte_eventdev *dev)
553 {
554         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
555         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
556         struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
557         const struct rte_eventdev_data *data = dev->data;
558         const struct rte_event_dev_config *config = &data->dev_conf;
559         int ret;
560
561         /* If this eventdev is already configured, we must release the current
562          * scheduling domain before attempting to configure a new one.
563          */
564         if (dlb2->configured) {
565                 dlb2_hw_reset_sched_domain(dev, true);
566
567                 ret = dlb2_hw_query_resources(dlb2);
568                 if (ret) {
569                         DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
570                                      ret, data->dev_id);
571                         return ret;
572                 }
573         }
574
575         if (config->nb_event_queues > rsrcs->num_queues) {
576                 DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
577                              config->nb_event_queues,
578                              rsrcs->num_queues);
579                 return -EINVAL;
580         }
581         if (config->nb_event_ports > (rsrcs->num_ldb_ports
582                         + rsrcs->num_dir_ports)) {
583                 DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
584                              config->nb_event_ports,
585                              (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
586                 return -EINVAL;
587         }
588         if (config->nb_events_limit > rsrcs->nb_events_limit) {
589                 DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
590                              config->nb_events_limit,
591                              rsrcs->nb_events_limit);
592                 return -EINVAL;
593         }
594
595         if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
596                 dlb2->global_dequeue_wait = false;
597         else {
598                 uint32_t timeout32;
599
600                 dlb2->global_dequeue_wait = true;
601
602                 /* note size mismatch of timeout vals in eventdev lib. */
603                 timeout32 = config->dequeue_timeout_ns;
604
605                 dlb2->global_dequeue_wait_ticks =
606                         timeout32 * (rte_get_timer_hz() / 1E9);
607         }
608
609         /* Does this platform support umonitor/umwait? */
610         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
611                 if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
612                     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
613                         DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
614                                      RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
615                         return -EINVAL;
616                 }
617                 dlb2->umwait_allowed = true;
618         }
619
620         rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
621         rsrcs->num_ldb_ports  = config->nb_event_ports - rsrcs->num_dir_ports;
622         /* 1 dir queue per dir port */
623         rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
624
625         /* Scale down nb_events_limit by 4 for directed credits, since there
626          * are 4x as many load-balanced credits.
627          */
628         rsrcs->num_ldb_credits = 0;
629         rsrcs->num_dir_credits = 0;
630
631         if (rsrcs->num_ldb_queues)
632                 rsrcs->num_ldb_credits = config->nb_events_limit;
633         if (rsrcs->num_dir_ports)
634                 rsrcs->num_dir_credits = config->nb_events_limit / 4;
635         if (dlb2->num_dir_credits_override != -1)
636                 rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
637
638         if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
639                 DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
640                 return -ENODEV;
641         }
642
643         dlb2->new_event_limit = config->nb_events_limit;
644         __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
645
646         /* Save number of ports/queues for this event dev */
647         dlb2->num_ports = config->nb_event_ports;
648         dlb2->num_queues = config->nb_event_queues;
649         dlb2->num_dir_ports = rsrcs->num_dir_ports;
650         dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
651         dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
652         dlb2->num_dir_queues = dlb2->num_dir_ports;
653         dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
654         dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
655         dlb2->dir_credit_pool = rsrcs->num_dir_credits;
656         dlb2->max_dir_credits = rsrcs->num_dir_credits;
657
658         dlb2->configured = true;
659
660         return 0;
661 }
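/*
 * Configuration sketch (assumption: a single DLB2 eventdev at dev_id 0).
 * With nb_events_limit = 2048, one single-link (directed) port and seven
 * load-balanced ports, the code above derives num_ldb_credits = 2048 and
 * num_dir_credits = 2048 / 4 = 512, unless overridden by the
 * num_dir_credits devarg:
 *
 *     struct rte_event_dev_config cfg = {
 *             .nb_event_queues = 8,
 *             .nb_event_ports = 8,
 *             .nb_single_link_event_port_queues = 1,
 *             .nb_events_limit = 2048,
 *             .nb_event_queue_flows = 1024,
 *             .nb_event_port_dequeue_depth = 32,
 *             .nb_event_port_enqueue_depth = 32,
 *             .dequeue_timeout_ns = 0,
 *     };
 *
 *     ret = rte_event_dev_configure(0, &cfg);
 */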
662
663 static void
664 dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
665                                     uint8_t port_id,
666                                     struct rte_event_port_conf *port_conf)
667 {
668         RTE_SET_USED(port_id);
669         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
670
671         port_conf->new_event_threshold = dlb2->new_event_limit;
672         port_conf->dequeue_depth = 32;
673         port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
674         port_conf->event_port_cfg = 0;
675 }
676
677 static void
678 dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
679                                      uint8_t queue_id,
680                                      struct rte_event_queue_conf *queue_conf)
681 {
682         RTE_SET_USED(dev);
683         RTE_SET_USED(queue_id);
684
685         queue_conf->nb_atomic_flows = 1024;
686         queue_conf->nb_atomic_order_sequences = 64;
687         queue_conf->event_queue_cfg = 0;
688         queue_conf->priority = 0;
689 }
690
691 static int32_t
692 dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
693 {
694         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
695         struct dlb2_get_sn_allocation_args cfg;
696         int ret;
697
698         cfg.group = group;
699
700         ret = dlb2_iface_get_sn_allocation(handle, &cfg);
701         if (ret < 0) {
702                 DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
703                              ret, dlb2_error_strings[cfg.response.status]);
704                 return ret;
705         }
706
707         return cfg.response.id;
708 }
709
710 static int
711 dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
712 {
713         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
714         struct dlb2_set_sn_allocation_args cfg;
715         int ret;
716
717         cfg.num = num;
718         cfg.group = group;
719
720         ret = dlb2_iface_set_sn_allocation(handle, &cfg);
721         if (ret < 0) {
722                 DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
723                              ret, dlb2_error_strings[cfg.response.status]);
724                 return ret;
725         }
726
727         return ret;
728 }
729
730 static int32_t
731 dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
732 {
733         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
734         struct dlb2_get_sn_occupancy_args cfg;
735         int ret;
736
737         cfg.group = group;
738
739         ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
740         if (ret < 0) {
741                 DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
742                              ret, dlb2_error_strings[cfg.response.status]);
743                 return ret;
744         }
745
746         return cfg.response.id;
747 }
748
749 /* Query the current sequence number allocations and, if they conflict with the
750  * requested LDB queue configuration, attempt to re-allocate sequence numbers.
751  * This is best-effort; if it fails, the PMD will still attempt to configure
752  * the load-balanced queue, and that configuration will then fail with an error.
753  */
754 static void
755 dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
756                            const struct rte_event_queue_conf *queue_conf)
757 {
758         int grp_occupancy[DLB2_NUM_SN_GROUPS];
759         int grp_alloc[DLB2_NUM_SN_GROUPS];
760         int i, sequence_numbers;
761
762         sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
763
764         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
765                 int total_slots;
766
767                 grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
768                 if (grp_alloc[i] < 0)
769                         return;
770
771                 total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
772
773                 grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
774                 if (grp_occupancy[i] < 0)
775                         return;
776
777                 /* DLB has at least one available slot for the requested
778                  * sequence numbers, so no further configuration required.
779                  */
780                 if (grp_alloc[i] == sequence_numbers &&
781                     grp_occupancy[i] < total_slots)
782                         return;
783         }
784
785         /* None of the sequence number groups are configured for the requested
786          * sequence numbers, so we have to reconfigure one of them. This is
787          * only possible if a group is not in use.
788          */
789         for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
790                 if (grp_occupancy[i] == 0)
791                         break;
792         }
793
794         if (i == DLB2_NUM_SN_GROUPS) {
795                 DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
796                        __func__, sequence_numbers);
797                 return;
798         }
799
800         /* Attempt to configure slot i with the requested number of sequence
801          * numbers. Ignore the return value -- if this fails, the error will be
802          * caught during subsequent queue configuration.
803          */
804         dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
805 }
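/*
 * Worked example of the slot arithmetic above (the values are illustrative;
 * the real constants live in dlb2_priv.h): if DLB2_MAX_LDB_SN_ALLOC were
 * 1024 and group i is currently allocated 64 sequence numbers per slot, the
 * group exposes 1024 / 64 = 16 slots. A request for 64
 * nb_atomic_order_sequences is satisfied by that group as long as its
 * occupancy is below 16; otherwise an unused group (occupancy == 0) is
 * re-programmed via dlb2_set_sn_allocation().
 */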
806
807 static int32_t
808 dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
809                          struct dlb2_eventdev_queue *ev_queue,
810                          const struct rte_event_queue_conf *evq_conf)
811 {
812         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
813         struct dlb2_queue *queue = &ev_queue->qm_queue;
814         struct dlb2_create_ldb_queue_args cfg;
815         int32_t ret;
816         uint32_t qm_qid;
817         int sched_type = -1;
818
819         if (evq_conf == NULL)
820                 return -EINVAL;
821
822         if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
823                 if (evq_conf->nb_atomic_order_sequences != 0)
824                         sched_type = RTE_SCHED_TYPE_ORDERED;
825                 else
826                         sched_type = RTE_SCHED_TYPE_PARALLEL;
827         } else
828                 sched_type = evq_conf->schedule_type;
829
830         cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
831         cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
832         cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
833
834         if (sched_type != RTE_SCHED_TYPE_ORDERED) {
835                 cfg.num_sequence_numbers = 0;
836                 cfg.num_qid_inflights = 2048;
837         }
838
839         /* The application should set this to the number of hardware flows it
840          * wants, not the overall number of flows it is going to use. E.g. if the
841          * application uses 64 flows and sets the compression level to 64, it will
842          * get at best 64 unique hashed flows in hardware.
843          */
844         switch (evq_conf->nb_atomic_flows) {
845         /* Valid DLB2 compression levels */
846         case 64:
847         case 128:
848         case 256:
849         case 512:
850         case (1 * 1024): /* 1K */
851         case (2 * 1024): /* 2K */
852         case (4 * 1024): /* 4K */
853         case (64 * 1024): /* 64K */
854                 cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
855                 break;
856         default:
857                 /* Invalid compression level */
858                 cfg.lock_id_comp_level = 0; /* no compression */
859         }
860
861         if (ev_queue->depth_threshold == 0) {
862                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
863                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
864         } else
865                 cfg.depth_threshold = ev_queue->depth_threshold;
866
867         ret = dlb2_iface_ldb_queue_create(handle, &cfg);
868         if (ret < 0) {
869                 DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
870                              ret, dlb2_error_strings[cfg.response.status]);
871                 return -EINVAL;
872         }
873
874         qm_qid = cfg.response.id;
875
876         /* Save off queue config for debug, resource lookups, and reconfig */
877         queue->num_qid_inflights = cfg.num_qid_inflights;
878         queue->num_atm_inflights = cfg.num_atomic_inflights;
879
880         queue->sched_type = sched_type;
881         queue->config_state = DLB2_CONFIGURED;
882
883         DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
884                      qm_qid,
885                      cfg.num_atomic_inflights,
886                      cfg.num_sequence_numbers,
887                      cfg.num_qid_inflights);
888
889         return qm_qid;
890 }
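/*
 * Queue setup sketch showing the lock ID compression rule above
 * (illustrative): an application expecting roughly 200 concurrent atomic
 * flows would request one of the supported compression levels (here 256)
 * rather than the raw flow count, since any unsupported value falls back to
 * "no compression":
 *
 *     struct rte_event_queue_conf qconf = {
 *             .schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *             .nb_atomic_flows = 256,
 *             .nb_atomic_order_sequences = 64,
 *             .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *     };
 *
 *     ret = rte_event_queue_setup(0, 0, &qconf);  /- dev 0, queue 0 -/
 */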
891
892 static int
893 dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
894                               struct dlb2_eventdev_queue *ev_queue,
895                               const struct rte_event_queue_conf *queue_conf)
896 {
897         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
898         int32_t qm_qid;
899
900         if (queue_conf->nb_atomic_order_sequences)
901                 dlb2_program_sn_allocation(dlb2, queue_conf);
902
903         qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
904         if (qm_qid < 0) {
905                 DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
906
907                 return qm_qid;
908         }
909
910         dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
911
912         ev_queue->qm_queue.id = qm_qid;
913
914         return 0;
915 }
916
917 static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
918 {
919         int i, num = 0;
920
921         for (i = 0; i < dlb2->num_queues; i++) {
922                 if (dlb2->ev_queues[i].setup_done &&
923                     dlb2->ev_queues[i].qm_queue.is_directed)
924                         num++;
925         }
926
927         return num;
928 }
929
930 static void
931 dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
932                          struct dlb2_eventdev_queue *ev_queue)
933 {
934         struct dlb2_eventdev_port *ev_port;
935         int i, j;
936
937         for (i = 0; i < dlb2->num_ports; i++) {
938                 ev_port = &dlb2->ev_ports[i];
939
940                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
941                         if (!ev_port->link[j].valid ||
942                             ev_port->link[j].queue_id != ev_queue->id)
943                                 continue;
944
945                         ev_port->link[j].valid = false;
946                         ev_port->num_links--;
947                 }
948         }
949
950         ev_queue->num_links = 0;
951 }
952
953 static int
954 dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
955                           uint8_t ev_qid,
956                           const struct rte_event_queue_conf *queue_conf)
957 {
958         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
959         struct dlb2_eventdev_queue *ev_queue;
960         int ret;
961
962         if (queue_conf == NULL)
963                 return -EINVAL;
964
965         if (ev_qid >= dlb2->num_queues)
966                 return -EINVAL;
967
968         ev_queue = &dlb2->ev_queues[ev_qid];
969
970         ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
971                 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
972         ev_queue->id = ev_qid;
973         ev_queue->conf = *queue_conf;
974
975         if (!ev_queue->qm_queue.is_directed) {
976                 ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
977         } else {
978                 /* The directed queue isn't setup until link time, at which
979                  * point we know its directed port ID. Directed queue setup
980                  * will only fail if this queue is already setup or there are
981                  * no directed queues left to configure.
982                  */
983                 ret = 0;
984
985                 ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
986
987                 if (ev_queue->setup_done ||
988                     dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
989                         ret = -EINVAL;
990         }
991
992         /* Tear down pre-existing port->queue links */
993         if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
994                 dlb2_queue_link_teardown(dlb2, ev_queue);
995
996         if (!ret)
997                 ev_queue->setup_done = true;
998
999         return ret;
1000 }
1001
1002 static int
1003 dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
1004 {
1005         struct dlb2_cq_pop_qe *qe;
1006
1007         qe = rte_zmalloc(mz_name,
1008                         DLB2_NUM_QES_PER_CACHE_LINE *
1009                                 sizeof(struct dlb2_cq_pop_qe),
1010                         RTE_CACHE_LINE_SIZE);
1011
1012         if (qe == NULL) {
1013                 DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
1014                 return -ENOMEM;
1015         }
1016         qm_port->consume_qe = qe;
1017
1018         qe->qe_valid = 0;
1019         qe->qe_frag = 0;
1020         qe->qe_comp = 0;
1021         qe->cq_token = 1;
1022         /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
1023          * and so on.
1024          */
1025         qe->tokens = 0; /* set at run time */
1026         qe->meas_lat = 0;
1027         qe->no_dec = 0;
1028         /* Completion IDs are disabled */
1029         qe->cmp_id = 0;
1030
1031         return 0;
1032 }
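/*
 * Example of the 0-based token encoding noted above: to return 4 CQ tokens
 * at run time, the datapath sets consume_qe->tokens = 3 before issuing the
 * pop, since a value of N pops N + 1 tokens.
 */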
1033
1034 static int
1035 dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
1036 {
1037         struct dlb2_enqueue_qe *qe;
1038
1039         qe = rte_zmalloc(mz_name,
1040                         DLB2_NUM_QES_PER_CACHE_LINE *
1041                                 sizeof(struct dlb2_enqueue_qe),
1042                         RTE_CACHE_LINE_SIZE);
1043
1044         if (qe == NULL) {
1045                 DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
1046                 return -ENOMEM;
1047         }
1048         qm_port->int_arm_qe = qe;
1049
1050         /* V2 - INT ARM is CQ_TOKEN + FRAG */
1051         qe->qe_valid = 0;
1052         qe->qe_frag = 1;
1053         qe->qe_comp = 0;
1054         qe->cq_token = 1;
1055         qe->meas_lat = 0;
1056         qe->no_dec = 0;
1057         /* Completion IDs are disabled */
1058         qe->cmp_id = 0;
1059
1060         return 0;
1061 }
1062
1063 static int
1064 dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
1065 {
1066         int ret, sz;
1067
1068         sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
1069
1070         qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
1071
1072         if (qm_port->qe4 == NULL) {
1073                 DLB2_LOG_ERR("dlb2: no qe4 memory\n");
1074                 ret = -ENOMEM;
1075                 goto error_exit;
1076         }
1077
1078         ret = dlb2_init_int_arm_qe(qm_port, mz_name);
1079         if (ret < 0) {
1080                 DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
1081                 goto error_exit;
1082         }
1083
1084         ret = dlb2_init_consume_qe(qm_port, mz_name);
1085         if (ret < 0) {
1086                 DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
1087                 goto error_exit;
1088         }
1089
1090         return 0;
1091
1092 error_exit:
1093
1094         dlb2_free_qe_mem(qm_port);
1095
1096         return ret;
1097 }
1098
1099 static int
1100 dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
1101                         struct dlb2_eventdev_port *ev_port,
1102                         uint32_t dequeue_depth,
1103                         uint32_t enqueue_depth)
1104 {
1105         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1106         struct dlb2_create_ldb_port_args cfg = { {0} };
1107         int ret;
1108         struct dlb2_port *qm_port = NULL;
1109         char mz_name[RTE_MEMZONE_NAMESIZE];
1110         uint32_t qm_port_id;
1111         uint16_t ldb_credit_high_watermark;
1112         uint16_t dir_credit_high_watermark;
1113
1114         if (handle == NULL)
1115                 return -EINVAL;
1116
1117         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1118                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
1119                              DLB2_MIN_CQ_DEPTH);
1120                 return -EINVAL;
1121         }
1122
1123         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1124                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1125                              DLB2_MIN_ENQUEUE_DEPTH);
1126                 return -EINVAL;
1127         }
1128
1129         rte_spinlock_lock(&handle->resource_lock);
1130
1131         /* We round up to the next power of 2 if necessary */
1132         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1133         cfg.cq_depth_threshold = 1;
1134
1135         cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
1136
1137         if (handle->cos_id == DLB2_COS_DEFAULT)
1138                 cfg.cos_id = 0;
1139         else
1140                 cfg.cos_id = handle->cos_id;
1141
1142         cfg.cos_strict = 0;
1143
1144         /* User controls the LDB high watermark via enqueue depth. The DIR high
1145          * watermark is equal, unless the directed credit pool is too small.
1146          */
1147         ldb_credit_high_watermark = enqueue_depth;
1148
1149         /* If there are no directed ports, the kernel driver will ignore this
1150          * port's directed credit settings. Don't use enqueue_depth if it would
1151          * require more directed credits than are available.
1152          */
1153         dir_credit_high_watermark =
1154                 RTE_MIN(enqueue_depth,
1155                         handle->cfg.num_dir_credits / dlb2->num_ports);
1156
1157         /* Per QM values */
1158
1159         ret = dlb2_iface_ldb_port_create(handle, &cfg,  dlb2->poll_mode);
1160         if (ret < 0) {
1161                 DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
1162                              ret, dlb2_error_strings[cfg.response.status]);
1163                 goto error_exit;
1164         }
1165
1166         qm_port_id = cfg.response.id;
1167
1168         DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
1169                      ev_port->id, qm_port_id);
1170
1171         qm_port = &ev_port->qm_port;
1172         qm_port->ev_port = ev_port; /* back ptr */
1173         qm_port->dlb2 = dlb2; /* back ptr */
1174         /*
1175          * Allocate and init local qe struct(s).
1176          * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
1177          */
1178
1179         snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
1180                  ev_port->id);
1181
1182         ret = dlb2_init_qe_mem(qm_port, mz_name);
1183         if (ret < 0) {
1184                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1185                 goto error_exit;
1186         }
1187
1188         qm_port->id = qm_port_id;
1189
1190         qm_port->cached_ldb_credits = 0;
1191         qm_port->cached_dir_credits = 0;
1192         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1193          * the effective depth is smaller.
1194          */
1195         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1196         qm_port->cq_idx = 0;
1197         qm_port->cq_idx_unmasked = 0;
1198
1199         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1200                 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1201         else
1202                 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1203
1204         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1205         /* starting value of gen bit - it toggles at wrap time */
1206         qm_port->gen_bit = 1;
1207
1208         qm_port->int_armed = false;
1209
1210         /* Save off for later use in info and lookup APIs. */
1211         qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
1212
1213         qm_port->dequeue_depth = dequeue_depth;
1214
1215         qm_port->owed_tokens = 0;
1216         qm_port->issued_releases = 0;
1217
1218         /* Save config message too. */
1219         rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
1220
1221         /* update state */
1222         qm_port->state = PORT_STARTED; /* enabled at create time */
1223         qm_port->config_state = DLB2_CONFIGURED;
1224
1225         qm_port->dir_credits = dir_credit_high_watermark;
1226         qm_port->ldb_credits = ldb_credit_high_watermark;
1227         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1228         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1229
1230         DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1231                      qm_port_id,
1232                      dequeue_depth,
1233                      qm_port->ldb_credits,
1234                      qm_port->dir_credits);
1235
1236         rte_spinlock_unlock(&handle->resource_lock);
1237
1238         return 0;
1239
1240 error_exit:
1241
1242         if (qm_port)
1243                 dlb2_free_qe_mem(qm_port);
1244
1245         rte_spinlock_unlock(&handle->resource_lock);
1246
1247         DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
1248
1249         return ret;
1250 }
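/*
 * Worked example of the CQ index arithmetic above: for a requested
 * dequeue_depth of 32 in sparse poll mode, cq_depth = 32, cq_depth_mask =
 * (32 * 4) - 1 = 127 and gen_bit_shift = popcount(127) = 7, so the gen bit
 * toggles each time cq_idx_unmasked crosses a multiple of 128. In standard
 * (non-sparse) mode the mask is 31 and the shift is 5.
 */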
1251
1252 static void
1253 dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
1254                         struct dlb2_eventdev_port *ev_port)
1255 {
1256         struct dlb2_eventdev_queue *ev_queue;
1257         int i;
1258
1259         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1260                 if (!ev_port->link[i].valid)
1261                         continue;
1262
1263                 ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
1264
1265                 ev_port->link[i].valid = false;
1266                 ev_port->num_links--;
1267                 ev_queue->num_links--;
1268         }
1269 }
1270
1271 static int
1272 dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
1273                         struct dlb2_eventdev_port *ev_port,
1274                         uint32_t dequeue_depth,
1275                         uint32_t enqueue_depth)
1276 {
1277         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1278         struct dlb2_create_dir_port_args cfg = { {0} };
1279         int ret;
1280         struct dlb2_port *qm_port = NULL;
1281         char mz_name[RTE_MEMZONE_NAMESIZE];
1282         uint32_t qm_port_id;
1283         uint16_t ldb_credit_high_watermark;
1284         uint16_t dir_credit_high_watermark;
1285
1286         if (dlb2 == NULL || handle == NULL)
1287                 return -EINVAL;
1288
1289         if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1290                 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
1291                              DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
1292                 return -EINVAL;
1293         }
1294
1295         if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1296                 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1297                              DLB2_MIN_ENQUEUE_DEPTH);
1298                 return -EINVAL;
1299         }
1300
1301         rte_spinlock_lock(&handle->resource_lock);
1302
1303         /* Directed queues are configured at link time. */
1304         cfg.queue_id = -1;
1305
1306         /* We round up to the next power of 2 if necessary */
1307         cfg.cq_depth = rte_align32pow2(dequeue_depth);
1308         cfg.cq_depth_threshold = 1;
1309
1310         /* User controls the LDB high watermark via enqueue depth. The DIR high
1311          * watermark is equal, unless the directed credit pool is too small.
1312          */
1313         ldb_credit_high_watermark = enqueue_depth;
1314
1315         /* Don't use enqueue_depth if it would require more directed credits
1316          * than are available.
1317          */
1318         dir_credit_high_watermark =
1319                 RTE_MIN(enqueue_depth,
1320                         handle->cfg.num_dir_credits / dlb2->num_ports);
1321
1322         /* Per QM values */
1323
1324         ret = dlb2_iface_dir_port_create(handle, &cfg,  dlb2->poll_mode);
1325         if (ret < 0) {
1326                 DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
1327                              ret, dlb2_error_strings[cfg.response.status]);
1328                 goto error_exit;
1329         }
1330
1331         qm_port_id = cfg.response.id;
1332
1333         DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
1334                      ev_port->id, qm_port_id);
1335
1336         qm_port = &ev_port->qm_port;
1337         qm_port->ev_port = ev_port; /* back ptr */
1338         qm_port->dlb2 = dlb2;  /* back ptr */
1339
1340         /*
1341          * Init local qe struct(s).
1342          * Note: MOVDIR64 requires the enqueue QE to be aligned
1343          */
1344
1345         snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
1346                  ev_port->id);
1347
1348         ret = dlb2_init_qe_mem(qm_port, mz_name);
1349
1350         if (ret < 0) {
1351                 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1352                 goto error_exit;
1353         }
1354
1355         qm_port->id = qm_port_id;
1356
1357         qm_port->cached_ldb_credits = 0;
1358         qm_port->cached_dir_credits = 0;
1359         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1360          * the effective depth is smaller.
1361          */
1362         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1363         qm_port->cq_idx = 0;
1364         qm_port->cq_idx_unmasked = 0;
1365
1366         if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1367                 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1368         else
1369                 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1370
1371         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1372         /* starting value of gen bit - it toggles at wrap time */
1373         qm_port->gen_bit = 1;
1374
1375         qm_port->int_armed = false;
1376
1377         /* Save off for later use in info and lookup APIs. */
1378         qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
1379
1380         qm_port->dequeue_depth = dequeue_depth;
1381
1382         qm_port->owed_tokens = 0;
1383         qm_port->issued_releases = 0;
1384
1385         /* Save config message too. */
1386         rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
1387
1388         /* update state */
1389         qm_port->state = PORT_STARTED; /* enabled at create time */
1390         qm_port->config_state = DLB2_CONFIGURED;
1391
1392         qm_port->dir_credits = dir_credit_high_watermark;
1393         qm_port->ldb_credits = ldb_credit_high_watermark;
1394         qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1395         qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1396
1397         DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
1398                      qm_port_id,
1399                      dequeue_depth,
1400                      dir_credit_high_watermark,
1401                      ldb_credit_high_watermark);
1402
1403         rte_spinlock_unlock(&handle->resource_lock);
1404
1405         return 0;
1406
1407 error_exit:
1408
1409         if (qm_port)
1410                 dlb2_free_qe_mem(qm_port);
1411
1412         rte_spinlock_unlock(&handle->resource_lock);
1413
1414         DLB2_LOG_ERR("dlb2: create dir port failed!\n");
1415
1416         return ret;
1417 }
1418
1419 static int
1420 dlb2_eventdev_port_setup(struct rte_eventdev *dev,
1421                          uint8_t ev_port_id,
1422                          const struct rte_event_port_conf *port_conf)
1423 {
1424         struct dlb2_eventdev *dlb2;
1425         struct dlb2_eventdev_port *ev_port;
1426         int ret;
1427
1428         if (dev == NULL || port_conf == NULL) {
1429                 DLB2_LOG_ERR("Null parameter\n");
1430                 return -EINVAL;
1431         }
1432
1433         dlb2 = dlb2_pmd_priv(dev);
1434
1435         if (ev_port_id >= DLB2_MAX_NUM_PORTS)
1436                 return -EINVAL;
1437
1438         if (port_conf->dequeue_depth >
1439                 evdev_dlb2_default_info.max_event_port_dequeue_depth ||
1440             port_conf->enqueue_depth >
1441                 evdev_dlb2_default_info.max_event_port_enqueue_depth)
1442                 return -EINVAL;
1443
1444         ev_port = &dlb2->ev_ports[ev_port_id];
1445         /* configured? */
1446         if (ev_port->setup_done) {
1447                 DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
1448                 return -EINVAL;
1449         }
1450
1451         ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1452                 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1453
1454         if (!ev_port->qm_port.is_directed) {
1455                 ret = dlb2_hw_create_ldb_port(dlb2,
1456                                               ev_port,
1457                                               port_conf->dequeue_depth,
1458                                               port_conf->enqueue_depth);
1459                 if (ret < 0) {
1460                         DLB2_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1461                                      ev_port_id);
1462
1463                         return ret;
1464                 }
1465         } else {
1466                 ret = dlb2_hw_create_dir_port(dlb2,
1467                                               ev_port,
1468                                               port_conf->dequeue_depth,
1469                                               port_conf->enqueue_depth);
1470                 if (ret < 0) {
1471                         DLB2_LOG_ERR("Failed to create the DIR port\n");
1472                         return ret;
1473                 }
1474         }
1475
1476         /* Save off port config for reconfig */
1477         ev_port->conf = *port_conf;
1478
1479         ev_port->id = ev_port_id;
1480         ev_port->enq_configured = true;
1481         ev_port->setup_done = true;
1482         ev_port->inflight_max = port_conf->new_event_threshold;
1483         ev_port->implicit_release = !(port_conf->event_port_cfg &
1484                   RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1485         ev_port->outstanding_releases = 0;
1486         ev_port->inflight_credits = 0;
1487         ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
1488         ev_port->dlb2 = dlb2; /* reverse link */
1489
1490         /* Tear down pre-existing port->queue links */
1491         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1492                 dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
1493
1494         dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
1495
1496         return 0;
1497 }
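/*
 * Port setup sketch (illustrative): a load-balanced port and a single-link
 * (directed) port differ only in event_port_cfg; both depths must respect
 * the limits advertised by info_get above.
 *
 *     struct rte_event_port_conf pconf = {
 *             .new_event_threshold = 2048,
 *             .dequeue_depth = 32,
 *             .enqueue_depth = 32,
 *             .event_port_cfg = 0,                     /- LDB port -/
 *     };
 *
 *     ret = rte_event_port_setup(0, 0, &pconf);
 *
 *     pconf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;  /- DIR port -/
 *     ret = rte_event_port_setup(0, 7, &pconf);
 */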
1498
1499 static int16_t
1500 dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
1501                             uint32_t qm_port_id,
1502                             uint16_t qm_qid,
1503                             uint8_t priority)
1504 {
1505         struct dlb2_map_qid_args cfg;
1506         int32_t ret;
1507
1508         if (handle == NULL)
1509                 return -EINVAL;
1510
1511         /* Build message */
1512         cfg.port_id = qm_port_id;
1513         cfg.qid = qm_qid;
1514         cfg.priority = EV_TO_DLB2_PRIO(priority);
1515
1516         ret = dlb2_iface_map_qid(handle, &cfg);
1517         if (ret < 0) {
1518                 DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
1519                              ret, dlb2_error_strings[cfg.response.status]);
1520                 DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1521                              handle->domain_id, cfg.port_id,
1522                              cfg.qid,
1523                              cfg.priority);
1524         } else {
1525                 DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
1526                              qm_qid, qm_port_id);
1527         }
1528
1529         return ret;
1530 }
1531
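/*
 * Link a load-balanced event queue to an event port.  If the queue is
 * already present in the port's link array at this priority, reuse that
 * slot; otherwise claim the first free slot and request the QID->CQ mapping
 * from hardware.
 */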
1532 static int
1533 dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
1534                           struct dlb2_eventdev_port *ev_port,
1535                           struct dlb2_eventdev_queue *ev_queue,
1536                           uint8_t priority)
1537 {
1538         int first_avail = -1;
1539         int ret, i;
1540
1541         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1542                 if (ev_port->link[i].valid) {
1543                         if (ev_port->link[i].queue_id == ev_queue->id &&
1544                             ev_port->link[i].priority == priority) {
1545                                 if (ev_port->link[i].mapped)
1546                                         return 0; /* already mapped */
1547                                 first_avail = i;
1548                         }
1549                 } else if (first_avail == -1)
1550                         first_avail = i;
1551         }
1552         if (first_avail == -1) {
1553                 DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
1554                              ev_port->qm_port.id);
1555                 return -EINVAL;
1556         }
1557
1558         ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
1559                                           ev_port->qm_port.id,
1560                                           ev_queue->qm_queue.id,
1561                                           priority);
1562
1563         if (!ret)
1564                 ev_port->link[first_avail].mapped = true;
1565
1566         return ret;
1567 }
1568
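/*
 * Create a directed queue bound to the given directed port, applying the
 * default depth threshold if the event queue did not configure one.
 * Returns the hardware queue ID on success or -EINVAL on failure.
 */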
1569 static int32_t
1570 dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
1571                          struct dlb2_eventdev_queue *ev_queue,
1572                          int32_t qm_port_id)
1573 {
1574         struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1575         struct dlb2_create_dir_queue_args cfg;
1576         int32_t ret;
1577
1578         /* The directed port is always configured before its queue */
1579         cfg.port_id = qm_port_id;
1580
1581         if (ev_queue->depth_threshold == 0) {
1582                 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1583                 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1584         } else
1585                 cfg.depth_threshold = ev_queue->depth_threshold;
1586
1587         ret = dlb2_iface_dir_queue_create(handle, &cfg);
1588         if (ret < 0) {
1589                 DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
1590                              ret, dlb2_error_strings[cfg.response.status]);
1591                 return -EINVAL;
1592         }
1593
1594         return cfg.response.id;
1595 }
1596
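/*
 * Create the directed queue backing an event queue and record the mapping
 * from hardware QID back to the eventdev queue ID.
 */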
1597 static int
1598 dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
1599                               struct dlb2_eventdev_queue *ev_queue,
1600                               struct dlb2_eventdev_port *ev_port)
1601 {
1602         int32_t qm_qid;
1603
1604         qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
1605
1606         if (qm_qid < 0) {
1607                 DLB2_LOG_ERR("Failed to create the DIR queue\n");
1608                 return qm_qid;
1609         }
1610
1611         dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1612
1613         ev_queue->qm_queue.id = qm_qid;
1614
1615         return 0;
1616 }
1617
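/*
 * Perform the hardware side of a port->queue link.  Directed queues are
 * created here (they require a configured port), while load-balanced queues
 * are mapped to the port's CQ.  Nothing is done while the device is stopped;
 * links are applied at start time instead.
 */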
1618 static int
1619 dlb2_do_port_link(struct rte_eventdev *dev,
1620                   struct dlb2_eventdev_queue *ev_queue,
1621                   struct dlb2_eventdev_port *ev_port,
1622                   uint8_t prio)
1623 {
1624         struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1625         int err;
1626
1627         /* Don't link until start time. */
1628         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1629                 return 0;
1630
1631         if (ev_queue->qm_queue.is_directed)
1632                 err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
1633         else
1634                 err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
1635
1636         if (err) {
1637                 DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1638                              ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1639                              ev_queue->id, ev_port->id);
1640
1641                 rte_errno = err;
1642                 return -1;
1643         }
1644
1645         return 0;
1646 }
1647
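/*
 * Validate a requested port->queue link: the queue ID must be in range and
 * configured, the port and queue types (DIR vs. LDB) must match, a link
 * slot must be available, and directed ports and queues may carry at most
 * one link.  Sets rte_errno and returns -1 on failure.
 */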
1648 static int
1649 dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
1650                         uint8_t queue_id,
1651                         bool link_exists,
1652                         int index)
1653 {
1654         struct dlb2_eventdev *dlb2 = ev_port->dlb2;
1655         struct dlb2_eventdev_queue *ev_queue;
1656         bool port_is_dir, queue_is_dir;
1657
1658         if (queue_id > dlb2->num_queues) {
1659                 rte_errno = -EINVAL;
1660                 return -1;
1661         }
1662
1663         ev_queue = &dlb2->ev_queues[queue_id];
1664
1665         if (!ev_queue->setup_done &&
1666             ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
1667                 rte_errno = -EINVAL;
1668                 return -1;
1669         }
1670
1671         port_is_dir = ev_port->qm_port.is_directed;
1672         queue_is_dir = ev_queue->qm_queue.is_directed;
1673
1674         if (port_is_dir != queue_is_dir) {
1675                 DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
1676                              queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1677                              port_is_dir ? "DIR" : "LDB", ev_port->id);
1678
1679                 rte_errno = -EINVAL;
1680                 return -1;
1681         }
1682
1683         /* Check if there is space for the requested link */
1684         if (!link_exists && index == -1) {
1685                 DLB2_LOG_ERR("no space for new link\n");
1686                 rte_errno = -ENOSPC;
1687                 return -1;
1688         }
1689
1690         /* Check if the directed port is already linked */
1691         if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1692             !link_exists) {
1693                 DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1694                              ev_port->id);
1695                 rte_errno = -EINVAL;
1696                 return -1;
1697         }
1698
1699         /* Check if the directed queue is already linked */
1700         if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1701             !link_exists) {
1702                 DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1703                              ev_queue->id);
1704                 rte_errno = -EINVAL;
1705                 return -1;
1706         }
1707
1708         return 0;
1709 }
1710
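/*
 * Eventdev port_link callback.  Validates and applies each requested link,
 * returning the number of queues successfully linked; a partial count
 * indicates the index of the first queue that could not be linked.
 */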
1711 static int
1712 dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1713                         const uint8_t queues[], const uint8_t priorities[],
1714                         uint16_t nb_links)
1716 {
1717         struct dlb2_eventdev_port *ev_port = event_port;
1718         struct dlb2_eventdev *dlb2;
1719         int i, j;
1720
1721         RTE_SET_USED(dev);
1722
1723         if (ev_port == NULL) {
1724                 DLB2_LOG_ERR("dlb2: evport not setup\n");
1725                 rte_errno = -EINVAL;
1726                 return 0;
1727         }
1728
1729         if (!ev_port->setup_done &&
1730             ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
1731                 DLB2_LOG_ERR("dlb2: evport not setup\n");
1732                 rte_errno = -EINVAL;
1733                 return 0;
1734         }
1735
1736         /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
1737          * queues pointer.
1738          */
1739         if (nb_links == 0) {
1740                 DLB2_LOG_DBG("dlb2: nb_links is 0\n");
1741                 return 0; /* Ignore and return success */
1742         }
1743
1744         dlb2 = ev_port->dlb2;
1745
1746         DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
1747                      nb_links,
1748                      ev_port->qm_port.is_directed ? "DIR" : "LDB",
1749                      ev_port->id);
1750
1751         for (i = 0; i < nb_links; i++) {
1752                 struct dlb2_eventdev_queue *ev_queue;
1753                 uint8_t queue_id, prio;
1754                 bool found = false;
1755                 int index = -1;
1756
1757                 queue_id = queues[i];
1758                 prio = priorities[i];
1759
1760                 /* Check if the link already exists. */
1761                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1762                         if (ev_port->link[j].valid) {
1763                                 if (ev_port->link[j].queue_id == queue_id) {
1764                                         found = true;
1765                                         index = j;
1766                                         break;
1767                                 }
1768                         } else if (index == -1) {
1769                                 index = j;
1770                         }
1771
1772                 /* No link slot available; cannot link */
1773                 if (index == -1)
1774                         break;
1775
1776                 /* Check if already linked at the requested priority */
1777                 if (found && ev_port->link[j].priority == prio)
1778                         continue;
1779
1780                 if (dlb2_validate_port_link(ev_port, queue_id, found, index))
1781                         break; /* return index of offending queue */
1782
1783                 ev_queue = &dlb2->ev_queues[queue_id];
1784
1785                 if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
1786                         break; /* return index of offending queue */
1787
1788                 ev_queue->num_links++;
1789
1790                 ev_port->link[index].queue_id = queue_id;
1791                 ev_port->link[index].priority = prio;
1792                 ev_port->link[index].valid = true;
1793                 /* Count a new link only if the entry did not already exist */
1794                 if (!found)
1795                         ev_port->num_links++;
1796         }
1797         return i;
1798 }
1799
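/*
 * Unmap a load-balanced queue from a port's CQ by issuing an unmap_qid
 * request through the hardware interface layer.
 */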
1800 static int16_t
1801 dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
1802                                 uint32_t qm_port_id,
1803                                 uint16_t qm_qid)
1804 {
1805         struct dlb2_unmap_qid_args cfg;
1806         int32_t ret;
1807
1808         if (handle == NULL)
1809                 return -EINVAL;
1810
1811         cfg.port_id = qm_port_id;
1812         cfg.qid = qm_qid;
1813
1814         ret = dlb2_iface_unmap_qid(handle, &cfg);
1815         if (ret < 0)
1816                 DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
1817                              ret, dlb2_error_strings[cfg.response.status]);
1818
1819         return ret;
1820 }
1821
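/*
 * Unlink a load-balanced event queue from an event port.  Queues that were
 * never mapped are accepted as already unlinked, and nothing is done while
 * the device is stopped.
 */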
1822 static int
1823 dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
1824                             struct dlb2_eventdev_port *ev_port,
1825                             struct dlb2_eventdev_queue *ev_queue)
1826 {
1827         int ret, i;
1828
1829         /* Don't unlink until start time. */
1830         if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1831                 return 0;
1832
1833         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1834                 if (ev_port->link[i].valid &&
1835                     ev_port->link[i].queue_id == ev_queue->id)
1836                         break; /* found */
1837         }
1838
1839         /* This is expected with the eventdev API, which blindly
1840          * attempts to unmap all queues.
1841          */
1842         if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1843                 DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
1844                              ev_queue->qm_queue.id,
1845                              ev_port->qm_port.id);
1846                 return 0;
1847         }
1848
1849         ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
1850                                               ev_port->qm_port.id,
1851                                               ev_queue->qm_queue.id);
1852         if (!ret)
1853                 ev_port->link[i].mapped = false;
1854
1855         return ret;
1856 }
1857
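/*
 * Eventdev port_unlink callback.  Returns the number of queues successfully
 * unlinked; unlink requests on directed ports are ignored and reported as
 * successful.
 */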
1858 static int
1859 dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
1860                           uint8_t queues[], uint16_t nb_unlinks)
1861 {
1862         struct dlb2_eventdev_port *ev_port = event_port;
1863         struct dlb2_eventdev *dlb2;
1864         int i;
1865
1866         RTE_SET_USED(dev);
1867
1868         if (!ev_port->setup_done) {
1869                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1870                              ev_port->id);
1871                 rte_errno = -EINVAL;
1872                 return 0;
1873         }
1874
1875         if (queues == NULL || nb_unlinks == 0) {
1876                 DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
1877                 return 0; /* Ignore and return success */
1878         }
1879
1880         /* FIXME: How to handle unlink on a directed port? */
1881         if (ev_port->qm_port.is_directed) {
1882                 DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
1883                              ev_port->id);
1884                 rte_errno = 0;
1885                 return nb_unlinks; /* as if success */
1886         }
1887
1888         dlb2 = ev_port->dlb2;
1889
1890         for (i = 0; i < nb_unlinks; i++) {
1891                 struct dlb2_eventdev_queue *ev_queue;
1892                 int ret, j;
1893
1894                 if (queues[i] >= dlb2->num_queues) {
1895                         DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
1896                         rte_errno = -EINVAL;
1897                         return i; /* return index of offending queue */
1898                 }
1899
1900                 ev_queue = &dlb2->ev_queues[queues[i]];
1901
1902                 /* Does a link exist? */
1903                 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1904                         if (ev_port->link[j].queue_id == queues[i] &&
1905                             ev_port->link[j].valid)
1906                                 break;
1907
1908                 if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1909                         continue;
1910
1911                 ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
1912                 if (ret) {
1913                         DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
1914                                      ret, ev_port->id, queues[i]);
1915                         rte_errno = -ENOENT;
1916                         return i; /* return index of offending queue */
1917                 }
1918
1919                 ev_port->link[j].valid = false;
1920                 ev_port->num_links--;
1921                 ev_queue->num_links--;
1922         }
1923
1924         return nb_unlinks;
1925 }
1926
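/*
 * Eventdev port_unlinks_in_progress callback.  Queries the hardware for the
 * number of QID unmap operations still pending on this port's CQ.
 */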
1927 static int
1928 dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
1929                                        void *event_port)
1930 {
1931         struct dlb2_eventdev_port *ev_port = event_port;
1932         struct dlb2_eventdev *dlb2;
1933         struct dlb2_hw_dev *handle;
1934         struct dlb2_pending_port_unmaps_args cfg;
1935         int ret;
1936
1937         RTE_SET_USED(dev);
1938
1939         if (!ev_port->setup_done) {
1940                 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1941                              ev_port->id);
1942                 rte_errno = -EINVAL;
1943                 return 0;
1944         }
1945
1946         cfg.port_id = ev_port->qm_port.id;
1947         dlb2 = ev_port->dlb2;
1948         handle = &dlb2->qm_instance;
1949         ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
1950
1951         if (ret < 0) {
1952                 DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
1953                              ret, dlb2_error_strings[cfg.response.status]);
1954                 return ret;
1955         }
1956
1957         return cfg.response.id;
1958 }
1959
1960 static void
1961 dlb2_entry_points_init(struct rte_eventdev *dev)
1962 {
1963         /* Expose PMD's eventdev interface */
1964         static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
1965                 .dev_infos_get    = dlb2_eventdev_info_get,
1966                 .dev_configure    = dlb2_eventdev_configure,
1967                 .queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
1968                 .queue_setup      = dlb2_eventdev_queue_setup,
1969                 .port_def_conf    = dlb2_eventdev_port_default_conf_get,
1970                 .port_setup       = dlb2_eventdev_port_setup,
1971                 .port_link        = dlb2_eventdev_port_link,
1972                 .port_unlink      = dlb2_eventdev_port_unlink,
1973                 .port_unlinks_in_progress =
1974                                     dlb2_eventdev_port_unlinks_in_progress,
1975                 .dump             = dlb2_eventdev_dump,
1976                 .xstats_get       = dlb2_eventdev_xstats_get,
1977                 .xstats_get_names = dlb2_eventdev_xstats_get_names,
1978                 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
1979                 .xstats_reset       = dlb2_eventdev_xstats_reset,
1980         };
1981
1982         dev->dev_ops = &dlb2_eventdev_entry_ops;
1983 }
1984
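/*
 * Primary-process probe: open the hardware device, query its version,
 * resources and CQ poll mode, then initialize xstats, the low-level I/O
 * path, the eventdev entry points, and the queue depth thresholds.
 */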
1985 int
1986 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
1987                             const char *name,
1988                             struct dlb2_devargs *dlb2_args)
1989 {
1990         struct dlb2_eventdev *dlb2;
1991         int err;
1992
1993         dlb2 = dev->data->dev_private;
1994
1995         dlb2->event_dev = dev; /* backlink */
1996
1997         evdev_dlb2_default_info.driver_name = name;
1998
1999         dlb2->max_num_events_override = dlb2_args->max_num_events;
2000         dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
2001         dlb2->qm_instance.cos_id = dlb2_args->cos_id;
2002
2003         err = dlb2_iface_open(&dlb2->qm_instance, name);
2004         if (err < 0) {
2005                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
2006                              err);
2007                 return err;
2008         }
2009
2010         err = dlb2_iface_get_device_version(&dlb2->qm_instance,
2011                                             &dlb2->revision);
2012         if (err < 0) {
2013                 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
2014                              err);
2015                 return err;
2016         }
2017
2018         err = dlb2_hw_query_resources(dlb2);
2019         if (err) {
2020                 DLB2_LOG_ERR("get resources err=%d for %s\n",
2021                              err, name);
2022                 return err;
2023         }
2024
2025         dlb2_iface_hardware_init(&dlb2->qm_instance);
2026
2027         err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
2028         if (err < 0) {
2029                 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
2030                              err);
2031                 return err;
2032         }
2033
2034         /* Complete xstats runtime initialization */
2035         err = dlb2_xstats_init(dlb2);
2036         if (err) {
2037                 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
2038                 return err;
2039         }
2040
2041         rte_spinlock_init(&dlb2->qm_instance.resource_lock);
2042
2043         dlb2_iface_low_level_io_init();
2044
2045         dlb2_entry_points_init(dev);
2046
2047         dlb2_init_queue_depth_thresholds(dlb2,
2048                                          dlb2_args->qid_depth_thresholds.val);
2049
2050         return 0;
2051 }
2052
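/*
 * Secondary-process probe: open the device interface, query resources, and
 * set up the process-local I/O path and eventdev entry points.
 */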
2053 int
2054 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
2055                               const char *name)
2056 {
2057         struct dlb2_eventdev *dlb2;
2058         int err;
2059
2060         dlb2 = dev->data->dev_private;
2061
2062         evdev_dlb2_default_info.driver_name = name;
2063
2064         err = dlb2_iface_open(&dlb2->qm_instance, name);
2065         if (err < 0) {
2066                 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
2067                              err);
2068                 return err;
2069         }
2070
2071         err = dlb2_hw_query_resources(dlb2);
2072         if (err) {
2073                 DLB2_LOG_ERR("get resources err=%d for %s\n",
2074                              err, name);
2075                 return err;
2076         }
2077
2078         dlb2_iface_low_level_io_init();
2079
2080         dlb2_entry_points_init(dev);
2081
2082         return 0;
2083 }
2084
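/*
 * Parse the devargs string into dlb2_args.  A string containing unsupported
 * keys is ignored with an informational log; a malformed value for a
 * supported key aborts parsing with an error.
 */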
2085 int
2086 dlb2_parse_params(const char *params,
2087                   const char *name,
2088                   struct dlb2_devargs *dlb2_args)
2089 {
2090         int ret = 0;
2091         static const char * const args[] = { NUMA_NODE_ARG,
2092                                              DLB2_MAX_NUM_EVENTS,
2093                                              DLB2_NUM_DIR_CREDITS,
2094                                              DEV_ID_ARG,
2095                                              DLB2_QID_DEPTH_THRESH_ARG,
2096                                              DLB2_COS_ARG,
2097                                              NULL };
2098
2099         if (params != NULL && params[0] != '\0') {
2100                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
2101
2102                 if (kvlist == NULL) {
2103                         RTE_LOG(INFO, PMD,
2104                                 "Ignoring unsupported parameters when creating device '%s'\n",
2105                                 name);
2106                 } else {
2107                         ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
2108                                                  set_numa_node,
2109                                                  &dlb2_args->socket_id);
2110                         if (ret != 0) {
2111                                 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
2112                                              name);
2113                                 rte_kvargs_free(kvlist);
2114                                 return ret;
2115                         }
2116
2117                         ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
2118                                                  set_max_num_events,
2119                                                  &dlb2_args->max_num_events);
2120                         if (ret != 0) {
2121                                 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
2122                                              name);
2123                                 rte_kvargs_free(kvlist);
2124                                 return ret;
2125                         }
2126
2127                         ret = rte_kvargs_process(kvlist,
2128                                         DLB2_NUM_DIR_CREDITS,
2129                                         set_num_dir_credits,
2130                                         &dlb2_args->num_dir_credits_override);
2131                         if (ret != 0) {
2132                                 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
2133                                              name);
2134                                 rte_kvargs_free(kvlist);
2135                                 return ret;
2136                         }
2137
2138                         ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
2139                                                  set_dev_id,
2140                                                  &dlb2_args->dev_id);
2141                         if (ret != 0) {
2142                                 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
2143                                              name);
2144                                 rte_kvargs_free(kvlist);
2145                                 return ret;
2146                         }
2147
2148                         ret = rte_kvargs_process(
2149                                         kvlist,
2150                                         DLB2_QID_DEPTH_THRESH_ARG,
2151                                         set_qid_depth_thresh,
2152                                         &dlb2_args->qid_depth_thresholds);
2153                         if (ret != 0) {
2154                                 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
2155                                              name);
2156                                 rte_kvargs_free(kvlist);
2157                                 return ret;
2158                         }
2159
2160                         ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
2161                                                  set_cos,
2162                                                  &dlb2_args->cos_id);
2163                         if (ret != 0) {
2164                                 DLB2_LOG_ERR("%s: Error parsing cos parameter",
2165                                              name);
2166                                 rte_kvargs_free(kvlist);
2167                                 return ret;
2168                         }
2169
2170                         rte_kvargs_free(kvlist);
2171                 }
2172         }
2173         return ret;
2174 }
2175 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);