event/dlb: add enqueue and its burst variants
dpdk.git: drivers/event/dlb/dlb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <nmmintrin.h>
8 #include <pthread.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/fcntl.h>
14 #include <sys/mman.h>
15 #include <unistd.h>
16
17 #include <rte_common.h>
18 #include <rte_config.h>
19 #include <rte_cycles.h>
20 #include <rte_debug.h>
21 #include <rte_dev.h>
22 #include <rte_errno.h>
23 #include <rte_io.h>
24 #include <rte_kvargs.h>
25 #include <rte_log.h>
26 #include <rte_malloc.h>
27 #include <rte_mbuf.h>
28 #include <rte_prefetch.h>
29 #include <rte_ring.h>
30 #include <rte_string_fns.h>
31
32 #include <rte_eventdev.h>
33 #include <rte_eventdev_pmd.h>
34
35 #include "dlb_priv.h"
36 #include "dlb_iface.h"
37 #include "dlb_inline_fns.h"
38
39 /*
40  * Resources exposed to eventdev.
41  */
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
44 #endif
45 static struct rte_event_dev_info evdev_dlb_default_info = {
46         .driver_name = "", /* probe will set */
47         .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
48         .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
50         .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 #else
52         .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
53 #endif
54         .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
55         .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
56         .max_event_priority_levels = DLB_QID_PRIORITIES,
57         .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
58         .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
59         .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
60         .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
61         .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
62         .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
63         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
64                           RTE_EVENT_DEV_CAP_EVENT_QOS |
65                           RTE_EVENT_DEV_CAP_BURST_MODE |
66                           RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
67                           RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
68                           RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
69 };
70
71 struct process_local_port_data
72 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
73
74 uint32_t
75 dlb_get_queue_depth(struct dlb_eventdev *dlb,
76                     struct dlb_eventdev_queue *queue)
77 {
78         /* Dummy for now, so the "xstats" patch compiles */
79         RTE_SET_USED(dlb);
80         RTE_SET_USED(queue);
81
82         return 0;
83 }
84
85 static int
86 dlb_hw_query_resources(struct dlb_eventdev *dlb)
87 {
88         struct dlb_hw_dev *handle = &dlb->qm_instance;
89         struct dlb_hw_resource_info *dlb_info = &handle->info;
90         int ret;
91
92         ret = dlb_iface_get_num_resources(handle,
93                                           &dlb->hw_rsrc_query_results);
94         if (ret) {
95                 DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
96                 return ret;
97         }
98
99         /* Complete filling in device resource info returned to evdev app,
100          * overriding any default values.
101          * The capabilities (CAPs) were set at compile time.
102          */
103
104         evdev_dlb_default_info.max_event_queues =
105                 dlb->hw_rsrc_query_results.num_ldb_queues;
106
107         evdev_dlb_default_info.max_event_ports =
108                 dlb->hw_rsrc_query_results.num_ldb_ports;
109
110         evdev_dlb_default_info.max_num_events =
111                 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
112
113         /* Save off values used when creating the scheduling domain. */
114
115         handle->info.num_sched_domains =
116                 dlb->hw_rsrc_query_results.num_sched_domains;
117
118         handle->info.hw_rsrc_max.nb_events_limit =
119                 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
120
121         handle->info.hw_rsrc_max.num_queues =
122                 dlb->hw_rsrc_query_results.num_ldb_queues +
123                 dlb->hw_rsrc_query_results.num_dir_ports;
124
125         handle->info.hw_rsrc_max.num_ldb_queues =
126                 dlb->hw_rsrc_query_results.num_ldb_queues;
127
128         handle->info.hw_rsrc_max.num_ldb_ports =
129                 dlb->hw_rsrc_query_results.num_ldb_ports;
130
131         handle->info.hw_rsrc_max.num_dir_ports =
132                 dlb->hw_rsrc_query_results.num_dir_ports;
133
134         handle->info.hw_rsrc_max.reorder_window_size =
135                 dlb->hw_rsrc_query_results.num_hist_list_entries;
136
137         rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
138
139         return 0;
140 }
141
142 static void
143 dlb_free_qe_mem(struct dlb_port *qm_port)
144 {
145         if (qm_port == NULL)
146                 return;
147
148         rte_free(qm_port->qe4);
149         qm_port->qe4 = NULL;
150
151         rte_free(qm_port->consume_qe);
152         qm_port->consume_qe = NULL;
153 }
154
155 static int
156 dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
157 {
158         struct dlb_cq_pop_qe *qe;
159
160         qe = rte_zmalloc(mz_name,
161                         DLB_NUM_QES_PER_CACHE_LINE *
162                                 sizeof(struct dlb_cq_pop_qe),
163                         RTE_CACHE_LINE_SIZE);
164
165         if (qe == NULL) {
166                 DLB_LOG_ERR("dlb: no memory for consume_qe\n");
167                 return -ENOMEM;
168         }
169
170         qm_port->consume_qe = qe;
171
172         qe->qe_valid = 0;
173         qe->qe_frag = 0;
174         qe->qe_comp = 0;
175         qe->cq_token = 1;
176         /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
177          * and so on.
178          */
179         qe->tokens = 0; /* set at run time */
180         qe->meas_lat = 0;
181         qe->no_dec = 0;
182         /* Completion IDs are disabled */
183         qe->cmp_id = 0;
184
185         return 0;
186 }
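/*
 * Illustrative sketch, not part of this patch: because the hardware treats
 * the pop QE "tokens" field as zero-based, a helper that returns 'n' CQ
 * tokens would program tokens = n - 1. The helper name is hypothetical; the
 * real value is filled in at run time on the dequeue path.
 */
static inline void
dlb_example_set_token_pop_count(struct dlb_cq_pop_qe *qe, uint16_t n)
{
        /* '0' returns one token, '1' returns two, and so on */
        qe->tokens = n - 1;
}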
187
188 static int
189 dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
190 {
191         int ret, sz;
192
193         sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
194
195         qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
196
197         if (qm_port->qe4 == NULL) {
198                 DLB_LOG_ERR("dlb: no qe4 memory\n");
199                 ret = -ENOMEM;
200                 goto error_exit;
201         }
202
203         ret = dlb_init_consume_qe(qm_port, mz_name);
204         if (ret < 0) {
205                 DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
206                 goto error_exit;
207         }
208
209         return 0;
210
211 error_exit:
212
213         dlb_free_qe_mem(qm_port);
214
215         return ret;
216 }
217
218 /* Wrapper for string to int conversion, used in place of atoi(), which is
219  * unsafe.
220  */
221 #define DLB_BASE_10 10
222
223 static int
224 dlb_string_to_int(int *result, const char *str)
225 {
226         long ret;
227         char *endstr;
228
229         if (str == NULL || result == NULL)
230                 return -EINVAL;
231
232         errno = 0;
233         ret = strtol(str, &endstr, DLB_BASE_10);
234         if (errno)
235                 return -errno;
236
237         /* long int and int may be different width for some architectures */
238         if (ret < INT_MIN || ret > INT_MAX || endstr == str)
239                 return -EINVAL;
240
241         *result = ret;
242         return 0;
243 }
244
245 static int
246 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
247 {
248         int *socket_id = opaque;
249         int ret;
250
251         ret = dlb_string_to_int(socket_id, value);
252         if (ret < 0)
253                 return ret;
254
255         if (*socket_id > RTE_MAX_NUMA_NODES)
256                 return -EINVAL;
257
258         return 0;
259 }
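/*
 * Illustrative usage sketch, not part of this patch: the devarg callbacks in
 * this file are meant to be driven by rte_kvargs during probe. The
 * "numa_node" key string and the helper name are assumptions made for this
 * example only.
 */
static inline int
dlb_example_parse_numa_node(struct rte_kvargs *kvlist, int *socket_id)
{
        /* Invokes set_numa_node() for each "numa_node" key, which in turn
         * funnels the value through dlb_string_to_int().
         */
        return rte_kvargs_process(kvlist, "numa_node", set_numa_node,
                                  socket_id);
}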
260
261 static int
262 set_max_num_events(const char *key __rte_unused,
263                    const char *value,
264                    void *opaque)
265 {
266         int *max_num_events = opaque;
267         int ret;
268
269         if (value == NULL || opaque == NULL) {
270                 DLB_LOG_ERR("NULL pointer\n");
271                 return -EINVAL;
272         }
273
274         ret = dlb_string_to_int(max_num_events, value);
275         if (ret < 0)
276                 return ret;
277
278         if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
279                 DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
280                             DLB_MAX_NUM_LDB_CREDITS);
281                 return -EINVAL;
282         }
283
284         return 0;
285 }
286
287 static int
288 set_num_dir_credits(const char *key __rte_unused,
289                     const char *value,
290                     void *opaque)
291 {
292         int *num_dir_credits = opaque;
293         int ret;
294
295         if (value == NULL || opaque == NULL) {
296                 DLB_LOG_ERR("NULL pointer\n");
297                 return -EINVAL;
298         }
299
300         ret = dlb_string_to_int(num_dir_credits, value);
301         if (ret < 0)
302                 return ret;
303
304         if (*num_dir_credits < 0 ||
305             *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
306                 DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
307                             DLB_MAX_NUM_DIR_CREDITS);
308                 return -EINVAL;
309         }
310         return 0;
311 }
312
313 /* VDEV-only notes:
314  * This function first unmaps all memory mappings and closes the
315  * domain's file descriptor, which causes the driver to reset the
316  * scheduling domain. Once that completes (when close() returns), we
317  * can safely free the dynamically allocated memory used by the
318  * scheduling domain.
319  *
320  * PF-only notes:
321  * We will maintain a use count and use that to determine when
322  * a reset is required. In PF mode we never mmap or munmap device
323  * memory, and we own the entire physical PCI device.
324  */
325
326 static void
327 dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
328 {
329         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
330         enum dlb_configuration_state config_state;
331         int i, j;
332
333         /* Close and reset the domain */
334         dlb_iface_domain_close(dlb);
335
336         /* Free all dynamically allocated port memory */
337         for (i = 0; i < dlb->num_ports; i++)
338                 dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
339
340         /* If reconfiguring, mark the device's queues and ports as "previously
341          * configured." If the user does not reconfigure them, the PMD will
342          * reapply their previous configuration when the device is started.
343          */
344         config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
345
346         for (i = 0; i < dlb->num_ports; i++) {
347                 dlb->ev_ports[i].qm_port.config_state = config_state;
348                 /* Reset setup_done so ports can be reconfigured */
349                 dlb->ev_ports[i].setup_done = false;
350                 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
351                         dlb->ev_ports[i].link[j].mapped = false;
352         }
353
354         for (i = 0; i < dlb->num_queues; i++)
355                 dlb->ev_queues[i].qm_queue.config_state = config_state;
356
357         for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
358                 dlb->ev_queues[i].setup_done = false;
359
360         dlb->num_ports = 0;
361         dlb->num_ldb_ports = 0;
362         dlb->num_dir_ports = 0;
363         dlb->num_queues = 0;
364         dlb->num_ldb_queues = 0;
365         dlb->num_dir_queues = 0;
366         dlb->configured = false;
367 }
368
369 static int
370 dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
371 {
372         struct dlb_create_ldb_pool_args cfg;
373         struct dlb_cmd_response response;
374         int ret;
375
376         if (handle == NULL)
377                 return -EINVAL;
378
379         if (!handle->cfg.resources.num_ldb_credits) {
380                 handle->cfg.ldb_credit_pool_id = 0;
381                 handle->cfg.num_ldb_credits = 0;
382                 return 0;
383         }
384
385         cfg.response = (uintptr_t)&response;
386         cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
387
388         ret = dlb_iface_ldb_credit_pool_create(handle,
389                                                &cfg);
390         if (ret < 0) {
391                 DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
392                             ret, dlb_error_strings[response.status]);
393         }
394
395         handle->cfg.ldb_credit_pool_id = response.id;
396         handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
397
398         return ret;
399 }
400
401 static int
402 dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
403 {
404         struct dlb_create_dir_pool_args cfg;
405         struct dlb_cmd_response response;
406         int ret;
407
408         if (handle == NULL)
409                 return -EINVAL;
410
411         if (!handle->cfg.resources.num_dir_credits) {
412                 handle->cfg.dir_credit_pool_id = 0;
413                 handle->cfg.num_dir_credits = 0;
414                 return 0;
415         }
416
417         cfg.response = (uintptr_t)&response;
418         cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
419
420         ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
421         if (ret < 0)
422                 DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
423                             ret, dlb_error_strings[response.status]);
424
425         handle->cfg.dir_credit_pool_id = response.id;
426         handle->cfg.num_dir_credits = cfg.num_dir_credits;
427
428         return ret;
429 }
430
431 static int
432 dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
433                            struct dlb_eventdev *dlb,
434                            const struct dlb_hw_rsrcs *resources_asked)
435 {
436         int ret = 0;
437         struct dlb_create_sched_domain_args *config_params;
438         struct dlb_cmd_response response;
439
440         if (resources_asked == NULL) {
441                 DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
442                 ret = -EINVAL;
443                 goto error_exit;
444         }
445
446         /* Map generic qm resources to dlb resources */
447         config_params = &handle->cfg.resources;
448
449         config_params->response = (uintptr_t)&response;
450
451         /* DIR ports and queues */
452
453         config_params->num_dir_ports =
454                 resources_asked->num_dir_ports;
455
456         config_params->num_dir_credits =
457                 resources_asked->num_dir_credits;
458
459         /* LDB ports and queues */
460
461         config_params->num_ldb_queues =
462                 resources_asked->num_ldb_queues;
463
464         config_params->num_ldb_ports =
465                 resources_asked->num_ldb_ports;
466
467         config_params->num_ldb_credits =
468                 resources_asked->num_ldb_credits;
469
470         config_params->num_atomic_inflights =
471                 dlb->num_atm_inflights_per_queue *
472                 config_params->num_ldb_queues;
473
474         config_params->num_hist_list_entries = config_params->num_ldb_ports *
475                 DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
476
477         /* dlb limited to 1 credit pool per queue type */
478         config_params->num_ldb_credit_pools = 1;
479         config_params->num_dir_credit_pools = 1;
480
481         DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_credit_pools=%d, dir_credit_pools=%d\n",
482                     config_params->num_ldb_queues,
483                     config_params->num_ldb_ports,
484                     config_params->num_dir_ports,
485                     config_params->num_atomic_inflights,
486                     config_params->num_hist_list_entries,
487                     config_params->num_ldb_credits,
488                     config_params->num_dir_credits,
489                     config_params->num_ldb_credit_pools,
490                     config_params->num_dir_credit_pools);
491
492         /* Configure the QM */
493
494         ret = dlb_iface_sched_domain_create(handle, config_params);
495         if (ret < 0) {
496                 DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
497                             handle->device_id,
498                             ret,
499                             dlb_error_strings[response.status]);
500                 goto error_exit;
501         }
502
503         handle->domain_id = response.id;
504         handle->domain_id_valid = 1;
505
506         config_params->response = 0;
507
508         ret = dlb_ldb_credit_pool_create(handle);
509         if (ret < 0) {
510                 DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
511                 goto error_exit2;
512         }
513
514         ret = dlb_dir_credit_pool_create(handle);
515         if (ret < 0) {
516                 DLB_LOG_ERR("dlb: create dir credit pool failed\n");
517                 goto error_exit2;
518         }
519
520         handle->cfg.configured = true;
521
522         return 0;
523
524 error_exit2:
525         dlb_iface_domain_close(dlb);
526
527 error_exit:
528         return ret;
529 }
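/* Worked example (illustrative values only): with 4 LDB queues, 4 LDB ports,
 * and a per-queue atomic-inflight budget of 64, the scheduling domain created
 * above would request 4 * 64 = 256 atomic inflights, 4 *
 * DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT history list entries, and one credit
 * pool of each type.
 */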
530
531 /* End HW specific */
532 static void
533 dlb_eventdev_info_get(struct rte_eventdev *dev,
534                       struct rte_event_dev_info *dev_info)
535 {
536         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
537         int ret;
538
539         ret = dlb_hw_query_resources(dlb);
540         if (ret) {
541                 const struct rte_eventdev_data *data = dev->data;
542
543                 DLB_LOG_ERR("get resources err=%d, devid=%d\n",
544                             ret, data->dev_id);
545                 /* fn is void, so fall through and return values set up in
546                  * probe
547                  */
548         }
549
550         /* Add the number of resources currently owned by this domain.
551          * These would become available if the scheduling domain were
552          * reset due to the application calling eventdev_configure again
553          * to *reconfigure* the domain.
554          */
555         evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
556         evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
557         evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
558
559         /* In DLB A-stepping hardware, applications are limited to 128
560          * configured ports (load-balanced or directed). The reported number of
561          * available ports must reflect this.
562          */
563         if (dlb->revision < DLB_REV_B0) {
564                 int used_ports;
565
566                 used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
567                         dlb->hw_rsrc_query_results.num_ldb_ports -
568                         dlb->hw_rsrc_query_results.num_dir_ports;
569
570                 evdev_dlb_default_info.max_event_ports =
571                         RTE_MIN(evdev_dlb_default_info.max_event_ports,
572                                 128 - used_ports);
573         }
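        /* Worked example (illustrative): if 16 ports are already configured
         * across both types, used_ports is 16 and at most 128 - 16 = 112
         * ports are reported as available on A-stepping parts.
         */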
574
575         evdev_dlb_default_info.max_event_queues =
576                 RTE_MIN(evdev_dlb_default_info.max_event_queues,
577                         RTE_EVENT_MAX_QUEUES_PER_DEV);
578
579         evdev_dlb_default_info.max_num_events =
580                 RTE_MIN(evdev_dlb_default_info.max_num_events,
581                         dlb->max_num_events_override);
582
583         *dev_info = evdev_dlb_default_info;
584 }
585
586 /* Note: 1 QM instance per QM device, QM instance/device == event device */
587 static int
588 dlb_eventdev_configure(const struct rte_eventdev *dev)
589 {
590         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
591         struct dlb_hw_dev *handle = &dlb->qm_instance;
592         struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
593         const struct rte_eventdev_data *data = dev->data;
594         const struct rte_event_dev_config *config = &data->dev_conf;
595         int ret;
596
597         /* If this eventdev is already configured, we must release the current
598          * scheduling domain before attempting to configure a new one.
599          */
600         if (dlb->configured) {
601                 dlb_hw_reset_sched_domain(dev, true);
602
603                 ret = dlb_hw_query_resources(dlb);
604                 if (ret) {
605                         DLB_LOG_ERR("get resources err=%d, devid=%d\n",
606                                     ret, data->dev_id);
607                         return ret;
608                 }
609         }
610
611         if (config->nb_event_queues > rsrcs->num_queues) {
612                 DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
613                             config->nb_event_queues,
614                             rsrcs->num_queues);
615                 return -EINVAL;
616         }
617         if (config->nb_event_ports > (rsrcs->num_ldb_ports
618                         + rsrcs->num_dir_ports)) {
619                 DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
620                             config->nb_event_ports,
621                             (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
622                 return -EINVAL;
623         }
624         if (config->nb_events_limit > rsrcs->nb_events_limit) {
625                 DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
626                             config->nb_events_limit,
627                             rsrcs->nb_events_limit);
628                 return -EINVAL;
629         }
630
631         if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
632                 dlb->global_dequeue_wait = false;
633         else {
634                 uint32_t timeout32;
635
636                 dlb->global_dequeue_wait = true;
637
638                 timeout32 = config->dequeue_timeout_ns;
639
640                 dlb->global_dequeue_wait_ticks =
641                         timeout32 * (rte_get_timer_hz() / 1E9);
642         }
643
644         /* Does this platform support umonitor/umwait? */
645         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
646                 if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
647                     RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
648                         DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
649                                     RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
650                         return -EINVAL;
651                 }
652                 dlb->umwait_allowed = true;
653         }
654
655         rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
656         rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
657         /* 1 dir queue per dir port */
658         rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
659
660         /* Scale down nb_events_limit by 4 for directed credits, since there
661          * are 4x as many load-balanced credits.
662          */
663         rsrcs->num_ldb_credits = 0;
664         rsrcs->num_dir_credits = 0;
665
666         if (rsrcs->num_ldb_queues)
667                 rsrcs->num_ldb_credits = config->nb_events_limit;
668         if (rsrcs->num_dir_ports)
669                 rsrcs->num_dir_credits = config->nb_events_limit / 4;
670         if (dlb->num_dir_credits_override != -1)
671                 rsrcs->num_dir_credits = dlb->num_dir_credits_override;
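        /* Worked example (illustrative): with nb_events_limit = 2048 and both
         * port types in use, the domain requests 2048 load-balanced credits
         * and 2048 / 4 = 512 directed credits, unless num_dir_credits_override
         * replaces the directed amount.
         */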
672
673         if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
674                 DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
675                 return -ENODEV;
676         }
677
678         dlb->new_event_limit = config->nb_events_limit;
679         __atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);
680
681         /* Save number of ports/queues for this event dev */
682         dlb->num_ports = config->nb_event_ports;
683         dlb->num_queues = config->nb_event_queues;
684         dlb->num_dir_ports = rsrcs->num_dir_ports;
685         dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
686         dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
687         dlb->num_dir_queues = dlb->num_dir_ports;
688         dlb->num_ldb_credits = rsrcs->num_ldb_credits;
689         dlb->num_dir_credits = rsrcs->num_dir_credits;
690
691         dlb->configured = true;
692
693         return 0;
694 }
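/*
 * Illustrative sketch (application side, hypothetical helper and values): a
 * minimal configuration that exercises the capability checks above on event
 * device 'dev_id'.
 */
static inline int
dlb_example_app_configure(uint8_t dev_id, const struct rte_event_dev_info *info)
{
        struct rte_event_dev_config config = {0};

        config.nb_event_queues = 4;
        config.nb_event_ports = 4;
        config.nb_single_link_event_port_queues = 1;
        config.nb_events_limit = info->max_num_events;
        config.nb_event_queue_flows = info->max_event_queue_flows;
        config.nb_event_port_dequeue_depth = 32;
        config.nb_event_port_enqueue_depth = 32;
        config.dequeue_timeout_ns = info->min_dequeue_timeout_ns;

        return rte_event_dev_configure(dev_id, &config);
}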
695
696 static int16_t
697 dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
698                                uint32_t qm_port_id,
699                                uint16_t qm_qid)
700 {
701         struct dlb_unmap_qid_args cfg;
702         struct dlb_cmd_response response;
703         int32_t ret;
704
705         if (handle == NULL)
706                 return -EINVAL;
707
708         cfg.response = (uintptr_t)&response;
709         cfg.port_id = qm_port_id;
710         cfg.qid = qm_qid;
711
712         ret = dlb_iface_unmap_qid(handle, &cfg);
713         if (ret < 0)
714                 DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
715                             ret, dlb_error_strings[response.status]);
716
717         return ret;
718 }
719
720 static int
721 dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
722                            struct dlb_eventdev_port *ev_port,
723                            struct dlb_eventdev_queue *ev_queue)
724 {
725         int ret, i;
726
727         /* Don't unlink until start time. */
728         if (dlb->run_state == DLB_RUN_STATE_STOPPED)
729                 return 0;
730
731         for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
732                 if (ev_port->link[i].valid &&
733                     ev_port->link[i].queue_id == ev_queue->id)
734                         break; /* found */
735         }
736
737         /* This is expected with the eventdev API, which blindly
738          * attempts to unmap all queues.
739          */
740         if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
741                 DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
742                             ev_queue->qm_queue.id,
743                             ev_port->qm_port.id);
744                 return 0;
745         }
746
747         ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
748                                              ev_port->qm_port.id,
749                                              ev_queue->qm_queue.id);
750         if (!ret)
751                 ev_port->link[i].mapped = false;
752
753         return ret;
754 }
755
756 static int
757 dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
758                          uint8_t queues[], uint16_t nb_unlinks)
759 {
760         struct dlb_eventdev_port *ev_port = event_port;
761         struct dlb_eventdev *dlb;
762         int i;
763
764         RTE_SET_USED(dev);
765
766         if (!ev_port->setup_done) {
767                 DLB_LOG_ERR("dlb: evport %d is not configured\n",
768                             ev_port->id);
769                 rte_errno = -EINVAL;
770                 return 0;
771         }
772
773         if (queues == NULL || nb_unlinks == 0) {
774                 DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
775                 return 0; /* Ignore and return success */
776         }
777
778         if (ev_port->qm_port.is_directed) {
779                 DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
780                             ev_port->id);
781                 rte_errno = 0;
782                 return nb_unlinks; /* as if success */
783         }
784
785         dlb = ev_port->dlb;
786
787         for (i = 0; i < nb_unlinks; i++) {
788                 struct dlb_eventdev_queue *ev_queue;
789                 int ret, j;
790
791                 if (queues[i] >= dlb->num_queues) {
792                         DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
793                         rte_errno = -EINVAL;
794                         return i; /* return index of offending queue */
795                 }
796
797                 ev_queue = &dlb->ev_queues[queues[i]];
798
799                 /* Does a link exist? */
800                 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
801                         if (ev_port->link[j].queue_id == queues[i] &&
802                             ev_port->link[j].valid)
803                                 break;
804
805                 if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
806                         continue;
807
808                 ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
809                 if (ret) {
810                         DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
811                                     ret, ev_port->id, queues[i]);
812                         rte_errno = -ENOENT;
813                         return i; /* return index of offending queue */
814                 }
815
816                 ev_port->link[j].valid = false;
817                 ev_port->num_links--;
818                 ev_queue->num_links--;
819         }
820
821         return nb_unlinks;
822 }
823
824 static int
825 dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
826                                       void *event_port)
827 {
828         struct dlb_eventdev_port *ev_port = event_port;
829         struct dlb_eventdev *dlb;
830         struct dlb_hw_dev *handle;
831         struct dlb_pending_port_unmaps_args cfg;
832         struct dlb_cmd_response response;
833         int ret;
834
835         RTE_SET_USED(dev);
836
837         if (!ev_port->setup_done) {
838                 DLB_LOG_ERR("dlb: evport %d is not configured\n",
839                             ev_port->id);
840                 rte_errno = -EINVAL;
841                 return 0;
842         }
843
844         cfg.port_id = ev_port->qm_port.id;
845         cfg.response = (uintptr_t)&response;
846         dlb = ev_port->dlb;
847         handle = &dlb->qm_instance;
848         ret = dlb_iface_pending_port_unmaps(handle, &cfg);
849
850         if (ret < 0) {
851                 DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
852                             ret, dlb_error_strings[response.status]);
853                 return ret;
854         }
855
856         return response.id;
857 }
858
859 static void
860 dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
861                                    uint8_t port_id,
862                                    struct rte_event_port_conf *port_conf)
863 {
864         RTE_SET_USED(port_id);
865         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
866
867         port_conf->new_event_threshold = dlb->new_event_limit;
868         port_conf->dequeue_depth = 32;
869         port_conf->enqueue_depth = DLB_MAX_ENQUEUE_DEPTH;
870         port_conf->event_port_cfg = 0;
871 }
872
873 static void
874 dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
875                                     uint8_t queue_id,
876                                     struct rte_event_queue_conf *queue_conf)
877 {
878         RTE_SET_USED(dev);
879         RTE_SET_USED(queue_id);
880         queue_conf->nb_atomic_flows = 1024;
881         queue_conf->nb_atomic_order_sequences = 32;
882         queue_conf->event_queue_cfg = 0;
883         queue_conf->priority = 0;
884 }
885
886 static int
887 dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
888                        struct dlb_eventdev_port *ev_port,
889                        uint32_t dequeue_depth,
890                        uint32_t cq_depth,
891                        uint32_t enqueue_depth,
892                        uint16_t rsvd_tokens,
893                        bool use_rsvd_token_scheme)
894 {
895         struct dlb_hw_dev *handle = &dlb->qm_instance;
896         struct dlb_create_ldb_port_args cfg = {0};
897         struct dlb_cmd_response response = {0};
898         int ret;
899         struct dlb_port *qm_port = NULL;
900         char mz_name[RTE_MEMZONE_NAMESIZE];
901         uint32_t qm_port_id;
902
903         if (handle == NULL)
904                 return -EINVAL;
905
906         if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
907                 DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
908                         DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
909                 return -EINVAL;
910         }
911
912         if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
913                 DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
914                             DLB_MIN_ENQUEUE_DEPTH);
915                 return -EINVAL;
916         }
917
918         rte_spinlock_lock(&handle->resource_lock);
919
920         cfg.response = (uintptr_t)&response;
921
922         /* We round up to the next power of 2 if necessary */
923         cfg.cq_depth = rte_align32pow2(cq_depth);
924         cfg.cq_depth_threshold = rsvd_tokens;
925
926         cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
927
928         /* User controls the LDB high watermark via enqueue depth. The DIR high
929          * watermark is equal, unless the directed credit pool is too small.
930          */
931         cfg.ldb_credit_high_watermark = enqueue_depth;
932
933         /* If there are no directed ports, the kernel driver will ignore this
934          * port's directed credit settings. Don't use enqueue_depth if it would
935          * require more directed credits than are available.
936          */
937         cfg.dir_credit_high_watermark =
938                 RTE_MIN(enqueue_depth,
939                         handle->cfg.num_dir_credits / dlb->num_ports);
940
941         cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
942         cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
943
944         cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
945         cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
946
947         /* Per QM values */
948
949         cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
950         cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
951
952         ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
953         if (ret < 0) {
954                 DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
955                             ret, dlb_error_strings[response.status]);
956                 goto error_exit;
957         }
958
959         qm_port_id = response.id;
960
961         DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
962                     ev_port->id, qm_port_id);
963
964         qm_port = &ev_port->qm_port;
965         qm_port->ev_port = ev_port; /* back ptr */
966         qm_port->dlb = dlb; /* back ptr */
967
968         /*
969          * Allocate and init local qe struct(s).
970          * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
971          */
972
973         snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
974                  ev_port->id);
975
976         ret = dlb_init_qe_mem(qm_port, mz_name);
977         if (ret < 0) {
978                 DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
979                 goto error_exit;
980         }
981
982         qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
983         qm_port->id = qm_port_id;
984
985         /* The credit window is one high water mark of QEs */
986         qm_port->ldb_pushcount_at_credit_expiry = 0;
987         qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
988         /* The credit window is one high water mark of QEs */
989         qm_port->dir_pushcount_at_credit_expiry = 0;
990         qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
991         qm_port->cq_depth = cfg.cq_depth;
992         /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
993          * the effective depth is smaller.
994          */
995         qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
996         qm_port->cq_idx = 0;
997         qm_port->cq_idx_unmasked = 0;
998         if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
999                 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1000         else
1001                 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1002
1003         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1004         /* starting value of gen bit - it toggles at wrap time */
1005         qm_port->gen_bit = 1;
1006
1007         qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
1008         qm_port->cq_rsvd_token_deficit = rsvd_tokens;
1009         qm_port->int_armed = false;
1010
1011         /* Save off for later use in info and lookup APIs. */
1012         qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
1013
1014         qm_port->dequeue_depth = dequeue_depth;
1015
1016         qm_port->owed_tokens = 0;
1017         qm_port->issued_releases = 0;
1018
1019         /* update state */
1020         qm_port->state = PORT_STARTED; /* enabled at create time */
1021         qm_port->config_state = DLB_CONFIGURED;
1022
1023         qm_port->dir_credits = cfg.dir_credit_high_watermark;
1024         qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
1025
1026         DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1027                     qm_port_id,
1028                     cq_depth,
1029                     qm_port->ldb_credits,
1030                     qm_port->dir_credits);
1031
1032         rte_spinlock_unlock(&handle->resource_lock);
1033
1034         return 0;
1035
1036 error_exit:
1037         if (qm_port) {
1038                 dlb_free_qe_mem(qm_port);
1039                 qm_port->pp_mmio_base = 0;
1040         }
1041
1042         rte_spinlock_unlock(&handle->resource_lock);
1043
1044         DLB_LOG_ERR("dlb: create ldb port failed!\n");
1045
1046         return ret;
1047 }
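/*
 * Illustrative sketch, not part of this patch: how a dequeue path might
 * advance the CQ index using the mask and gen_bit_shift computed above. The
 * helper name is hypothetical; the real consume logic arrives with the
 * dequeue patches. The gen bit flips each time the unmasked index crosses a
 * CQ wrap boundary.
 */
static inline void
dlb_example_inc_cq_idx(struct dlb_port *qm_port, int cnt)
{
        uint16_t idx = qm_port->cq_idx_unmasked + cnt;

        qm_port->cq_idx_unmasked = idx;
        qm_port->cq_idx = idx & qm_port->cq_depth_mask;
        qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
}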
1048
1049 static int
1050 dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
1051                        struct dlb_eventdev_port *ev_port,
1052                        uint32_t dequeue_depth,
1053                        uint32_t cq_depth,
1054                        uint32_t enqueue_depth,
1055                        uint16_t rsvd_tokens,
1056                        bool use_rsvd_token_scheme)
1057 {
1058         struct dlb_hw_dev *handle = &dlb->qm_instance;
1059         struct dlb_create_dir_port_args cfg = {0};
1060         struct dlb_cmd_response response = {0};
1061         int ret;
1062         struct dlb_port *qm_port = NULL;
1063         char mz_name[RTE_MEMZONE_NAMESIZE];
1064         uint32_t qm_port_id;
1065
1066         if (dlb == NULL || handle == NULL)
1067                 return -EINVAL;
1068
1069         if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
1070                 DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
1071                             DLB_MIN_DIR_CQ_DEPTH);
1072                 return -EINVAL;
1073         }
1074
1075         if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
1076                 DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
1077                             DLB_MIN_ENQUEUE_DEPTH);
1078                 return -EINVAL;
1079         }
1080
1081         rte_spinlock_lock(&handle->resource_lock);
1082
1083         /* Directed queues are configured at link time. */
1084         cfg.queue_id = -1;
1085
1086         cfg.response = (uintptr_t)&response;
1087
1088         /* We round up to the next power of 2 if necessary */
1089         cfg.cq_depth = rte_align32pow2(cq_depth);
1090         cfg.cq_depth_threshold = rsvd_tokens;
1091
1092         /* User controls the LDB high watermark via enqueue depth. The DIR high
1093          * watermark is equal, unless the directed credit pool is too small.
1094          */
1095         cfg.ldb_credit_high_watermark = enqueue_depth;
1096
1097         /* Don't use enqueue_depth if it would require more directed credits
1098          * than are available.
1099          */
1100         cfg.dir_credit_high_watermark =
1101                 RTE_MIN(enqueue_depth,
1102                         handle->cfg.num_dir_credits / dlb->num_ports);
1103
1104         cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
1105         cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
1106
1107         cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
1108         cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
1109
1110         /* Per QM values */
1111
1112         cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
1113         cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
1114
1115         ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
1116         if (ret < 0) {
1117                 DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
1118                             ret, dlb_error_strings[response.status]);
1119                 goto error_exit;
1120         }
1121
1122         qm_port_id = response.id;
1123
1124         DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
1125                     ev_port->id, qm_port_id);
1126
1127         qm_port = &ev_port->qm_port;
1128         qm_port->ev_port = ev_port; /* back ptr */
1129         qm_port->dlb = dlb;  /* back ptr */
1130
1131         /*
1132          * Init local qe struct(s).
1133          * Note: MOVDIR64 requires the enqueue QE to be aligned
1134          */
1135
1136         snprintf(mz_name, sizeof(mz_name), "dir_port%d",
1137                  ev_port->id);
1138
1139         ret = dlb_init_qe_mem(qm_port, mz_name);
1140
1141         if (ret < 0) {
1142                 DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
1143                 goto error_exit;
1144         }
1145
1146         qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
1147         qm_port->id = qm_port_id;
1148
1149         /* The credit window is one high water mark of QEs */
1150         qm_port->ldb_pushcount_at_credit_expiry = 0;
1151         qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
1152         /* The credit window is one high water mark of QEs */
1153         qm_port->dir_pushcount_at_credit_expiry = 0;
1154         qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
1155         qm_port->cq_depth = cfg.cq_depth;
1156         qm_port->cq_idx = 0;
1157         qm_port->cq_idx_unmasked = 0;
1158         if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
1159                 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1160         else
1161                 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1162
1163         qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1164         /* starting value of gen bit - it toggles at wrap time */
1165         qm_port->gen_bit = 1;
1166
1167         qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
1168         qm_port->cq_rsvd_token_deficit = rsvd_tokens;
1169         qm_port->int_armed = false;
1170
1171         /* Save off for later use in info and lookup APIs. */
1172         qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
1173
1174         qm_port->dequeue_depth = dequeue_depth;
1175
1176         qm_port->owed_tokens = 0;
1177         qm_port->issued_releases = 0;
1178
1179         /* update state */
1180         qm_port->state = PORT_STARTED; /* enabled at create time */
1181         qm_port->config_state = DLB_CONFIGURED;
1182
1183         qm_port->dir_credits = cfg.dir_credit_high_watermark;
1184         qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
1185
1186         DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
1187                     qm_port_id,
1188                     cq_depth,
1189                     cfg.dir_credit_high_watermark,
1190                     cfg.ldb_credit_high_watermark);
1191
1192         rte_spinlock_unlock(&handle->resource_lock);
1193
1194         return 0;
1195
1196 error_exit:
1197         if (qm_port) {
1198                 qm_port->pp_mmio_base = 0;
1199                 dlb_free_qe_mem(qm_port);
1200         }
1201
1202         rte_spinlock_unlock(&handle->resource_lock);
1203
1204         DLB_LOG_ERR("dlb: create dir port failed!\n");
1205
1206         return ret;
1207 }
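/*
 * Illustrative sketch, not part of this patch: in sparse CQ mode each event
 * occupies a full cache line (four QE slots), so both port-create paths above
 * size the index mask to cover 4x the nominal CQ depth. Hypothetical helper,
 * assuming a power-of-2 depth as enforced above.
 */
static inline uint16_t
dlb_example_cq_depth_mask(uint16_t cq_depth, bool sparse_mode)
{
        return sparse_mode ? (cq_depth * 4) - 1 : cq_depth - 1;
}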
1208
1209 static int32_t
1210 dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
1211                         struct dlb_queue *queue,
1212                         const struct rte_event_queue_conf *evq_conf)
1213 {
1214         struct dlb_hw_dev *handle = &dlb->qm_instance;
1215         struct dlb_create_ldb_queue_args cfg;
1216         struct dlb_cmd_response response;
1217         int32_t ret;
1218         uint32_t qm_qid;
1219         int sched_type = -1;
1220
1221         if (evq_conf == NULL)
1222                 return -EINVAL;
1223
1224         if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
1225                 if (evq_conf->nb_atomic_order_sequences != 0)
1226                         sched_type = RTE_SCHED_TYPE_ORDERED;
1227                 else
1228                         sched_type = RTE_SCHED_TYPE_PARALLEL;
1229         } else
1230                 sched_type = evq_conf->schedule_type;
1231
1232         cfg.response = (uintptr_t)&response;
1233         cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
1234         cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
1235         cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
1236
1237         if (sched_type != RTE_SCHED_TYPE_ORDERED) {
1238                 cfg.num_sequence_numbers = 0;
1239                 cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
1240         }
1241
1242         ret = dlb_iface_ldb_queue_create(handle, &cfg);
1243         if (ret < 0) {
1244                 DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
1245                             ret, dlb_error_strings[response.status]);
1246                 return -EINVAL;
1247         }
1248
1249         qm_qid = response.id;
1250
1251         /* Save off queue config for debug, resource lookups, and reconfig */
1252         queue->num_qid_inflights = cfg.num_qid_inflights;
1253         queue->num_atm_inflights = cfg.num_atomic_inflights;
1254
1255         queue->sched_type = sched_type;
1256         queue->config_state = DLB_CONFIGURED;
1257
1258         DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
1259                     qm_qid,
1260                     cfg.num_atomic_inflights,
1261                     cfg.num_sequence_numbers,
1262                     cfg.num_qid_inflights);
1263
1264         return qm_qid;
1265 }
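/* Illustrative summary of the mapping above: an RTE_EVENT_QUEUE_CFG_ALL_TYPES
 * queue is created as ORDERED when nb_atomic_order_sequences is non-zero and
 * as PARALLEL otherwise; any non-ordered queue gets no sequence numbers and
 * uses DLB_DEF_UNORDERED_QID_INFLIGHTS for its QID inflight limit.
 */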
1266
1267 static int32_t
1268 dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
1269 {
1270         struct dlb_hw_dev *handle = &dlb->qm_instance;
1271         struct dlb_get_sn_allocation_args cfg;
1272         struct dlb_cmd_response response;
1273         int ret;
1274
1275         cfg.group = group;
1276         cfg.response = (uintptr_t)&response;
1277
1278         ret = dlb_iface_get_sn_allocation(handle, &cfg);
1279         if (ret < 0) {
1280                 DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
1281                             ret, dlb_error_strings[response.status]);
1282                 return ret;
1283         }
1284
1285         return response.id;
1286 }
1287
1288 static int
1289 dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
1290 {
1291         struct dlb_hw_dev *handle = &dlb->qm_instance;
1292         struct dlb_set_sn_allocation_args cfg;
1293         struct dlb_cmd_response response;
1294         int ret;
1295
1296         cfg.num = num;
1297         cfg.group = group;
1298         cfg.response = (uintptr_t)&response;
1299
1300         ret = dlb_iface_set_sn_allocation(handle, &cfg);
1301         if (ret < 0) {
1302                 DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
1303                             ret, dlb_error_strings[response.status]);
1304                 return ret;
1305         }
1306
1307         return ret;
1308 }
1309
1310 static int32_t
1311 dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
1312 {
1313         struct dlb_hw_dev *handle = &dlb->qm_instance;
1314         struct dlb_get_sn_occupancy_args cfg;
1315         struct dlb_cmd_response response;
1316         int ret;
1317
1318         cfg.group = group;
1319         cfg.response = (uintptr_t)&response;
1320
1321         ret = dlb_iface_get_sn_occupancy(handle, &cfg);
1322         if (ret < 0) {
1323                 DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
1324                             ret, dlb_error_strings[response.status]);
1325                 return ret;
1326         }
1327
1328         return response.id;
1329 }
1330
1331 /* Query the current sequence number allocations and, if they conflict with the
1332  * requested LDB queue configuration, attempt to re-allocate sequence numbers.
1333  * This is best-effort; if it fails, the PMD will attempt to configure the
1334  * load-balanced queue and return an error.
1335  */
1336 static void
1337 dlb_program_sn_allocation(struct dlb_eventdev *dlb,
1338                           const struct rte_event_queue_conf *queue_conf)
1339 {
1340         int grp_occupancy[DLB_NUM_SN_GROUPS];
1341         int grp_alloc[DLB_NUM_SN_GROUPS];
1342         int i, sequence_numbers;
1343
1344         sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
1345
1346         for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
1347                 int total_slots;
1348
1349                 grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
1350                 if (grp_alloc[i] < 0)
1351                         return;
1352
1353                 total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
1354
1355                 grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
1356                 if (grp_occupancy[i] < 0)
1357                         return;
1358
1359                 /* DLB has at least one available slot for the requested
1360                  * sequence numbers, so no further configuration required.
1361                  */
1362                 if (grp_alloc[i] == sequence_numbers &&
1363                     grp_occupancy[i] < total_slots)
1364                         return;
1365         }
1366
1367         /* None of the sequence number groups are configured for the requested
1368          * sequence numbers, so we have to reconfigure one of them. This is
1369          * only possible if a group is not in use.
1370          */
1371         for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
1372                 if (grp_occupancy[i] == 0)
1373                         break;
1374         }
1375
1376         if (i == DLB_NUM_SN_GROUPS) {
1377                 DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
1378                        __func__, sequence_numbers);
1379                 return;
1380         }
1381
1382         /* Attempt to configure slot i with the requested number of sequence
1383          * numbers. Ignore the return value -- if this fails, the error will be
1384          * caught during subsequent queue configuration.
1385          */
1386         dlb_set_sn_allocation(dlb, i, sequence_numbers);
1387 }
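/*
 * Illustrative sketch (application side, hypothetical helper and values):
 * requesting an ordered queue with 64 sequence numbers is what drives the
 * sequence number group selection above.
 */
static inline void
dlb_example_ordered_queue_conf(struct rte_event_queue_conf *conf)
{
        conf->schedule_type = RTE_SCHED_TYPE_ORDERED;
        conf->nb_atomic_order_sequences = 64;
}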
1388
1389 static int
1390 dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
1391                              struct dlb_eventdev_queue *ev_queue,
1392                              const struct rte_event_queue_conf *queue_conf)
1393 {
1394         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1395         int32_t qm_qid;
1396
1397         if (queue_conf->nb_atomic_order_sequences)
1398                 dlb_program_sn_allocation(dlb, queue_conf);
1399
1400         qm_qid = dlb_hw_create_ldb_queue(dlb,
1401                                          &ev_queue->qm_queue,
1402                                          queue_conf);
1403         if (qm_qid < 0) {
1404                 DLB_LOG_ERR("Failed to create the load-balanced queue\n");
1405
1406                 return qm_qid;
1407         }
1408
1409         dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
1410
1411         ev_queue->qm_queue.id = qm_qid;
1412
1413         return 0;
1414 }
1415
1416 static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
1417 {
1418         int i, num = 0;
1419
1420         for (i = 0; i < dlb->num_queues; i++) {
1421                 if (dlb->ev_queues[i].setup_done &&
1422                     dlb->ev_queues[i].qm_queue.is_directed)
1423                         num++;
1424         }
1425
1426         return num;
1427 }
1428
1429 static void
1430 dlb_queue_link_teardown(struct dlb_eventdev *dlb,
1431                         struct dlb_eventdev_queue *ev_queue)
1432 {
1433         struct dlb_eventdev_port *ev_port;
1434         int i, j;
1435
1436         for (i = 0; i < dlb->num_ports; i++) {
1437                 ev_port = &dlb->ev_ports[i];
1438
1439                 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1440                         if (!ev_port->link[j].valid ||
1441                             ev_port->link[j].queue_id != ev_queue->id)
1442                                 continue;
1443
1444                         ev_port->link[j].valid = false;
1445                         ev_port->num_links--;
1446                 }
1447         }
1448
1449         ev_queue->num_links = 0;
1450 }
1451
1452 static int
1453 dlb_eventdev_queue_setup(struct rte_eventdev *dev,
1454                          uint8_t ev_qid,
1455                          const struct rte_event_queue_conf *queue_conf)
1456 {
1457         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1458         struct dlb_eventdev_queue *ev_queue;
1459         int ret;
1460
1461         if (queue_conf == NULL)
1462                 return -EINVAL;
1463
1464         if (ev_qid >= dlb->num_queues)
1465                 return -EINVAL;
1466
1467         ev_queue = &dlb->ev_queues[ev_qid];
1468
1469         ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
1470                 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
1471         ev_queue->id = ev_qid;
1472         ev_queue->conf = *queue_conf;
1473
1474         if (!ev_queue->qm_queue.is_directed) {
1475                 ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
1476         } else {
1477                 /* The directed queue isn't setup until link time, at which
1478                  * point we know its directed port ID. Directed queue setup
1479                  * will only fail if this queue is already setup or there are
1480                  * no directed queues left to configure.
1481                  */
1482                 ret = 0;
1483
1484                 ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
1485
1486                 if (ev_queue->setup_done ||
1487                     dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
1488                         ret = -EINVAL;
1489         }
1490
1491         /* Tear down pre-existing port->queue links */
1492         if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
1493                 dlb_queue_link_teardown(dlb, ev_queue);
1494
1495         if (!ret)
1496                 ev_queue->setup_done = true;
1497
1498         return ret;
1499 }
1500
1501 static void
1502 dlb_port_link_teardown(struct dlb_eventdev *dlb,
1503                        struct dlb_eventdev_port *ev_port)
1504 {
1505         struct dlb_eventdev_queue *ev_queue;
1506         int i;
1507
1508         for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1509                 if (!ev_port->link[i].valid)
1510                         continue;
1511
1512                 ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
1513
1514                 ev_port->link[i].valid = false;
1515                 ev_port->num_links--;
1516                 ev_queue->num_links--;
1517         }
1518 }
1519
1520 static int
1521 dlb_eventdev_port_setup(struct rte_eventdev *dev,
1522                         uint8_t ev_port_id,
1523                         const struct rte_event_port_conf *port_conf)
1524 {
1525         struct dlb_eventdev *dlb;
1526         struct dlb_eventdev_port *ev_port;
1527         bool use_rsvd_token_scheme;
1528         uint32_t adj_cq_depth;
1529         uint16_t rsvd_tokens;
1530         int ret;
1531
1532         if (dev == NULL || port_conf == NULL) {
1533                 DLB_LOG_ERR("Null parameter\n");
1534                 return -EINVAL;
1535         }
1536
1537         dlb = dlb_pmd_priv(dev);
1538
1539         if (ev_port_id >= DLB_MAX_NUM_PORTS)
1540                 return -EINVAL;
1541
1542         if (port_conf->dequeue_depth >
1543                 evdev_dlb_default_info.max_event_port_dequeue_depth ||
1544             port_conf->enqueue_depth >
1545                 evdev_dlb_default_info.max_event_port_enqueue_depth)
1546                 return -EINVAL;
1547
1548         ev_port = &dlb->ev_ports[ev_port_id];
1549         /* configured? */
1550         if (ev_port->setup_done) {
1551                 DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
1552                 return -EINVAL;
1553         }
1554
1555         /* The reserved token interrupt arming scheme requires that one or more
1556          * CQ tokens be reserved by the PMD. This limits the amount of CQ space
1557          * usable by the DLB, so in order to give an *effective* CQ depth equal
1558          * to the user-requested value, we double CQ depth and reserve half of
1559          * its tokens. If the user requests the max CQ depth (256) then we
1560          * cannot double it, so we reserve one token and give an effective
1561          * depth of 255 entries.
1562          */
1563         use_rsvd_token_scheme = true;
1564         rsvd_tokens = 1;
1565         adj_cq_depth = port_conf->dequeue_depth;
1566
1567         if (use_rsvd_token_scheme && adj_cq_depth < 256) {
1568                 rsvd_tokens = adj_cq_depth;
1569                 adj_cq_depth *= 2;
1570         }
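        /* Worked example of the scheme described above (illustrative only): a
         * requested dequeue_depth of 64 yields rsvd_tokens = 64 and
         * adj_cq_depth = 128, i.e. a 128-entry CQ with half of its tokens
         * reserved and an effective depth of 64. A requested depth of 256
         * cannot be doubled, so rsvd_tokens stays 1 and the effective depth
         * is 255.
         */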
1571
1572         ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1573                 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1574
1575         if (!ev_port->qm_port.is_directed) {
1576                 ret = dlb_hw_create_ldb_port(dlb,
1577                                              ev_port,
1578                                              port_conf->dequeue_depth,
1579                                              adj_cq_depth,
1580                                              port_conf->enqueue_depth,
1581                                              rsvd_tokens,
1582                                              use_rsvd_token_scheme);
1583                 if (ret < 0) {
1584                         DLB_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1585                                     ev_port_id);
1586                         return ret;
1587                 }
1588         } else {
1589                 ret = dlb_hw_create_dir_port(dlb,
1590                                              ev_port,
1591                                              port_conf->dequeue_depth,
1592                                              adj_cq_depth,
1593                                              port_conf->enqueue_depth,
1594                                              rsvd_tokens,
1595                                              use_rsvd_token_scheme);
1596                 if (ret < 0) {
1597                         DLB_LOG_ERR("Failed to create the DIR port\n");
1598                         return ret;
1599                 }
1600         }
1601
1602         /* Save off port config for reconfig */
1603         dlb->ev_ports[ev_port_id].conf = *port_conf;
1604
1605         dlb->ev_ports[ev_port_id].id = ev_port_id;
1606         dlb->ev_ports[ev_port_id].enq_configured = true;
1607         dlb->ev_ports[ev_port_id].setup_done = true;
1608         dlb->ev_ports[ev_port_id].inflight_max =
1609                 port_conf->new_event_threshold;
1610         dlb->ev_ports[ev_port_id].implicit_release =
1611                 !(port_conf->event_port_cfg &
1612                   RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1613         dlb->ev_ports[ev_port_id].outstanding_releases = 0;
1614         dlb->ev_ports[ev_port_id].inflight_credits = 0;
1615         dlb->ev_ports[ev_port_id].credit_update_quanta =
1616                 RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
1617         dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
1618
1619         /* Tear down pre-existing port->queue links */
1620         if (dlb->run_state == DLB_RUN_STATE_STOPPED)
1621                 dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
1622
1623         dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
1624
1625         return 0;
1626 }
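/* Illustrative sketch (not part of this driver): the port_setup handler above
 * is reached via rte_event_port_setup(). dev_id and port_id are assumed to be
 * application-provided; the requested depths must not exceed the PMD maxima
 * advertised through rte_event_dev_info_get().
 *
 *     struct rte_event_port_conf pconf;
 *
 *     rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *     pconf.dequeue_depth = 32;
 *     pconf.enqueue_depth = 32;
 *     pconf.new_event_threshold = 1024;
 *     if (rte_event_port_setup(dev_id, port_id, &pconf) < 0)
 *             rte_panic("port setup failed\n");
 */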
1627
1628 static int
1629 dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
1630 {
1631         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1632         int ret, i;
1633
1634         /* If an event queue or port was previously configured, but hasn't been
1635          * reconfigured, reapply its original configuration.
1636          */
1637         for (i = 0; i < dlb->num_queues; i++) {
1638                 struct dlb_eventdev_queue *ev_queue;
1639
1640                 ev_queue = &dlb->ev_queues[i];
1641
1642                 if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
1643                         continue;
1644
1645                 ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
1646                 if (ret < 0) {
1647                         DLB_LOG_ERR("dlb: failed to reconfigure queue %d\n", i);
1648                         return ret;
1649                 }
1650         }
1651
1652         for (i = 0; i < dlb->num_ports; i++) {
1653                 struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
1654
1655                 if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
1656                         continue;
1657
1658                 ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
1659                 if (ret < 0) {
1660                         DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d\n",
1661                                     i);
1662                         return ret;
1663                 }
1664         }
1665
1666         return 0;
1667 }
1668
1669 static int
1670 set_dev_id(const char *key __rte_unused,
1671            const char *value,
1672            void *opaque)
1673 {
1674         int *dev_id = opaque;
1675         int ret;
1676
1677         if (value == NULL || opaque == NULL) {
1678                 DLB_LOG_ERR("NULL pointer\n");
1679                 return -EINVAL;
1680         }
1681
1682         ret = dlb_string_to_int(dev_id, value);
1683         if (ret < 0)
1684                 return ret;
1685
1686         return 0;
1687 }
1688
1689 static int
1690 set_defer_sched(const char *key __rte_unused,
1691                 const char *value,
1692                 void *opaque)
1693 {
1694         int *defer_sched = opaque;
1695
1696         if (value == NULL || opaque == NULL) {
1697                 DLB_LOG_ERR("NULL pointer\n");
1698                 return -EINVAL;
1699         }
1700
1701         if (strncmp(value, "on", 2) != 0) {
1702                 DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
1703                             value);
1704                 return -EINVAL;
1705         }
1706
1707         *defer_sched = 1;
1708
1709         return 0;
1710 }
1711
1712 static int
1713 set_num_atm_inflights(const char *key __rte_unused,
1714                       const char *value,
1715                       void *opaque)
1716 {
1717         int *num_atm_inflights = opaque;
1718         int ret;
1719
1720         if (value == NULL || opaque == NULL) {
1721                 DLB_LOG_ERR("NULL pointer\n");
1722                 return -EINVAL;
1723         }
1724
1725         ret = dlb_string_to_int(num_atm_inflights, value);
1726         if (ret < 0)
1727                 return ret;
1728
1729         if (*num_atm_inflights < 0 ||
1730             *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
1731                 DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
1732                             DLB_MAX_NUM_ATM_INFLIGHTS);
1733                 return -EINVAL;
1734         }
1735
1736         return 0;
1737 }
1738
1739 static int
1740 dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
1741                        uint8_t queue_id,
1742                        bool link_exists,
1743                        int index)
1744 {
1745         struct dlb_eventdev *dlb = ev_port->dlb;
1746         struct dlb_eventdev_queue *ev_queue;
1747         bool port_is_dir, queue_is_dir;
1748
1749         if (queue_id >= dlb->num_queues) {
1750                 DLB_LOG_ERR("queue_id %d >= num queues %d\n",
1751                             queue_id, dlb->num_queues);
1752                 rte_errno = -EINVAL;
1753                 return -1;
1754         }
1755
1756         ev_queue = &dlb->ev_queues[queue_id];
1757
1758         if (!ev_queue->setup_done &&
1759             ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
1760                 DLB_LOG_ERR("setup not done and not previously configured\n");
1761                 rte_errno = -EINVAL;
1762                 return -1;
1763         }
1764
1765         port_is_dir = ev_port->qm_port.is_directed;
1766         queue_is_dir = ev_queue->qm_queue.is_directed;
1767
1768         if (port_is_dir != queue_is_dir) {
1769                 DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
1770                             queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1771                             port_is_dir ? "DIR" : "LDB", ev_port->id);
1772
1773                 rte_errno = -EINVAL;
1774                 return -1;
1775         }
1776
1777         /* Check if there is space for the requested link */
1778         if (!link_exists && index == -1) {
1779                 DLB_LOG_ERR("no space for new link\n");
1780                 rte_errno = -ENOSPC;
1781                 return -1;
1782         }
1783
1784         /* Check if the directed port is already linked */
1785         if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1786             !link_exists) {
1787                 DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1788                             ev_port->id);
1789                 rte_errno = -EINVAL;
1790                 return -1;
1791         }
1792
1793         /* Check if the directed queue is already linked */
1794         if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1795             !link_exists) {
1796                 DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1797                             ev_queue->id);
1798                 rte_errno = -EINVAL;
1799                 return -1;
1800         }
1801
1802         return 0;
1803 }
1804
1805 static int32_t
1806 dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
1807 {
1808         struct dlb_hw_dev *handle = &dlb->qm_instance;
1809         struct dlb_create_dir_queue_args cfg;
1810         struct dlb_cmd_response response;
1811         int32_t ret;
1812
1813         cfg.response = (uintptr_t)&response;
1814
1815         /* The directed port is always configured before its queue */
1816         cfg.port_id = qm_port_id;
1817
1818         ret = dlb_iface_dir_queue_create(handle, &cfg);
1819         if (ret < 0) {
1820                 DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
1821                             ret, dlb_error_strings[response.status]);
1822                 return -EINVAL;
1823         }
1824
1825         return response.id;
1826 }
1827
1828 static int
1829 dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
1830                              struct dlb_eventdev_queue *ev_queue,
1831                              struct dlb_eventdev_port *ev_port)
1832 {
1833         int32_t qm_qid;
1834
1835         qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
1836
1837         if (qm_qid < 0) {
1838                 DLB_LOG_ERR("Failed to create the DIR queue\n");
1839                 return qm_qid;
1840         }
1841
1842         dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1843
1844         ev_queue->qm_queue.id = qm_qid;
1845
1846         return 0;
1847 }
1848
1849 static int16_t
1850 dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
1851                            uint32_t qm_port_id,
1852                            uint16_t qm_qid,
1853                            uint8_t priority)
1854 {
1855         struct dlb_map_qid_args cfg;
1856         struct dlb_cmd_response response;
1857         int32_t ret;
1858
1859         if (handle == NULL)
1860                 return -EINVAL;
1861
1862         /* Build message */
1863         cfg.response = (uintptr_t)&response;
1864         cfg.port_id = qm_port_id;
1865         cfg.qid = qm_qid;
1866         cfg.priority = EV_TO_DLB_PRIO(priority);
1867
1868         ret = dlb_iface_map_qid(handle, &cfg);
1869         if (ret < 0) {
1870                 DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
1871                             ret, dlb_error_strings[response.status]);
1872                 DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1873                             handle->device_id,
1874                             handle->domain_id, cfg.port_id,
1875                             cfg.qid,
1876                             cfg.priority);
1877         } else {
1878                 DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
1879                             qm_qid, qm_port_id);
1880         }
1881
1882         return ret;
1883 }
1884
1885 static int
1886 dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
1887                          struct dlb_eventdev_port *ev_port,
1888                          struct dlb_eventdev_queue *ev_queue,
1889                          uint8_t priority)
1890 {
1891         int first_avail = -1;
1892         int ret, i;
1893
1894         for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1895                 if (ev_port->link[i].valid) {
1896                         if (ev_port->link[i].queue_id == ev_queue->id &&
1897                             ev_port->link[i].priority == priority) {
1898                                 if (ev_port->link[i].mapped)
1899                                         return 0; /* already mapped */
1900                                 first_avail = i;
1901                         }
1902                 } else {
1903                         if (first_avail == -1)
1904                                 first_avail = i;
1905                 }
1906         }
1907         if (first_avail == -1) {
1908                 DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
1909                             ev_port->qm_port.id);
1910                 return -EINVAL;
1911         }
1912
1913         ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
1914                                          ev_port->qm_port.id,
1915                                          ev_queue->qm_queue.id,
1916                                          priority);
1917
1918         if (!ret)
1919                 ev_port->link[first_avail].mapped = true;
1920
1921         return ret;
1922 }
1923
1924 static int
1925 dlb_do_port_link(struct rte_eventdev *dev,
1926                  struct dlb_eventdev_queue *ev_queue,
1927                  struct dlb_eventdev_port *ev_port,
1928                  uint8_t prio)
1929 {
1930         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1931         int err;
1932
1933         /* Don't link until start time. */
1934         if (dlb->run_state == DLB_RUN_STATE_STOPPED)
1935                 return 0;
1936
1937         if (ev_queue->qm_queue.is_directed)
1938                 err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
1939         else
1940                 err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
1941
1942         if (err) {
1943                 DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1944                             ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1945                             ev_queue->id, ev_port->id);
1946
1947                 rte_errno = err;
1948                 return -1;
1949         }
1950
1951         return 0;
1952 }
1953
1954 static int
1955 dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
1956 {
1957         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1958         int i;
1959
1960         /* Perform requested port->queue links */
1961         for (i = 0; i < dlb->num_ports; i++) {
1962                 struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
1963                 int j;
1964
1965                 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1966                         struct dlb_eventdev_queue *ev_queue;
1967                         uint8_t prio, queue_id;
1968
1969                         if (!ev_port->link[j].valid)
1970                                 continue;
1971
1972                         prio = ev_port->link[j].priority;
1973                         queue_id = ev_port->link[j].queue_id;
1974
1975                         if (dlb_validate_port_link(ev_port, queue_id, true, j))
1976                                 return -EINVAL;
1977
1978                         ev_queue = &dlb->ev_queues[queue_id];
1979
1980                         if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
1981                                 return -EINVAL;
1982                 }
1983         }
1984
1985         return 0;
1986 }
1987
1988 static int
1989 dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1990                        const uint8_t queues[], const uint8_t priorities[],
1991                        uint16_t nb_links)
1992
1993 {
1994         struct dlb_eventdev_port *ev_port = event_port;
1995         struct dlb_eventdev *dlb;
1996         int i, j;
1997
1998         RTE_SET_USED(dev);
1999
2000         if (ev_port == NULL) {
2001                 DLB_LOG_ERR("dlb: evport not setup\n");
2002                 rte_errno = -EINVAL;
2003                 return 0;
2004         }
2005
2006         if (!ev_port->setup_done &&
2007             ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
2008                 DLB_LOG_ERR("dlb: evport not setup\n");
2009                 rte_errno = -EINVAL;
2010                 return 0;
2011         }
2012
2013         /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
2014          * queues pointer.
2015          */
2016         if (nb_links == 0) {
2017                 DLB_LOG_DBG("dlb: nb_links is 0\n");
2018                 return 0; /* Ignore and return success */
2019         }
2020
2021         dlb = ev_port->dlb;
2022
2023         DLB_LOG_DBG("Linking %u queues to %s port %d\n",
2024                     nb_links,
2025                     ev_port->qm_port.is_directed ? "DIR" : "LDB",
2026                     ev_port->id);
2027
2028         for (i = 0; i < nb_links; i++) {
2029                 struct dlb_eventdev_queue *ev_queue;
2030                 uint8_t queue_id, prio;
2031                 bool found = false;
2032                 int index = -1;
2033
2034                 queue_id = queues[i];
2035                 prio = priorities[i];
2036
2037                 /* Check if the link already exists. */
2038                 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
2039                         if (ev_port->link[j].valid) {
2040                                 if (ev_port->link[j].queue_id == queue_id) {
2041                                         found = true;
2042                                         index = j;
2043                                         break;
2044                                 }
2045                         } else {
2046                                 if (index == -1)
2047                                         index = j;
2048                         }
2049
2050                 /* could not link */
2051                 if (index == -1)
2052                         break;
2053
2054                 /* Check if already linked at the requested priority */
2055                 if (found && ev_port->link[index].priority == prio)
2056                         continue;
2057
2058                 if (dlb_validate_port_link(ev_port, queue_id, found, index))
2059                         break; /* return index of offending queue */
2060
2061                 ev_queue = &dlb->ev_queues[queue_id];
2062
2063                 if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
2064                         break; /* return index of offending queue */
2065
2066                 ev_queue->num_links++;
2067
2068                 ev_port->link[index].queue_id = queue_id;
2069                 ev_port->link[index].priority = prio;
2070                 ev_port->link[index].valid = true;
2071                 /* An existing entry means this was just a priority change */
2072                 if (!found)
2073                         ev_port->num_links++;
2074         }
2075         return i;
2076 }
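/* Illustrative sketch (not part of this driver): the port_link handler above
 * is reached via rte_event_port_link(), which returns the number of queues
 * actually linked (the loop index i above). dev_id, port_id and the queue IDs
 * are assumed to come from the application.
 *
 *     uint8_t queues[2] = {0, 1};
 *     uint8_t prios[2] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
 *                         RTE_EVENT_DEV_PRIORITY_NORMAL};
 *     int n = rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *
 *     if (n != 2)
 *             ... inspect rte_errno, e.g. -EINVAL or -ENOSPC as set above ...
 */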
2077
2078 static int
2079 dlb_eventdev_start(struct rte_eventdev *dev)
2080 {
2081         struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
2082         struct dlb_hw_dev *handle = &dlb->qm_instance;
2083         struct dlb_start_domain_args cfg;
2084         struct dlb_cmd_response response;
2085         int ret, i;
2086
2087         rte_spinlock_lock(&dlb->qm_instance.resource_lock);
2088         if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
2089                 DLB_LOG_ERR("bad state %d for dev_start\n",
2090                             (int)dlb->run_state);
2091                 rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
2092                 return -EINVAL;
2093         }
2094         dlb->run_state = DLB_RUN_STATE_STARTING;
2095         rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
2096
2097         /* If the device was configured more than once, some event ports and/or
2098          * queues may need to be reconfigured.
2099          */
2100         ret = dlb_eventdev_reapply_configuration(dev);
2101         if (ret)
2102                 return ret;
2103
2104         /* The DLB PMD delays port links until the device is started. */
2105         ret = dlb_eventdev_apply_port_links(dev);
2106         if (ret)
2107                 return ret;
2108
2109         cfg.response = (uintptr_t)&response;
2110
2111         for (i = 0; i < dlb->num_ports; i++) {
2112                 if (!dlb->ev_ports[i].setup_done) {
2113                         DLB_LOG_ERR("dlb: port %d not setup\n", i);
2114                         return -ESTALE;
2115                 }
2116         }
2117
2118         for (i = 0; i < dlb->num_queues; i++) {
2119                 if (dlb->ev_queues[i].num_links == 0) {
2120                         DLB_LOG_ERR("dlb: queue %d is not linked\n", i);
2121                         return -ENOLINK;
2122                 }
2123         }
2124
2125         ret = dlb_iface_sched_domain_start(handle, &cfg);
2126         if (ret < 0) {
2127                 DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
2128                             ret, dlb_error_strings[response.status]);
2129                 return ret;
2130         }
2131
2132         dlb->run_state = DLB_RUN_STATE_STARTED;
2133         DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
2134
2135         return 0;
2136 }
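/* Illustrative sketch (not part of this driver): because the PMD defers
 * directed queue creation and all port->queue links until dev_start (see
 * dlb_do_port_link() and dlb_eventdev_apply_port_links()), the expected
 * application ordering is the standard eventdev sequence:
 *
 *     rte_event_dev_configure(dev_id, &dev_conf);
 *     rte_event_queue_setup(dev_id, qid, &qconf);       // per queue
 *     rte_event_port_setup(dev_id, port_id, &pconf);    // per port
 *     rte_event_port_link(dev_id, port_id, queues, prios, nb);
 *     rte_event_dev_start(dev_id);                      // links applied here
 */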
2137
2138 static inline int
2139 dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
2140                              struct dlb_eventdev_port *ev_port)
2141 {
2142         uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
2143                                                 __ATOMIC_SEQ_CST);
2144         const int num = 1;
2145
2146         if (unlikely(ev_port->inflight_max < sw_inflights)) {
2147                 DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2148                 rte_errno = -ENOSPC;
2149                 return 1;
2150         }
2151
2152         if (ev_port->inflight_credits < num) {
2153                 /* check if event enqueue brings ev_port over max threshold */
2154                 uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2155
2156                 if (sw_inflights + credit_update_quanta >
2157                     dlb->new_event_limit) {
2158                         DLB_INC_STAT(
2159                                 ev_port->stats.traffic.tx_nospc_new_event_limit,
2160                                 1);
2161                         rte_errno = -ENOSPC;
2162                         return 1;
2163                 }
2164
2165                 __atomic_fetch_add(&dlb->inflights, credit_update_quanta,
2166                                    __ATOMIC_SEQ_CST);
2167                 ev_port->inflight_credits += (credit_update_quanta);
2168
2169                 if (ev_port->inflight_credits < num) {
2170                         DLB_INC_STAT(
2171                             ev_port->stats.traffic.tx_nospc_inflight_credits,
2172                             1);
2173                         rte_errno = -ENOSPC;
2174                         return 1;
2175                 }
2176         }
2177
2178         return 0;
2179 }
2180
2181 static inline void
2182 dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
2183                          struct dlb_eventdev_port *ev_port)
2184 {
2185         uint16_t quanta = ev_port->credit_update_quanta;
2186
2187         if (ev_port->inflight_credits >= quanta * 2) {
2188                 /* Replenish credits, saving one quanta for enqueues */
2189                 uint16_t val = ev_port->inflight_credits - quanta;
2190
2191                 __atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
2192                 ev_port->inflight_credits -= val;
2193         }
2194 }
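/* Numeric sketch of the software-credit flow above (illustrative): with a
 * credit_update_quanta of Q, an OP_NEW enqueue that finds no local credits
 * atomically adds Q to dlb->inflights and caches Q credits on the port; each
 * OP_RELEASE returns one credit to the local cache, and once the cache exceeds
 * 2 * Q the port hands everything but Q back to the device-wide pool. This
 * amortizes the atomic add/sub over roughly Q events in each direction.
 */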
2195
2196 static __rte_always_inline uint16_t
2197 dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
2198 {
2199         volatile uint16_t *popcount;
2200
2201         if (ldb)
2202                 popcount = port_data->ldb_popcount;
2203         else
2204                 popcount = port_data->dir_popcount;
2205
2206         return *popcount;
2207 }
2208
2209 static inline int
2210 dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
2211                                  struct process_local_port_data *port_data)
2212 {
2213         if (unlikely(qm_port->cached_ldb_credits == 0)) {
2214                 uint16_t pc;
2215
2216                 pc = dlb_read_pc(port_data, true);
2217
2218                 qm_port->cached_ldb_credits = pc -
2219                         qm_port->ldb_pushcount_at_credit_expiry;
2220                 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2221                         DLB_INC_STAT(
2222                         qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2223                         1);
2224
2225                         DLB_LOG_DBG("ldb credits exhausted\n");
2226                         return 1;
2227                 }
2228                 qm_port->ldb_pushcount_at_credit_expiry +=
2229                         qm_port->cached_ldb_credits;
2230         }
2231
2232         return 0;
2233 }
2234
2235 static inline int
2236 dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
2237                                  struct process_local_port_data *port_data)
2238 {
2239         if (unlikely(qm_port->cached_dir_credits == 0)) {
2240                 uint16_t pc;
2241
2242                 pc = dlb_read_pc(port_data, false);
2243
2244                 qm_port->cached_dir_credits = pc -
2245                         qm_port->dir_pushcount_at_credit_expiry;
2246
2247                 if (unlikely(qm_port->cached_dir_credits == 0)) {
2248                         DLB_INC_STAT(
2249                         qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2250                         1);
2251
2252                         DLB_LOG_DBG("dir credits exhausted\n");
2253                         return 1;
2254                 }
2255                 qm_port->dir_pushcount_at_credit_expiry +=
2256                         qm_port->cached_dir_credits;
2257         }
2258
2259         return 0;
2260 }
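/* Sketch of the hardware-credit accounting above (illustrative): the device
 * exposes a free-running push count per port, read via dlb_read_pc(). The
 * number of newly available credits is the delta
 *
 *     new_credits = popcount - pushcount_at_credit_expiry
 *
 * and the baseline is then advanced by the amount cached, so credits are
 * consumed locally from cached_{ldb,dir}_credits and the popcount is only
 * re-read when the cache hits zero.
 */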
2261
2262 static inline int
2263 dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
2264                        struct dlb_port *qm_port,
2265                        const struct rte_event ev[],
2266                        struct process_local_port_data *port_data,
2267                        uint8_t *sched_type,
2268                        uint8_t *queue_id)
2269 {
2270         struct dlb_eventdev *dlb = ev_port->dlb;
2271         struct dlb_eventdev_queue *ev_queue;
2272         uint16_t *cached_credits = NULL;
2273         struct dlb_queue *qm_queue;
2274
2275         ev_queue = &dlb->ev_queues[ev->queue_id];
2276         qm_queue = &ev_queue->qm_queue;
2277         *queue_id = qm_queue->id;
2278
2279         /* Ignore sched_type and hardware credits on release events */
2280         if (ev->op == RTE_EVENT_OP_RELEASE)
2281                 goto op_check;
2282
2283         if (!qm_queue->is_directed) {
2284                 /* Load balanced destination queue */
2285
2286                 if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
2287                         rte_errno = -ENOSPC;
2288                         return 1;
2289                 }
2290                 cached_credits = &qm_port->cached_ldb_credits;
2291
2292                 switch (ev->sched_type) {
2293                 case RTE_SCHED_TYPE_ORDERED:
2294                         DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2295                         if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2296                                 DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
2297                                             *queue_id);
2298                                 rte_errno = -EINVAL;
2299                                 return 1;
2300                         }
2301                         *sched_type = DLB_SCHED_ORDERED;
2302                         break;
2303                 case RTE_SCHED_TYPE_ATOMIC:
2304                         DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2305                         *sched_type = DLB_SCHED_ATOMIC;
2306                         break;
2307                 case RTE_SCHED_TYPE_PARALLEL:
2308                         DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2309                         if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2310                                 *sched_type = DLB_SCHED_ORDERED;
2311                         else
2312                                 *sched_type = DLB_SCHED_UNORDERED;
2313                         break;
2314                 default:
2315                         DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2316                         DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
2317                         rte_errno = -EINVAL;
2318                         return 1;
2319                 }
2320         } else {
2321                 /* Directed destination queue */
2322
2323                 if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
2324                         rte_errno = -ENOSPC;
2325                         return 1;
2326                 }
2327                 cached_credits = &qm_port->cached_dir_credits;
2328
2329                 DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2330
2331                 *sched_type = DLB_SCHED_DIRECTED;
2332         }
2333
2334 op_check:
2335         switch (ev->op) {
2336         case RTE_EVENT_OP_NEW:
2337                 /* Check that a sw credit is available */
2338                 if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
2339                         rte_errno = -ENOSPC;
2340                         return 1;
2341                 }
2342                 ev_port->inflight_credits--;
2343                 (*cached_credits)--;
2344                 break;
2345         case RTE_EVENT_OP_FORWARD:
2346                 /* Check for outstanding_releases underflow. If this occurs,
2347                  * the application is not using the EVENT_OPs correctly; for
2348                  * example, forwarding or releasing events that were not
2349                  * dequeued.
2350                  */
2351                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2352                 ev_port->outstanding_releases--;
2353                 qm_port->issued_releases++;
2354                 (*cached_credits)--;
2355                 break;
2356         case RTE_EVENT_OP_RELEASE:
2357                 ev_port->inflight_credits++;
2358                 /* Check for outstanding_releases underflow. If this occurs,
2359                  * the application is not using the EVENT_OPs correctly; for
2360                  * example, forwarding or releasing events that were not
2361                  * dequeued.
2362                  */
2363                 RTE_ASSERT(ev_port->outstanding_releases > 0);
2364                 ev_port->outstanding_releases--;
2365                 qm_port->issued_releases++;
2366                 /* Replenish s/w credits if enough are cached */
2367                 dlb_replenish_sw_credits(dlb, ev_port);
2368                 break;
2369         }
2370
2371         DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2372         DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2373
2374 #ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
2375         if (ev->op != RTE_EVENT_OP_RELEASE) {
2376                 DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
2377                 DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
2378         }
2379 #endif
2380
2381         return 0;
2382 }
2383
2384 static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
2385         {
2386                 /* Load-balanced cmd bytes */
2387                 [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
2388                 [RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
2389                 [RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
2390         },
2391         {
2392                 /* Directed cmd bytes */
2393                 [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
2394                 [RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
2395                 [RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
2396         },
2397 };
2398
2399 static inline void
2400 dlb_event_build_hcws(struct dlb_port *qm_port,
2401                      const struct rte_event ev[],
2402                      int num,
2403                      uint8_t *sched_type,
2404                      uint8_t *queue_id)
2405 {
2406         struct dlb_enqueue_qe *qe;
2407         uint16_t sched_word[4];
2408         __m128i sse_qe[2];
2409         int i;
2410
2411         qe = qm_port->qe4;
2412
2413         sse_qe[0] = _mm_setzero_si128();
2414         sse_qe[1] = _mm_setzero_si128();
2415
2416         switch (num) {
2417         case 4:
2418                 /* Construct the metadata portion of two HCWs in one 128b SSE
2419                  * register. HCW metadata is constructed in the SSE registers
2420                  * like so:
2421                  * sse_qe[0][63:0]:   qe[0]'s metadata
2422                  * sse_qe[0][127:64]: qe[1]'s metadata
2423                  * sse_qe[1][63:0]:   qe[2]'s metadata
2424                  * sse_qe[1][127:64]: qe[3]'s metadata
2425                  */
2426
2427                 /* Convert the event operation into a command byte and store it
2428                  * in the metadata:
2429                  * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
2430                  * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2431                  * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
2432                  * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2433                  */
2434 #define DLB_QE_CMD_BYTE 7
2435                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2436                                 cmd_byte_map[qm_port->is_directed][ev[0].op],
2437                                 DLB_QE_CMD_BYTE);
2438                 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2439                                 cmd_byte_map[qm_port->is_directed][ev[1].op],
2440                                 DLB_QE_CMD_BYTE + 8);
2441                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2442                                 cmd_byte_map[qm_port->is_directed][ev[2].op],
2443                                 DLB_QE_CMD_BYTE);
2444                 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2445                                 cmd_byte_map[qm_port->is_directed][ev[3].op],
2446                                 DLB_QE_CMD_BYTE + 8);
2447
2448                 /* Store priority, scheduling type, and queue ID in the sched
2449                  * word array because these values are re-used when the
2450                  * destination is a directed queue.
2451                  */
2452                 sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
2453                                 sched_type[0] << 8 |
2454                                 queue_id[0];
2455                 sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
2456                                 sched_type[1] << 8 |
2457                                 queue_id[1];
2458                 sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
2459                                 sched_type[2] << 8 |
2460                                 queue_id[2];
2461                 sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
2462                                 sched_type[3] << 8 |
2463                                 queue_id[3];
2464
2465                 /* Store the event priority, scheduling type, and queue ID in
2466                  * the metadata:
2467                  * sse_qe[0][31:16] = sched_word[0]
2468                  * sse_qe[0][95:80] = sched_word[1]
2469                  * sse_qe[1][31:16] = sched_word[2]
2470                  * sse_qe[1][95:80] = sched_word[3]
2471                  */
2472 #define DLB_QE_QID_SCHED_WORD 1
2473                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2474                                              sched_word[0],
2475                                              DLB_QE_QID_SCHED_WORD);
2476                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2477                                              sched_word[1],
2478                                              DLB_QE_QID_SCHED_WORD + 4);
2479                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2480                                              sched_word[2],
2481                                              DLB_QE_QID_SCHED_WORD);
2482                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2483                                              sched_word[3],
2484                                              DLB_QE_QID_SCHED_WORD + 4);
2485
2486                 /* If the destination is a load-balanced queue, store the lock
2487                  * ID. If it is a directed queue, DLB places this field in
2488                  * bytes 10-11 of the received QE, so we format it accordingly:
2489                  * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
2490                  * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2491                  * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
2492                  * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2493                  */
2494 #define DLB_QE_LOCK_ID_WORD 2
2495                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2496                                 (sched_type[0] == DLB_SCHED_DIRECTED) ?
2497                                         sched_word[0] : ev[0].flow_id,
2498                                 DLB_QE_LOCK_ID_WORD);
2499                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2500                                 (sched_type[1] == DLB_SCHED_DIRECTED) ?
2501                                         sched_word[1] : ev[1].flow_id,
2502                                 DLB_QE_LOCK_ID_WORD + 4);
2503                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2504                                 (sched_type[2] == DLB_SCHED_DIRECTED) ?
2505                                         sched_word[2] : ev[2].flow_id,
2506                                 DLB_QE_LOCK_ID_WORD);
2507                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2508                                 (sched_type[3] == DLB_SCHED_DIRECTED) ?
2509                                         sched_word[3] : ev[3].flow_id,
2510                                 DLB_QE_LOCK_ID_WORD + 4);
2511
2512                 /* Store the event type and sub event type in the metadata:
2513                  * sse_qe[0][15:0]  = flow_id[0]
2514                  * sse_qe[0][79:64] = flow_id[1]
2515                  * sse_qe[1][15:0]  = flow_id[2]
2516                  * sse_qe[1][79:64] = flow_id[3]
2517                  */
2518 #define DLB_QE_EV_TYPE_WORD 0
2519                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2520                                              ev[0].sub_event_type << 8 |
2521                                                 ev[0].event_type,
2522                                              DLB_QE_EV_TYPE_WORD);
2523                 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2524                                              ev[1].sub_event_type << 8 |
2525                                                 ev[1].event_type,
2526                                              DLB_QE_EV_TYPE_WORD + 4);
2527                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2528                                              ev[2].sub_event_type << 8 |
2529                                                 ev[2].event_type,
2530                                              DLB_QE_EV_TYPE_WORD);
2531                 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2532                                              ev[3].sub_event_type << 8 |
2533                                                 ev[3].event_type,
2534                                              DLB_QE_EV_TYPE_WORD + 4);
2535
2536                 /* Store the metadata to memory (use the double-precision
2537                  * _mm_storeh_pd because there is no integer function for
2538                  * storing the upper 64b):
2539                  * qe[0] metadata = sse_qe[0][63:0]
2540                  * qe[1] metadata = sse_qe[0][127:64]
2541                  * qe[2] metadata = sse_qe[1][63:0]
2542                  * qe[3] metadata = sse_qe[1][127:64]
2543                  */
2544                 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2545                 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2546                               (__m128d) sse_qe[0]);
2547                 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2548                 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2549                               (__m128d) sse_qe[1]);
2550
2551                 qe[0].data = ev[0].u64;
2552                 qe[1].data = ev[1].u64;
2553                 qe[2].data = ev[2].u64;
2554                 qe[3].data = ev[3].u64;
2555
2556                 break;
2557         case 3:
2558         case 2:
2559         case 1:
2560                 for (i = 0; i < num; i++) {
2561                         qe[i].cmd_byte =
2562                                 cmd_byte_map[qm_port->is_directed][ev[i].op];
2563                         qe[i].sched_type = sched_type[i];
2564                         qe[i].data = ev[i].u64;
2565                         qe[i].qid = queue_id[i];
2566                         qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);
2567                         qe[i].lock_id = ev[i].flow_id;
2568                         if (sched_type[i] == DLB_SCHED_DIRECTED) {
2569                                 struct dlb_msg_info *info =
2570                                         (struct dlb_msg_info *)&qe[i].lock_id;
2571
2572                                 info->qid = queue_id[i];
2573                                 info->sched_type = DLB_SCHED_DIRECTED;
2574                                 info->priority = qe[i].priority;
2575                         }
2576                         qe[i].u.event_type.major = ev[i].event_type;
2577                         qe[i].u.event_type.sub = ev[i].sub_event_type;
2578                 }
2579                 break;
2580         case 0:
2581                 break;
2582         }
2583 }
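/* Resulting per-QE metadata layout (derived from the insert offsets above and
 * shown for reference only; the authoritative definition is
 * struct dlb_enqueue_qe):
 *
 *     bits 15:0   event_type | sub_event_type << 8   (DLB_QE_EV_TYPE_WORD)
 *     bits 31:16  qid | sched_type << 8 | prio << 10 (DLB_QE_QID_SCHED_WORD)
 *     bits 47:32  flow/lock ID, or the sched word for directed queues
 *     bits 63:56  command byte from cmd_byte_map[]   (DLB_QE_CMD_BYTE)
 *
 * The remaining 8 bytes of each 16B QE carry the application's ev.u64 data.
 */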
2584
2585 static __rte_always_inline void
2586 dlb_pp_write(struct dlb_enqueue_qe *qe4,
2587              struct process_local_port_data *port_data)
2588 {
2589         dlb_movdir64b(port_data->pp_addr, qe4);
2590 }
2591
2592 static inline void
2593 dlb_hw_do_enqueue(struct dlb_port *qm_port,
2594                   bool do_sfence,
2595                   struct process_local_port_data *port_data)
2596 {
2597         DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");
2598
2599         /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2600          * application writes complete before enqueueing the release HCW.
2601          */
2602         if (do_sfence)
2603                 rte_wmb();
2604
2605         dlb_pp_write(qm_port->qe4, port_data);
2606 }
2607
2608 static inline uint16_t
2609 __dlb_event_enqueue_burst(void *event_port,
2610                           const struct rte_event events[],
2611                           uint16_t num)
2612 {
2613         struct dlb_eventdev_port *ev_port = event_port;
2614         struct dlb_port *qm_port = &ev_port->qm_port;
2615         struct process_local_port_data *port_data;
2616         int i;
2617
2618         RTE_ASSERT(ev_port->enq_configured);
2619         RTE_ASSERT(events != NULL);
2620
2621         rte_errno = 0;
2622         i = 0;
2623
2624         port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
2625
2626         while (i < num) {
2627                 uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
2628                 uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
2629                 int pop_offs = 0;
2630                 int j = 0;
2631
2632                 memset(qm_port->qe4,
2633                        0,
2634                        DLB_NUM_QES_PER_CACHE_LINE *
2635                        sizeof(struct dlb_enqueue_qe));
2636
2637                 for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2638                         const struct rte_event *ev = &events[i + j];
2639
2640                         if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
2641                                                    port_data, &sched_types[j],
2642                                                    &queue_ids[j]))
2643                                 break;
2644                 }
2645
2646                 if (j == 0)
2647                         break;
2648
2649                 dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
2650                                      sched_types, queue_ids);
2651
2652                 dlb_hw_do_enqueue(qm_port, i == 0, port_data);
2653
2654                 /* Don't include the token pop QE in the enqueue count */
2655                 i += j - pop_offs;
2656
2657                 /* Don't interpret j < DLB_NUM_... as out-of-credits if
2658                  * pop_offs != 0
2659                  */
2660                 if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2661                         break;
2662         }
2663
2664         RTE_ASSERT(!(i == 0 && rte_errno != -ENOSPC));
2665
2666         return i;
2667 }
2668
2669 static inline uint16_t
2670 dlb_event_enqueue_burst(void *event_port,
2671                         const struct rte_event events[],
2672                         uint16_t num)
2673 {
2674         return __dlb_event_enqueue_burst(event_port, events, num);
2675 }
2676
2677 static inline uint16_t
2678 dlb_event_enqueue(void *event_port,
2679                   const struct rte_event events[])
2680 {
2681         return __dlb_event_enqueue_burst(event_port, events, 1);
2682 }
2683
2684 static uint16_t
2685 dlb_event_enqueue_new_burst(void *event_port,
2686                             const struct rte_event events[],
2687                             uint16_t num)
2688 {
2689         return __dlb_event_enqueue_burst(event_port, events, num);
2690 }
2691
2692 static uint16_t
2693 dlb_event_enqueue_forward_burst(void *event_port,
2694                                 const struct rte_event events[],
2695                                 uint16_t num)
2696 {
2697         return __dlb_event_enqueue_burst(event_port, events, num);
2698 }
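/* Illustrative sketch (not part of this driver): all four entry points above
 * funnel into __dlb_event_enqueue_burst(), which writes QEs in groups of
 * DLB_NUM_QES_PER_CACHE_LINE. A typical caller (dev_id/port_id assumed to be
 * application-provided):
 *
 *     struct rte_event ev = {
 *             .queue_id = 0,
 *             .op = RTE_EVENT_OP_NEW,
 *             .sched_type = RTE_SCHED_TYPE_ATOMIC,
 *             .flow_id = flow,
 *             .u64 = data,
 *     };
 *     uint16_t n = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *
 *     if (n == 0 && rte_errno == -ENOSPC)
 *             ... out of credits, retry later ...
 */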
2699
2700 void
2701 dlb_entry_points_init(struct rte_eventdev *dev)
2702 {
2703         static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
2704                 .dev_infos_get    = dlb_eventdev_info_get,
2705                 .dev_configure    = dlb_eventdev_configure,
2706                 .dev_start        = dlb_eventdev_start,
2707                 .queue_def_conf   = dlb_eventdev_queue_default_conf_get,
2708                 .port_def_conf    = dlb_eventdev_port_default_conf_get,
2709                 .queue_setup      = dlb_eventdev_queue_setup,
2710                 .port_setup       = dlb_eventdev_port_setup,
2711                 .port_link        = dlb_eventdev_port_link,
2712                 .port_unlink      = dlb_eventdev_port_unlink,
2713                 .port_unlinks_in_progress =
2714                                     dlb_eventdev_port_unlinks_in_progress,
2715                 .dump             = dlb_eventdev_dump,
2716                 .xstats_get       = dlb_eventdev_xstats_get,
2717                 .xstats_get_names = dlb_eventdev_xstats_get_names,
2718                 .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
2719                 .xstats_reset       = dlb_eventdev_xstats_reset,
2720         };
2721
2722         /* Expose PMD's eventdev interface */
2723         dev->dev_ops = &dlb_eventdev_entry_ops;
2724
2725         dev->enqueue = dlb_event_enqueue;
2726         dev->enqueue_burst = dlb_event_enqueue_burst;
2727         dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
2728         dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
2729 }
2730
2731 int
2732 dlb_primary_eventdev_probe(struct rte_eventdev *dev,
2733                            const char *name,
2734                            struct dlb_devargs *dlb_args)
2735 {
2736         struct dlb_eventdev *dlb;
2737         int err;
2738
2739         dlb = dev->data->dev_private;
2740
2741         dlb->event_dev = dev; /* backlink */
2742
2743         evdev_dlb_default_info.driver_name = name;
2744
2745         dlb->max_num_events_override = dlb_args->max_num_events;
2746         dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
2747         dlb->defer_sched = dlb_args->defer_sched;
2748         dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
2749
2750         /* Open the interface.
2751          * For vdev mode, this means open the dlb kernel module.
2752          */
2753         err = dlb_iface_open(&dlb->qm_instance, name);
2754         if (err < 0) {
2755                 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
2756                             err);
2757                 return err;
2758         }
2759
2760         err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
2761         if (err < 0) {
2762                 DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
2763                             err);
2764                 return err;
2765         }
2766
2767         err = dlb_hw_query_resources(dlb);
2768         if (err) {
2769                 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
2770                 return err;
2771         }
2772
2773         err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
2774         if (err < 0) {
2775                 DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
2776                 return err;
2777         }
2778
2779         /* Complete xstats runtime initialization */
2780         err = dlb_xstats_init(dlb);
2781         if (err) {
2782                 DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
2783                 return err;
2784         }
2785
2786         rte_spinlock_init(&dlb->qm_instance.resource_lock);
2787
2788         dlb_iface_low_level_io_init(dlb);
2789
2790         dlb_entry_points_init(dev);
2791
2792         return 0;
2793 }
2794
2795 int
2796 dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
2797                              const char *name)
2798 {
2799         struct dlb_eventdev *dlb;
2800         int err;
2801
2802         dlb = dev->data->dev_private;
2803
2804         evdev_dlb_default_info.driver_name = name;
2805
2806         err = dlb_iface_open(&dlb->qm_instance, name);
2807         if (err < 0) {
2808                 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
2809                             err);
2810                 return err;
2811         }
2812
2813         err = dlb_hw_query_resources(dlb);
2814         if (err) {
2815                 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
2816                 return err;
2817         }
2818
2819         dlb_iface_low_level_io_init(dlb);
2820
2821         dlb_entry_points_init(dev);
2822
2823         return 0;
2824 }
2825
2826 int
2827 dlb_parse_params(const char *params,
2828                  const char *name,
2829                  struct dlb_devargs *dlb_args)
2830 {
2831         int ret = 0;
2832         static const char * const args[] = { NUMA_NODE_ARG,
2833                                              DLB_MAX_NUM_EVENTS,
2834                                              DLB_NUM_DIR_CREDITS,
2835                                              DEV_ID_ARG,
2836                                              DLB_DEFER_SCHED_ARG,
2837                                              DLB_NUM_ATM_INFLIGHTS_ARG,
2838                                              NULL };
2839
2840         if (params && params[0] != '\0') {
2841                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
2842
2843                 if (kvlist == NULL) {
2844                         DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
2845                                      name);
2846                 } else {
2847                         ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
2848                                                  set_numa_node,
2849                                                  &dlb_args->socket_id);
2850                         if (ret != 0) {
2851                                 DLB_LOG_ERR("%s: Error parsing numa node parameter",
2852                                             name);
2853                                 rte_kvargs_free(kvlist);
2854                                 return ret;
2855                         }
2856
2857                         ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
2858                                                  set_max_num_events,
2859                                                  &dlb_args->max_num_events);
2860                         if (ret != 0) {
2861                                 DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
2862                                             name);
2863                                 rte_kvargs_free(kvlist);
2864                                 return ret;
2865                         }
2866
2867                         ret = rte_kvargs_process(kvlist,
2868                                         DLB_NUM_DIR_CREDITS,
2869                                         set_num_dir_credits,
2870                                         &dlb_args->num_dir_credits_override);
2871                         if (ret != 0) {
2872                                 DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
2873                                             name);
2874                                 rte_kvargs_free(kvlist);
2875                                 return ret;
2876                         }
2877
2878                         ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
2879                                                  set_dev_id,
2880                                                  &dlb_args->dev_id);
2881                         if (ret != 0) {
2882                                 DLB_LOG_ERR("%s: Error parsing dev_id parameter",
2883                                             name);
2884                                 rte_kvargs_free(kvlist);
2885                                 return ret;
2886                         }
2887
2888                         ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
2889                                                  set_defer_sched,
2890                                                  &dlb_args->defer_sched);
2891                         if (ret != 0) {
2892                                 DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
2893                                             name);
2894                                 rte_kvargs_free(kvlist);
2895                                 return ret;
2896                         }
2897
2898                         ret = rte_kvargs_process(kvlist,
2899                                                  DLB_NUM_ATM_INFLIGHTS_ARG,
2900                                                  set_num_atm_inflights,
2901                                                  &dlb_args->num_atm_inflights);
2902                         if (ret != 0) {
2903                                 DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
2904                                             name);
2905                                 rte_kvargs_free(kvlist);
2906                                 return ret;
2907                         }
2908
2909                         rte_kvargs_free(kvlist);
2910                 }
2911         }
2912         return ret;
2913 }
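/* Illustrative sketch (not part of this driver): dlb_parse_params() is fed the
 * device's devargs string, e.g. an EAL command line along the lines of
 *
 *     --vdev=<dlb device name>,max_num_events=2048,defer_sched=on
 *
 * (the exact key strings are the *_ARG / DLB_* macros listed in args[] above;
 * the names shown here are assumptions). rte_kvargs_parse() splits the string
 * on commas, and each rte_kvargs_process() call invokes the matching set_*()
 * handler once per occurrence of its key.
 */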
2914 RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);