1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
13 #include <sys/fcntl.h>
17 #include <rte_common.h>
18 #include <rte_config.h>
19 #include <rte_cycles.h>
20 #include <rte_debug.h>
22 #include <rte_errno.h>
24 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
28 #include <rte_prefetch.h>
30 #include <rte_string_fns.h>
32 #include <rte_eventdev.h>
33 #include <rte_eventdev_pmd.h>
36 #include "dlb_iface.h"
37 #include "dlb_inline_fns.h"
40 * Resources exposed to eventdev.
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
45 static struct rte_event_dev_info evdev_dlb_default_info = {
46 .driver_name = "", /* probe will set */
47 .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
48 .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
50 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
52 .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
54 .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
55 .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
56 .max_event_priority_levels = DLB_QID_PRIORITIES,
57 .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
58 .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
59 .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
60 .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
61 .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
62 .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
63 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
64 RTE_EVENT_DEV_CAP_EVENT_QOS |
65 RTE_EVENT_DEV_CAP_BURST_MODE |
66 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
67 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
68 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
71 struct process_local_port_data
72 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
/* Return the current depth of the given queue.
 *
 * NOTE(review): placeholder implementation — always reports 0 so the xstats
 * code has a symbol to link against; real depth query to be added.
 */
int
dlb_get_queue_depth(struct dlb_eventdev *dlb,
		    struct dlb_eventdev_queue *queue)
{
	/* DUMMY FOR NOW So "xstats" patch compiles */
	RTE_SET_USED(dlb);
	RTE_SET_USED(queue);

	return 0;
}
86 dlb_hw_query_resources(struct dlb_eventdev *dlb)
88 struct dlb_hw_dev *handle = &dlb->qm_instance;
89 struct dlb_hw_resource_info *dlb_info = &handle->info;
92 ret = dlb_iface_get_num_resources(handle,
93 &dlb->hw_rsrc_query_results);
95 DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
99 /* Complete filling in device resource info returned to evdev app,
100 * overriding any default values.
101 * The capabilities (CAPs) were set at compile time.
104 evdev_dlb_default_info.max_event_queues =
105 dlb->hw_rsrc_query_results.num_ldb_queues;
107 evdev_dlb_default_info.max_event_ports =
108 dlb->hw_rsrc_query_results.num_ldb_ports;
110 evdev_dlb_default_info.max_num_events =
111 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
113 /* Save off values used when creating the scheduling domain. */
115 handle->info.num_sched_domains =
116 dlb->hw_rsrc_query_results.num_sched_domains;
118 handle->info.hw_rsrc_max.nb_events_limit =
119 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
121 handle->info.hw_rsrc_max.num_queues =
122 dlb->hw_rsrc_query_results.num_ldb_queues +
123 dlb->hw_rsrc_query_results.num_dir_ports;
125 handle->info.hw_rsrc_max.num_ldb_queues =
126 dlb->hw_rsrc_query_results.num_ldb_queues;
128 handle->info.hw_rsrc_max.num_ldb_ports =
129 dlb->hw_rsrc_query_results.num_ldb_ports;
131 handle->info.hw_rsrc_max.num_dir_ports =
132 dlb->hw_rsrc_query_results.num_dir_ports;
134 handle->info.hw_rsrc_max.reorder_window_size =
135 dlb->hw_rsrc_query_results.num_hist_list_entries;
137 rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
143 dlb_free_qe_mem(struct dlb_port *qm_port)
148 rte_free(qm_port->qe4);
151 rte_free(qm_port->consume_qe);
152 qm_port->consume_qe = NULL;
/* Wrapper for string to int conversion. Substituted for atoi(...), which is
 * unsafe: atoi reports neither parse failures nor out-of-range values.
 */
#define DLB_BASE_10 10

/* Convert a base-10 string to int.
 *
 * Returns 0 and stores the value in *result on success; returns -EINVAL on a
 * NULL argument, an unparseable string, or a value outside int range.
 */
static int
dlb_string_to_int(int *result, const char *str)
{
	long ret;
	char *endstr;

	if (str == NULL || result == NULL)
		return -EINVAL;

	errno = 0;
	ret = strtol(str, &endstr, DLB_BASE_10);
	if (errno == ERANGE)
		return -EINVAL;

	/* long int and int may be different width for some architectures */
	if (ret < INT_MIN || ret > INT_MAX || endstr == str)
		return -EINVAL;

	*result = ret;
	return 0;
}
183 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
185 int *socket_id = opaque;
188 ret = dlb_string_to_int(socket_id, value);
192 if (*socket_id > RTE_MAX_NUMA_NODES)
199 set_max_num_events(const char *key __rte_unused,
203 int *max_num_events = opaque;
206 if (value == NULL || opaque == NULL) {
207 DLB_LOG_ERR("NULL pointer\n");
211 ret = dlb_string_to_int(max_num_events, value);
215 if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
216 DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
217 DLB_MAX_NUM_LDB_CREDITS);
225 set_num_dir_credits(const char *key __rte_unused,
229 int *num_dir_credits = opaque;
232 if (value == NULL || opaque == NULL) {
233 DLB_LOG_ERR("NULL pointer\n");
237 ret = dlb_string_to_int(num_dir_credits, value);
241 if (*num_dir_credits < 0 ||
242 *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
243 DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
244 DLB_MAX_NUM_DIR_CREDITS);
251 * This function first unmaps all memory mappings and closes the
252 * domain's file descriptor, which causes the driver to reset the
253 * scheduling domain. Once that completes (when close() returns), we
254 * can safely free the dynamically allocated memory used by the
258 * We will maintain a use count and use that to determine when
259 * a reset is required. In PF mode, we never mmap, or munmap
260 * device memory, and we own the entire physical PCI device.
264 dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
266 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
267 enum dlb_configuration_state config_state;
270 /* Close and reset the domain */
271 dlb_iface_domain_close(dlb);
273 /* Free all dynamically allocated port memory */
274 for (i = 0; i < dlb->num_ports; i++)
275 dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
277 /* If reconfiguring, mark the device's queues and ports as "previously
278 * configured." If the user does not reconfigure them, the PMD will
279 * reapply their previous configuration when the device is started.
281 config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
283 for (i = 0; i < dlb->num_ports; i++) {
284 dlb->ev_ports[i].qm_port.config_state = config_state;
285 /* Reset setup_done so ports can be reconfigured */
286 dlb->ev_ports[i].setup_done = false;
287 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
288 dlb->ev_ports[i].link[j].mapped = false;
291 for (i = 0; i < dlb->num_queues; i++)
292 dlb->ev_queues[i].qm_queue.config_state = config_state;
294 for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
295 dlb->ev_queues[i].setup_done = false;
298 dlb->num_ldb_ports = 0;
299 dlb->num_dir_ports = 0;
301 dlb->num_ldb_queues = 0;
302 dlb->num_dir_queues = 0;
303 dlb->configured = false;
307 dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
309 struct dlb_create_ldb_pool_args cfg;
310 struct dlb_cmd_response response;
316 if (!handle->cfg.resources.num_ldb_credits) {
317 handle->cfg.ldb_credit_pool_id = 0;
318 handle->cfg.num_ldb_credits = 0;
322 cfg.response = (uintptr_t)&response;
323 cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
325 ret = dlb_iface_ldb_credit_pool_create(handle,
328 DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
329 ret, dlb_error_strings[response.status]);
332 handle->cfg.ldb_credit_pool_id = response.id;
333 handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
339 dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
341 struct dlb_create_dir_pool_args cfg;
342 struct dlb_cmd_response response;
348 if (!handle->cfg.resources.num_dir_credits) {
349 handle->cfg.dir_credit_pool_id = 0;
350 handle->cfg.num_dir_credits = 0;
354 cfg.response = (uintptr_t)&response;
355 cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
357 ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
359 DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
360 ret, dlb_error_strings[response.status]);
362 handle->cfg.dir_credit_pool_id = response.id;
363 handle->cfg.num_dir_credits = cfg.num_dir_credits;
369 dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
370 struct dlb_eventdev *dlb,
371 const struct dlb_hw_rsrcs *resources_asked)
374 struct dlb_create_sched_domain_args *config_params;
375 struct dlb_cmd_response response;
377 if (resources_asked == NULL) {
378 DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
383 /* Map generic qm resources to dlb resources */
384 config_params = &handle->cfg.resources;
386 config_params->response = (uintptr_t)&response;
388 /* DIR ports and queues */
390 config_params->num_dir_ports =
391 resources_asked->num_dir_ports;
393 config_params->num_dir_credits =
394 resources_asked->num_dir_credits;
396 /* LDB ports and queues */
398 config_params->num_ldb_queues =
399 resources_asked->num_ldb_queues;
401 config_params->num_ldb_ports =
402 resources_asked->num_ldb_ports;
404 config_params->num_ldb_credits =
405 resources_asked->num_ldb_credits;
407 config_params->num_atomic_inflights =
408 dlb->num_atm_inflights_per_queue *
409 config_params->num_ldb_queues;
411 config_params->num_hist_list_entries = config_params->num_ldb_ports *
412 DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
414 /* dlb limited to 1 credit pool per queue type */
415 config_params->num_ldb_credit_pools = 1;
416 config_params->num_dir_credit_pools = 1;
418 DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
419 config_params->num_ldb_queues,
420 config_params->num_ldb_ports,
421 config_params->num_dir_ports,
422 config_params->num_atomic_inflights,
423 config_params->num_hist_list_entries,
424 config_params->num_ldb_credits,
425 config_params->num_dir_credits,
426 config_params->num_ldb_credit_pools,
427 config_params->num_dir_credit_pools);
429 /* Configure the QM */
431 ret = dlb_iface_sched_domain_create(handle, config_params);
433 DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
436 dlb_error_strings[response.status]);
440 handle->domain_id = response.id;
441 handle->domain_id_valid = 1;
443 config_params->response = 0;
445 ret = dlb_ldb_credit_pool_create(handle);
447 DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
451 ret = dlb_dir_credit_pool_create(handle);
453 DLB_LOG_ERR("dlb: create dir credit pool failed\n");
457 handle->cfg.configured = true;
462 dlb_iface_domain_close(dlb);
468 /* End HW specific */
470 dlb_eventdev_info_get(struct rte_eventdev *dev,
471 struct rte_event_dev_info *dev_info)
473 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
476 ret = dlb_hw_query_resources(dlb);
478 const struct rte_eventdev_data *data = dev->data;
480 DLB_LOG_ERR("get resources err=%d, devid=%d\n",
482 /* fn is void, so fall through and return values set up in
487 /* Add num resources currently owned by this domain.
488 * These would become available if the scheduling domain were reset due
489 * to the application recalling eventdev_configure to *reconfigure* the
492 evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
493 evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
494 evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
496 /* In DLB A-stepping hardware, applications are limited to 128
497 * configured ports (load-balanced or directed). The reported number of
498 * available ports must reflect this.
500 if (dlb->revision < DLB_REV_B0) {
503 used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
504 dlb->hw_rsrc_query_results.num_ldb_ports -
505 dlb->hw_rsrc_query_results.num_dir_ports;
507 evdev_dlb_default_info.max_event_ports =
508 RTE_MIN(evdev_dlb_default_info.max_event_ports,
512 evdev_dlb_default_info.max_event_queues =
513 RTE_MIN(evdev_dlb_default_info.max_event_queues,
514 RTE_EVENT_MAX_QUEUES_PER_DEV);
516 evdev_dlb_default_info.max_num_events =
517 RTE_MIN(evdev_dlb_default_info.max_num_events,
518 dlb->max_num_events_override);
520 *dev_info = evdev_dlb_default_info;
523 /* Note: 1 QM instance per QM device, QM instance/device == event device */
525 dlb_eventdev_configure(const struct rte_eventdev *dev)
527 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
528 struct dlb_hw_dev *handle = &dlb->qm_instance;
529 struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
530 const struct rte_eventdev_data *data = dev->data;
531 const struct rte_event_dev_config *config = &data->dev_conf;
534 /* If this eventdev is already configured, we must release the current
535 * scheduling domain before attempting to configure a new one.
537 if (dlb->configured) {
538 dlb_hw_reset_sched_domain(dev, true);
540 ret = dlb_hw_query_resources(dlb);
542 DLB_LOG_ERR("get resources err=%d, devid=%d\n",
548 if (config->nb_event_queues > rsrcs->num_queues) {
549 DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
550 config->nb_event_queues,
554 if (config->nb_event_ports > (rsrcs->num_ldb_ports
555 + rsrcs->num_dir_ports)) {
556 DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
557 config->nb_event_ports,
558 (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
561 if (config->nb_events_limit > rsrcs->nb_events_limit) {
562 DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
563 config->nb_events_limit,
564 rsrcs->nb_events_limit);
568 if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
569 dlb->global_dequeue_wait = false;
573 dlb->global_dequeue_wait = true;
575 timeout32 = config->dequeue_timeout_ns;
577 dlb->global_dequeue_wait_ticks =
578 timeout32 * (rte_get_timer_hz() / 1E9);
581 /* Does this platform support umonitor/umwait? */
582 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
583 if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
584 RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
585 DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
586 RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
589 dlb->umwait_allowed = true;
592 rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
593 rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
594 /* 1 dir queue per dir port */
595 rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
597 /* Scale down nb_events_limit by 4 for directed credits, since there
598 * are 4x as many load-balanced credits.
600 rsrcs->num_ldb_credits = 0;
601 rsrcs->num_dir_credits = 0;
603 if (rsrcs->num_ldb_queues)
604 rsrcs->num_ldb_credits = config->nb_events_limit;
605 if (rsrcs->num_dir_ports)
606 rsrcs->num_dir_credits = config->nb_events_limit / 4;
607 if (dlb->num_dir_credits_override != -1)
608 rsrcs->num_dir_credits = dlb->num_dir_credits_override;
610 if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
611 DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
615 dlb->new_event_limit = config->nb_events_limit;
616 __atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);
618 /* Save number of ports/queues for this event dev */
619 dlb->num_ports = config->nb_event_ports;
620 dlb->num_queues = config->nb_event_queues;
621 dlb->num_dir_ports = rsrcs->num_dir_ports;
622 dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
623 dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
624 dlb->num_dir_queues = dlb->num_dir_ports;
625 dlb->num_ldb_credits = rsrcs->num_ldb_credits;
626 dlb->num_dir_credits = rsrcs->num_dir_credits;
628 dlb->configured = true;
634 set_dev_id(const char *key __rte_unused,
638 int *dev_id = opaque;
641 if (value == NULL || opaque == NULL) {
642 DLB_LOG_ERR("NULL pointer\n");
646 ret = dlb_string_to_int(dev_id, value);
654 set_defer_sched(const char *key __rte_unused,
658 int *defer_sched = opaque;
660 if (value == NULL || opaque == NULL) {
661 DLB_LOG_ERR("NULL pointer\n");
665 if (strncmp(value, "on", 2) != 0) {
666 DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
677 set_num_atm_inflights(const char *key __rte_unused,
681 int *num_atm_inflights = opaque;
684 if (value == NULL || opaque == NULL) {
685 DLB_LOG_ERR("NULL pointer\n");
689 ret = dlb_string_to_int(num_atm_inflights, value);
693 if (*num_atm_inflights < 0 ||
694 *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
695 DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
696 DLB_MAX_NUM_ATM_INFLIGHTS);
704 dlb_entry_points_init(struct rte_eventdev *dev)
706 static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
707 .dev_infos_get = dlb_eventdev_info_get,
708 .dev_configure = dlb_eventdev_configure,
709 .dump = dlb_eventdev_dump,
710 .xstats_get = dlb_eventdev_xstats_get,
711 .xstats_get_names = dlb_eventdev_xstats_get_names,
712 .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
713 .xstats_reset = dlb_eventdev_xstats_reset,
716 /* Expose PMD's eventdev interface */
717 dev->dev_ops = &dlb_eventdev_entry_ops;
721 dlb_primary_eventdev_probe(struct rte_eventdev *dev,
723 struct dlb_devargs *dlb_args)
725 struct dlb_eventdev *dlb;
728 dlb = dev->data->dev_private;
730 dlb->event_dev = dev; /* backlink */
732 evdev_dlb_default_info.driver_name = name;
734 dlb->max_num_events_override = dlb_args->max_num_events;
735 dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
736 dlb->defer_sched = dlb_args->defer_sched;
737 dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
739 /* Open the interface.
740 * For vdev mode, this means open the dlb kernel module.
742 err = dlb_iface_open(&dlb->qm_instance, name);
744 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
749 err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
751 DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
756 err = dlb_hw_query_resources(dlb);
758 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
762 err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
764 DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
768 /* Complete xtstats runtime initialization */
769 err = dlb_xstats_init(dlb);
771 DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
775 rte_spinlock_init(&dlb->qm_instance.resource_lock);
777 dlb_iface_low_level_io_init(dlb);
779 dlb_entry_points_init(dev);
785 dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
788 struct dlb_eventdev *dlb;
791 dlb = dev->data->dev_private;
793 evdev_dlb_default_info.driver_name = name;
795 err = dlb_iface_open(&dlb->qm_instance, name);
797 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
802 err = dlb_hw_query_resources(dlb);
804 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
808 dlb_iface_low_level_io_init(dlb);
810 dlb_entry_points_init(dev);
816 dlb_parse_params(const char *params,
818 struct dlb_devargs *dlb_args)
821 static const char * const args[] = { NUMA_NODE_ARG,
826 DLB_NUM_ATM_INFLIGHTS_ARG,
829 if (params && params[0] != '\0') {
830 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
832 if (kvlist == NULL) {
833 DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
836 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
838 &dlb_args->socket_id);
840 DLB_LOG_ERR("%s: Error parsing numa node parameter",
842 rte_kvargs_free(kvlist);
846 ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
848 &dlb_args->max_num_events);
850 DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
852 rte_kvargs_free(kvlist);
856 ret = rte_kvargs_process(kvlist,
859 &dlb_args->num_dir_credits_override);
861 DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
863 rte_kvargs_free(kvlist);
867 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
871 DLB_LOG_ERR("%s: Error parsing dev_id parameter",
873 rte_kvargs_free(kvlist);
877 ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
879 &dlb_args->defer_sched);
881 DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
883 rte_kvargs_free(kvlist);
887 ret = rte_kvargs_process(kvlist,
888 DLB_NUM_ATM_INFLIGHTS_ARG,
889 set_num_atm_inflights,
890 &dlb_args->num_atm_inflights);
892 DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
894 rte_kvargs_free(kvlist);
898 rte_kvargs_free(kvlist);
903 RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);