1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
14 #include <sys/fcntl.h>
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
25 #include <rte_kvargs.h>
27 #include <rte_malloc.h>
29 #include <rte_prefetch.h>
31 #include <rte_string_fns.h>
33 #include "dlb2_priv.h"
34 #include "dlb2_iface.h"
35 #include "dlb2_inline_fns.h"
38 * Resources exposed to eventdev. Some values overridden at runtime using
39 * values returned by the DLB kernel driver.
41 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
42 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
44 static struct rte_event_dev_info evdev_dlb2_default_info = {
45 .driver_name = "", /* probe will set */
46 .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
47 .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
48 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
49 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
53 .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
54 .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
55 .max_event_priority_levels = DLB2_QID_PRIORITIES,
56 .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
57 .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
58 .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
59 .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
60 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
61 .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
62 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
63 RTE_EVENT_DEV_CAP_EVENT_QOS |
64 RTE_EVENT_DEV_CAP_BURST_MODE |
65 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
66 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
67 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
70 struct process_local_port_data
71 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
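/* Illustrative sketch (not part of the driver): an application reads the
 * limits and capability flags advertised above through the standard
 * eventdev API before configuring the device. The device id 0 is an
 * assumption for the example only.
 *
 *	struct rte_event_dev_info info;
 *	int has_burst = 0;
 *
 *	if (rte_event_dev_info_get(0, &info) == 0)
 *		has_burst = !!(info.event_dev_cap &
 *			       RTE_EVENT_DEV_CAP_BURST_MODE);
 */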
74 * DUMMY - added so that xstats path will compile/link.
75 * Will be replaced by real version in a subsequent patch.
79 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
80 struct dlb2_eventdev_queue *queue)
89 dlb2_free_qe_mem(struct dlb2_port *qm_port)
94 rte_free(qm_port->qe4);
97 rte_free(qm_port->int_arm_qe);
98 qm_port->int_arm_qe = NULL;
100 rte_free(qm_port->consume_qe);
101 qm_port->consume_qe = NULL;
103 rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
104 dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
107 /* override defaults with value(s) provided on command line */
109 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
110 int *qid_depth_thresholds)
114 for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
115 if (qid_depth_thresholds[q] != 0)
116 dlb2->ev_queues[q].depth_threshold =
117 qid_depth_thresholds[q];
122 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
124 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
125 struct dlb2_hw_resource_info *dlb2_info = &handle->info;
128 /* Query driver resources provisioned for this device */
130 ret = dlb2_iface_get_num_resources(handle,
131 &dlb2->hw_rsrc_query_results);
133 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
137 /* Complete filling in device resource info returned to evdev app,
138 * overriding any default values.
139 * The capabilities (CAPs) were set at compile time.
142 evdev_dlb2_default_info.max_event_queues =
143 dlb2->hw_rsrc_query_results.num_ldb_queues;
145 evdev_dlb2_default_info.max_event_ports =
146 dlb2->hw_rsrc_query_results.num_ldb_ports;
148 evdev_dlb2_default_info.max_num_events =
149 dlb2->hw_rsrc_query_results.num_ldb_credits;
151 /* Save off values used when creating the scheduling domain. */
153 handle->info.num_sched_domains =
154 dlb2->hw_rsrc_query_results.num_sched_domains;
156 handle->info.hw_rsrc_max.nb_events_limit =
157 dlb2->hw_rsrc_query_results.num_ldb_credits;
159 handle->info.hw_rsrc_max.num_queues =
160 dlb2->hw_rsrc_query_results.num_ldb_queues +
161 dlb2->hw_rsrc_query_results.num_dir_ports;
163 handle->info.hw_rsrc_max.num_ldb_queues =
164 dlb2->hw_rsrc_query_results.num_ldb_queues;
166 handle->info.hw_rsrc_max.num_ldb_ports =
167 dlb2->hw_rsrc_query_results.num_ldb_ports;
169 handle->info.hw_rsrc_max.num_dir_ports =
170 dlb2->hw_rsrc_query_results.num_dir_ports;
172 handle->info.hw_rsrc_max.reorder_window_size =
173 dlb2->hw_rsrc_query_results.num_hist_list_entries;
175 rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
180 #define DLB2_BASE_10 10
183 dlb2_string_to_int(int *result, const char *str)
188 if (str == NULL || result == NULL)
192 ret = strtol(str, &endptr, DLB2_BASE_10);
196 /* long and int may have different widths on some architectures */
197 if (ret < INT_MIN || ret > INT_MAX || endptr == str)
205 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
207 int *socket_id = opaque;
210 ret = dlb2_string_to_int(socket_id, value);
214 if (*socket_id > RTE_MAX_NUMA_NODES)
220 set_max_num_events(const char *key __rte_unused,
224 int *max_num_events = opaque;
227 if (value == NULL || opaque == NULL) {
228 DLB2_LOG_ERR("NULL pointer\n");
232 ret = dlb2_string_to_int(max_num_events, value);
236 if (*max_num_events < 0 || *max_num_events >
237 DLB2_MAX_NUM_LDB_CREDITS) {
238 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
239 DLB2_MAX_NUM_LDB_CREDITS);
247 set_num_dir_credits(const char *key __rte_unused,
251 int *num_dir_credits = opaque;
254 if (value == NULL || opaque == NULL) {
255 DLB2_LOG_ERR("NULL pointer\n");
259 ret = dlb2_string_to_int(num_dir_credits, value);
263 if (*num_dir_credits < 0 ||
264 *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
265 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
266 DLB2_MAX_NUM_DIR_CREDITS);
274 set_dev_id(const char *key __rte_unused,
278 int *dev_id = opaque;
281 if (value == NULL || opaque == NULL) {
282 DLB2_LOG_ERR("NULL pointer\n");
286 ret = dlb2_string_to_int(dev_id, value);
294 set_cos(const char *key __rte_unused,
298 enum dlb2_cos *cos_id = opaque;
302 if (value == NULL || opaque == NULL) {
303 DLB2_LOG_ERR("NULL pointer\n");
307 ret = dlb2_string_to_int(&x, value);
311 if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
313 "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
325 set_qid_depth_thresh(const char *key __rte_unused,
329 struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
330 int first, last, thresh, i;
332 if (value == NULL || opaque == NULL) {
333 DLB2_LOG_ERR("NULL pointer\n");
337 /* command line override may take one of the following 3 forms:
338 * qid_depth_thresh=all:<threshold_value> ... all queues
339 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
340 * qid_depth_thresh=qid:<threshold_value> ... just one queue
342 if (sscanf(value, "all:%d", &thresh) == 1) {
344 last = DLB2_MAX_NUM_QUEUES - 1;
345 } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
346 /* we have everything we need */
347 } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
350 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
354 if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
355 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
359 if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
360 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold must be between 0 and %d\n",
361 DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
365 for (i = first; i <= last; i++)
366 qid_thresh->val[i] = thresh; /* indexed by qid */
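/* Illustrative examples (not part of the driver) of the three accepted
 * forms parsed above:
 *
 *	qid_depth_thresh=all:32      every queue's threshold set to 32
 *	qid_depth_thresh=0-3:64      queues 0 through 3 set to 64
 *	qid_depth_thresh=5:128       queue 5 only set to 128
 *
 * These are passed as device arguments on the EAL command line, e.g.
 * -a <pci-addr>,qid_depth_thresh=all:32 (the PCI address is a
 * placeholder for the probed DLB2 device).
 */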
372 dlb2_eventdev_info_get(struct rte_eventdev *dev,
373 struct rte_event_dev_info *dev_info)
375 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
378 ret = dlb2_hw_query_resources(dlb2);
380 const struct rte_eventdev_data *data = dev->data;
382 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
384 /* fn is void, so fall through and return values set up in
389 /* Add num resources currently owned by this domain.
390 * These would become available if the scheduling domain were reset due
391 * to the application recalling eventdev_configure to *reconfigure* the
394 evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
395 evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
396 evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
398 evdev_dlb2_default_info.max_event_queues =
399 RTE_MIN(evdev_dlb2_default_info.max_event_queues,
400 RTE_EVENT_MAX_QUEUES_PER_DEV);
402 evdev_dlb2_default_info.max_num_events =
403 RTE_MIN(evdev_dlb2_default_info.max_num_events,
404 dlb2->max_num_events_override);
406 *dev_info = evdev_dlb2_default_info;
410 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
411 const struct dlb2_hw_rsrcs *resources_asked)
414 struct dlb2_create_sched_domain_args *cfg;
416 if (resources_asked == NULL) {
417 DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
422 /* Map generic qm resources to dlb2 resources */
423 cfg = &handle->cfg.resources;
425 /* DIR ports and queues */
427 cfg->num_dir_ports = resources_asked->num_dir_ports;
429 cfg->num_dir_credits = resources_asked->num_dir_credits;
433 cfg->num_ldb_queues = resources_asked->num_ldb_queues;
437 cfg->cos_strict = 0; /* Best effort */
438 cfg->num_cos_ldb_ports[0] = 0;
439 cfg->num_cos_ldb_ports[1] = 0;
440 cfg->num_cos_ldb_ports[2] = 0;
441 cfg->num_cos_ldb_ports[3] = 0;
443 switch (handle->cos_id) {
445 cfg->num_ldb_ports = 0; /* no don't care ports */
446 cfg->num_cos_ldb_ports[0] =
447 resources_asked->num_ldb_ports;
450 cfg->num_ldb_ports = 0; /* no don't care ports */
451 cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
454 cfg->num_ldb_ports = 0; /* no don't care ports */
455 cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
458 cfg->num_ldb_ports = 0; /* no don't care ports */
459 cfg->num_cos_ldb_ports[3] =
460 resources_asked->num_ldb_ports;
462 case DLB2_COS_DEFAULT:
463 /* all ldb ports are don't care ports from a cos perspective */
465 resources_asked->num_ldb_ports;
469 cfg->num_ldb_credits =
470 resources_asked->num_ldb_credits;
472 cfg->num_atomic_inflights =
473 DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
476 cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
477 DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
479 DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
481 resources_asked->num_ldb_ports,
483 cfg->num_atomic_inflights,
484 cfg->num_hist_list_entries,
485 cfg->num_ldb_credits,
486 cfg->num_dir_credits);
488 /* Configure the QM */
490 ret = dlb2_iface_sched_domain_create(handle, cfg);
492 DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
494 dlb2_error_strings[cfg->response.status]);
499 handle->domain_id = cfg->response.id;
500 handle->cfg.configured = true;
508 dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
510 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
511 enum dlb2_configuration_state config_state;
514 dlb2_iface_domain_reset(dlb2);
516 /* Free all dynamically allocated port memory */
517 for (i = 0; i < dlb2->num_ports; i++)
518 dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
520 /* If reconfiguring, mark the device's queues and ports as "previously
521 * configured." If the user doesn't reconfigure them, the PMD will
522 * reapply their previous configuration when the device is started.
524 config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
527 for (i = 0; i < dlb2->num_ports; i++) {
528 dlb2->ev_ports[i].qm_port.config_state = config_state;
529 /* Reset setup_done so ports can be reconfigured */
530 dlb2->ev_ports[i].setup_done = false;
531 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
532 dlb2->ev_ports[i].link[j].mapped = false;
535 for (i = 0; i < dlb2->num_queues; i++)
536 dlb2->ev_queues[i].qm_queue.config_state = config_state;
538 for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
539 dlb2->ev_queues[i].setup_done = false;
542 dlb2->num_ldb_ports = 0;
543 dlb2->num_dir_ports = 0;
544 dlb2->num_queues = 0;
545 dlb2->num_ldb_queues = 0;
546 dlb2->num_dir_queues = 0;
547 dlb2->configured = false;
550 /* Note: 1 QM instance per QM device, QM instance/device == event device */
552 dlb2_eventdev_configure(const struct rte_eventdev *dev)
554 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
555 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
556 struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
557 const struct rte_eventdev_data *data = dev->data;
558 const struct rte_event_dev_config *config = &data->dev_conf;
561 /* If this eventdev is already configured, we must release the current
562 * scheduling domain before attempting to configure a new one.
564 if (dlb2->configured) {
565 dlb2_hw_reset_sched_domain(dev, true);
567 ret = dlb2_hw_query_resources(dlb2);
569 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
575 if (config->nb_event_queues > rsrcs->num_queues) {
576 DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
577 config->nb_event_queues,
581 if (config->nb_event_ports > (rsrcs->num_ldb_ports
582 + rsrcs->num_dir_ports)) {
583 DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
584 config->nb_event_ports,
585 (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
588 if (config->nb_events_limit > rsrcs->nb_events_limit) {
589 DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
590 config->nb_events_limit,
591 rsrcs->nb_events_limit);
595 if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
596 dlb2->global_dequeue_wait = false;
600 dlb2->global_dequeue_wait = true;
602 /* note size mismatch of timeout vals in eventdev lib. */
603 timeout32 = config->dequeue_timeout_ns;
605 dlb2->global_dequeue_wait_ticks =
606 timeout32 * (rte_get_timer_hz() / 1E9);
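/* Worked example (illustrative): with rte_get_timer_hz() == 2E9 (a 2 GHz
 * timer, an assumption) and dequeue_timeout_ns == 1000, the line above
 * yields 1000 * 2 = 2000 global dequeue wait ticks.
 */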
609 /* Does this platform support umonitor/umwait? */
610 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
611 if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
612 RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
613 DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
614 RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
617 dlb2->umwait_allowed = true;
620 rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
621 rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
622 /* 1 dir queue per dir port */
623 rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
625 /* Scale down nb_events_limit by 4 for directed credits, since there
626 * are 4x as many load-balanced credits.
628 rsrcs->num_ldb_credits = 0;
629 rsrcs->num_dir_credits = 0;
631 if (rsrcs->num_ldb_queues)
632 rsrcs->num_ldb_credits = config->nb_events_limit;
633 if (rsrcs->num_dir_ports)
634 rsrcs->num_dir_credits = config->nb_events_limit / 4;
635 if (dlb2->num_dir_credits_override != -1)
636 rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
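/* Worked example (illustrative): with nb_events_limit == 8192, at least
 * one load-balanced queue and one directed port requested, and no
 * num_dir_credits devarg override, the domain is requested below with
 * 8192 load-balanced credits and 8192 / 4 == 2048 directed credits.
 */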
638 if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
639 DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
643 dlb2->new_event_limit = config->nb_events_limit;
644 __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
646 /* Save number of ports/queues for this event dev */
647 dlb2->num_ports = config->nb_event_ports;
648 dlb2->num_queues = config->nb_event_queues;
649 dlb2->num_dir_ports = rsrcs->num_dir_ports;
650 dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
651 dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
652 dlb2->num_dir_queues = dlb2->num_dir_ports;
653 dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
654 dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
655 dlb2->dir_credit_pool = rsrcs->num_dir_credits;
656 dlb2->max_dir_credits = rsrcs->num_dir_credits;
658 dlb2->configured = true;
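/* Illustrative sketch (not part of the driver): a minimal application-side
 * configuration that this function accepts. All values are assumptions
 * chosen to stay within the limits advertised by dev_infos_get:
 *
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 4,
 *		.nb_event_ports = 4,
 *		.nb_single_link_event_port_queues = 1,
 *		.nb_events_limit = 4096,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 32,
 *		.nb_event_port_enqueue_depth = 32,
 *	};
 *
 *	ret = rte_event_dev_configure(dev_id, &cfg);
 *
 * With this configuration the PMD reserves one directed port/queue pair
 * and three load-balanced ports and queues, per the accounting above.
 */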
664 dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
666 struct rte_event_port_conf *port_conf)
668 RTE_SET_USED(port_id);
669 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
671 port_conf->new_event_threshold = dlb2->new_event_limit;
672 port_conf->dequeue_depth = 32;
673 port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
674 port_conf->event_port_cfg = 0;
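/* Illustrative sketch (not part of the driver): applications typically
 * fetch these defaults and then create the port. dev_id and port id 0
 * are assumptions for the example only:
 *
 *	struct rte_event_port_conf pc;
 *
 *	if (rte_event_port_default_conf_get(dev_id, 0, &pc) == 0)
 *		ret = rte_event_port_setup(dev_id, 0, &pc);
 */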
678 dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
680 struct rte_event_queue_conf *queue_conf)
683 RTE_SET_USED(queue_id);
685 queue_conf->nb_atomic_flows = 1024;
686 queue_conf->nb_atomic_order_sequences = 64;
687 queue_conf->event_queue_cfg = 0;
688 queue_conf->priority = 0;
692 dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
694 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
695 struct dlb2_get_sn_allocation_args cfg;
700 ret = dlb2_iface_get_sn_allocation(handle, &cfg);
702 DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
703 ret, dlb2_error_strings[cfg.response.status]);
707 return cfg.response.id;
711 dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
713 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
714 struct dlb2_set_sn_allocation_args cfg;
720 ret = dlb2_iface_set_sn_allocation(handle, &cfg);
722 DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
723 ret, dlb2_error_strings[cfg.response.status]);
731 dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
733 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
734 struct dlb2_get_sn_occupancy_args cfg;
739 ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
741 DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
742 ret, dlb2_error_strings[cfg.response.status]);
746 return cfg.response.id;
749 /* Query the current sequence number allocations and, if they conflict with the
750 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
751 * This is best-effort; if it fails, the subsequent attempt to configure
752 * the load-balanced queue will fail and return an error.
755 dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
756 const struct rte_event_queue_conf *queue_conf)
758 int grp_occupancy[DLB2_NUM_SN_GROUPS];
759 int grp_alloc[DLB2_NUM_SN_GROUPS];
760 int i, sequence_numbers;
762 sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
764 for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
767 grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
768 if (grp_alloc[i] < 0)
771 total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
773 grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
774 if (grp_occupancy[i] < 0)
777 /* DLB has at least one available slot for the requested
778 * sequence numbers, so no further configuration required.
780 if (grp_alloc[i] == sequence_numbers &&
781 grp_occupancy[i] < total_slots)
785 /* None of the sequence number groups are configured for the requested
786 * sequence numbers, so we have to reconfigure one of them. This is
787 * only possible if a group is not in use.
789 for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
790 if (grp_occupancy[i] == 0)
794 if (i == DLB2_NUM_SN_GROUPS) {
795 DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
796 __func__, sequence_numbers);
800 /* Attempt to configure slot i with the requested number of sequence
801 * numbers. Ignore the return value -- if this fails, the error will be
802 * caught during subsequent queue configuration.
804 dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
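/* Worked example (illustrative): if a queue requests
 * nb_atomic_order_sequences == 64 and no group already provides 64-SN
 * slots with a slot free, the first group found with zero occupancy is
 * reprogrammed above to hand out 64 sequence numbers per slot
 * (DLB2_MAX_LDB_SN_ALLOC / 64 slots in that group).
 */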
808 dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
809 struct dlb2_eventdev_queue *ev_queue,
810 const struct rte_event_queue_conf *evq_conf)
812 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
813 struct dlb2_queue *queue = &ev_queue->qm_queue;
814 struct dlb2_create_ldb_queue_args cfg;
819 if (evq_conf == NULL)
822 if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
823 if (evq_conf->nb_atomic_order_sequences != 0)
824 sched_type = RTE_SCHED_TYPE_ORDERED;
826 sched_type = RTE_SCHED_TYPE_PARALLEL;
828 sched_type = evq_conf->schedule_type;
830 cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
831 cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
832 cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
834 if (sched_type != RTE_SCHED_TYPE_ORDERED) {
835 cfg.num_sequence_numbers = 0;
836 cfg.num_qid_inflights = 2048;
839 /* The application should set this to the number of hardware flows it
840 * wants, not the total number of flows it will use. E.g. if the app
841 * uses 64 flows and sets the compression level to 64, best-case it
842 * gets 64 unique hashed flows in hardware.
844 switch (evq_conf->nb_atomic_flows) {
845 /* Valid DLB2 compression levels */
850 case (1 * 1024): /* 1K */
851 case (2 * 1024): /* 2K */
852 case (4 * 1024): /* 4K */
853 case (64 * 1024): /* 64K */
854 cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
857 /* Invalid compression level */
858 cfg.lock_id_comp_level = 0; /* no compression */
861 if (ev_queue->depth_threshold == 0) {
862 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
863 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
865 cfg.depth_threshold = ev_queue->depth_threshold;
867 ret = dlb2_iface_ldb_queue_create(handle, &cfg);
869 DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
870 ret, dlb2_error_strings[cfg.response.status]);
874 qm_qid = cfg.response.id;
876 /* Save off queue config for debug, resource lookups, and reconfig */
877 queue->num_qid_inflights = cfg.num_qid_inflights;
878 queue->num_atm_inflights = cfg.num_atomic_inflights;
880 queue->sched_type = sched_type;
881 queue->config_state = DLB2_CONFIGURED;
883 DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
885 cfg.num_atomic_inflights,
886 cfg.num_sequence_numbers,
887 cfg.num_qid_inflights);
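/* Worked example (illustrative): nb_atomic_flows == 1024 matches a case
 * in the switch above, so lock_id_comp_level is set to 1024 and lock IDs
 * are hashed into at most 1024 hardware flows; an unlisted value such as
 * 1000 falls through to the default case and disables compression
 * (lock_id_comp_level == 0).
 */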
893 dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
894 struct dlb2_eventdev_queue *ev_queue,
895 const struct rte_event_queue_conf *queue_conf)
897 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
900 if (queue_conf->nb_atomic_order_sequences)
901 dlb2_program_sn_allocation(dlb2, queue_conf);
903 qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
905 DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
910 dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
912 ev_queue->qm_queue.id = qm_qid;
917 static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
921 for (i = 0; i < dlb2->num_queues; i++) {
922 if (dlb2->ev_queues[i].setup_done &&
923 dlb2->ev_queues[i].qm_queue.is_directed)
931 dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
932 struct dlb2_eventdev_queue *ev_queue)
934 struct dlb2_eventdev_port *ev_port;
937 for (i = 0; i < dlb2->num_ports; i++) {
938 ev_port = &dlb2->ev_ports[i];
940 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
941 if (!ev_port->link[j].valid ||
942 ev_port->link[j].queue_id != ev_queue->id)
945 ev_port->link[j].valid = false;
946 ev_port->num_links--;
950 ev_queue->num_links = 0;
954 dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
956 const struct rte_event_queue_conf *queue_conf)
958 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
959 struct dlb2_eventdev_queue *ev_queue;
962 if (queue_conf == NULL)
965 if (ev_qid >= dlb2->num_queues)
968 ev_queue = &dlb2->ev_queues[ev_qid];
970 ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
971 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
972 ev_queue->id = ev_qid;
973 ev_queue->conf = *queue_conf;
975 if (!ev_queue->qm_queue.is_directed) {
976 ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
978 /* The directed queue isn't set up until link time, at which
979 * point we know its directed port ID. Directed queue setup
980 * will only fail if this queue is already set up or there are
981 * no directed queues left to configure.
985 ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
987 if (ev_queue->setup_done ||
988 dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
992 /* Tear down pre-existing port->queue links */
993 if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
994 dlb2_queue_link_teardown(dlb2, ev_queue);
997 ev_queue->setup_done = true;
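/* Illustrative sketch (not part of the driver): application-side setup of
 * one load-balanced atomic queue and one single-link (directed) queue.
 * dev_id and the queue ids are assumptions; nb_atomic_flows uses a
 * compression level accepted by dlb2_hw_create_ldb_queue():
 *
 *	struct rte_event_queue_conf qc = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.nb_atomic_flows = 1024,
 *		.nb_atomic_order_sequences = 64,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	ret = rte_event_queue_setup(dev_id, 0, &qc);
 *
 *	qc.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
 *	ret = rte_event_queue_setup(dev_id, 1, &qc);
 */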
1003 dlb2_entry_points_init(struct rte_eventdev *dev)
1005 /* Expose PMD's eventdev interface */
1006 static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
1007 .dev_infos_get = dlb2_eventdev_info_get,
1008 .dev_configure = dlb2_eventdev_configure,
1009 .queue_def_conf = dlb2_eventdev_queue_default_conf_get,
1010 .queue_setup = dlb2_eventdev_queue_setup,
1011 .port_def_conf = dlb2_eventdev_port_default_conf_get,
1012 .dump = dlb2_eventdev_dump,
1013 .xstats_get = dlb2_eventdev_xstats_get,
1014 .xstats_get_names = dlb2_eventdev_xstats_get_names,
1015 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
1016 .xstats_reset = dlb2_eventdev_xstats_reset,
1019 dev->dev_ops = &dlb2_eventdev_entry_ops;
1023 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
1025 struct dlb2_devargs *dlb2_args)
1027 struct dlb2_eventdev *dlb2;
1030 dlb2 = dev->data->dev_private;
1032 dlb2->event_dev = dev; /* backlink */
1034 evdev_dlb2_default_info.driver_name = name;
1036 dlb2->max_num_events_override = dlb2_args->max_num_events;
1037 dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
1038 dlb2->qm_instance.cos_id = dlb2_args->cos_id;
1040 err = dlb2_iface_open(&dlb2->qm_instance, name);
1042 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
1047 err = dlb2_iface_get_device_version(&dlb2->qm_instance,
1050 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
1055 err = dlb2_hw_query_resources(dlb2);
1057 DLB2_LOG_ERR("get resources err=%d for %s\n",
1062 dlb2_iface_hardware_init(&dlb2->qm_instance);
1064 err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
1066 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
1071 /* Complete xstats runtime initialization */
1072 err = dlb2_xstats_init(dlb2);
1074 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
1078 rte_spinlock_init(&dlb2->qm_instance.resource_lock);
1080 dlb2_iface_low_level_io_init();
1082 dlb2_entry_points_init(dev);
1084 dlb2_init_queue_depth_thresholds(dlb2,
1085 dlb2_args->qid_depth_thresholds.val);
1091 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
1094 struct dlb2_eventdev *dlb2;
1097 dlb2 = dev->data->dev_private;
1099 evdev_dlb2_default_info.driver_name = name;
1101 err = dlb2_iface_open(&dlb2->qm_instance, name);
1103 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
1108 err = dlb2_hw_query_resources(dlb2);
1110 DLB2_LOG_ERR("get resources err=%d for %s\n",
1115 dlb2_iface_low_level_io_init();
1117 dlb2_entry_points_init(dev);
1123 dlb2_parse_params(const char *params,
1125 struct dlb2_devargs *dlb2_args)
1128 static const char * const args[] = { NUMA_NODE_ARG,
1129 DLB2_MAX_NUM_EVENTS,
1130 DLB2_NUM_DIR_CREDITS,
1132 DLB2_QID_DEPTH_THRESH_ARG,
1136 if (params != NULL && params[0] != '\0') {
1137 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
1139 if (kvlist == NULL) {
1141 "Ignoring unsupported parameters when creating device '%s'\n",
1144 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
1146 &dlb2_args->socket_id);
1148 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
1150 rte_kvargs_free(kvlist);
1154 ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
1156 &dlb2_args->max_num_events);
1158 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
1160 rte_kvargs_free(kvlist);
1164 ret = rte_kvargs_process(kvlist,
1165 DLB2_NUM_DIR_CREDITS,
1166 set_num_dir_credits,
1167 &dlb2_args->num_dir_credits_override);
1169 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
1171 rte_kvargs_free(kvlist);
1175 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
1177 &dlb2_args->dev_id);
1179 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
1181 rte_kvargs_free(kvlist);
1185 ret = rte_kvargs_process(
1187 DLB2_QID_DEPTH_THRESH_ARG,
1188 set_qid_depth_thresh,
1189 &dlb2_args->qid_depth_thresholds);
1191 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
1193 rte_kvargs_free(kvlist);
1197 ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
1199 &dlb2_args->cos_id);
1201 DLB2_LOG_ERR("%s: Error parsing cos parameter",
1203 rte_kvargs_free(kvlist);
1207 rte_kvargs_free(kvlist);
1212 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);
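/* Illustrative devargs (not part of the driver): the keys handled by
 * dlb2_parse_params() can be supplied as device arguments on the EAL
 * command line. The PCI address is a placeholder and the literal key
 * strings are assumed to match the *_ARG macros used above:
 *
 *	-a 0000:00:00.0,max_num_events=2048,num_dir_credits=512,\
 *	   qid_depth_thresh=all:64,cos=1
 */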