1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
14 #include <sys/fcntl.h>
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
25 #include <rte_kvargs.h>
27 #include <rte_malloc.h>
29 #include <rte_power_intrinsics.h>
30 #include <rte_prefetch.h>
32 #include <rte_string_fns.h>
34 #include "dlb2_priv.h"
35 #include "dlb2_iface.h"
36 #include "dlb2_inline_fns.h"
39 * Resources exposed to eventdev. Some values overridden at runtime using
40 * values returned by the DLB kernel driver.
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
45 static struct rte_event_dev_info evdev_dlb2_default_info = {
46 .driver_name = "", /* probe will set */
47 .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
48 .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
50 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
52 .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
54 .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
55 .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
56 .max_event_priority_levels = DLB2_QID_PRIORITIES,
57 .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
58 .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
59 .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
60 .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
61 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
62 .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
63 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
64 RTE_EVENT_DEV_CAP_EVENT_QOS |
65 RTE_EVENT_DEV_CAP_BURST_MODE |
66 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
67 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
68 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
71 struct process_local_port_data
72 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
75 dlb2_free_qe_mem(struct dlb2_port *qm_port)
80 rte_free(qm_port->qe4);
83 rte_free(qm_port->int_arm_qe);
84 qm_port->int_arm_qe = NULL;
86 rte_free(qm_port->consume_qe);
87 qm_port->consume_qe = NULL;
89 rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
90 dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
93 /* override defaults with value(s) provided on command line */
95 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
96 int *qid_depth_thresholds)
100 for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
101 if (qid_depth_thresholds[q] != 0)
102 dlb2->ev_queues[q].depth_threshold =
103 qid_depth_thresholds[q];
108 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
110 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
111 struct dlb2_hw_resource_info *dlb2_info = &handle->info;
114 /* Query driver resources provisioned for this device */
116 ret = dlb2_iface_get_num_resources(handle,
117 &dlb2->hw_rsrc_query_results);
119 DLB2_LOG_ERR("ioctl get dlb2 num resources failed, err=%d\n", ret);
123 /* Complete filling in device resource info returned to evdev app,
124 * overriding any default values.
125 * The capabilities (CAPs) were set at compile time.
128 evdev_dlb2_default_info.max_event_queues =
129 dlb2->hw_rsrc_query_results.num_ldb_queues;
131 evdev_dlb2_default_info.max_event_ports =
132 dlb2->hw_rsrc_query_results.num_ldb_ports;
134 evdev_dlb2_default_info.max_num_events =
135 dlb2->hw_rsrc_query_results.num_ldb_credits;
137 /* Save off values used when creating the scheduling domain. */
139 handle->info.num_sched_domains =
140 dlb2->hw_rsrc_query_results.num_sched_domains;
142 handle->info.hw_rsrc_max.nb_events_limit =
143 dlb2->hw_rsrc_query_results.num_ldb_credits;
145 handle->info.hw_rsrc_max.num_queues =
146 dlb2->hw_rsrc_query_results.num_ldb_queues +
147 dlb2->hw_rsrc_query_results.num_dir_ports;
149 handle->info.hw_rsrc_max.num_ldb_queues =
150 dlb2->hw_rsrc_query_results.num_ldb_queues;
152 handle->info.hw_rsrc_max.num_ldb_ports =
153 dlb2->hw_rsrc_query_results.num_ldb_ports;
155 handle->info.hw_rsrc_max.num_dir_ports =
156 dlb2->hw_rsrc_query_results.num_dir_ports;
158 handle->info.hw_rsrc_max.reorder_window_size =
159 dlb2->hw_rsrc_query_results.num_hist_list_entries;
161 rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
166 #define DLB2_BASE_10 10
169 dlb2_string_to_int(int *result, const char *str)
174 if (str == NULL || result == NULL)
178 ret = strtol(str, &endptr, DLB2_BASE_10);
182 /* long int and int may be different width for some architectures */
183 if (ret < INT_MIN || ret > INT_MAX || endptr == str)
191 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
193 int *socket_id = opaque;
196 ret = dlb2_string_to_int(socket_id, value);
200 if (*socket_id > RTE_MAX_NUMA_NODES)
206 set_max_num_events(const char *key __rte_unused,
210 int *max_num_events = opaque;
213 if (value == NULL || opaque == NULL) {
214 DLB2_LOG_ERR("NULL pointer\n");
218 ret = dlb2_string_to_int(max_num_events, value);
222 if (*max_num_events < 0 || *max_num_events >
223 DLB2_MAX_NUM_LDB_CREDITS) {
224 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
225 DLB2_MAX_NUM_LDB_CREDITS);
233 set_num_dir_credits(const char *key __rte_unused,
237 int *num_dir_credits = opaque;
240 if (value == NULL || opaque == NULL) {
241 DLB2_LOG_ERR("NULL pointer\n");
245 ret = dlb2_string_to_int(num_dir_credits, value);
249 if (*num_dir_credits < 0 ||
250 *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
251 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
252 DLB2_MAX_NUM_DIR_CREDITS);
260 set_dev_id(const char *key __rte_unused,
264 int *dev_id = opaque;
267 if (value == NULL || opaque == NULL) {
268 DLB2_LOG_ERR("NULL pointer\n");
272 ret = dlb2_string_to_int(dev_id, value);
280 set_cos(const char *key __rte_unused,
284 enum dlb2_cos *cos_id = opaque;
288 if (value == NULL || opaque == NULL) {
289 DLB2_LOG_ERR("NULL pointer\n");
293 ret = dlb2_string_to_int(&x, value);
297 if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
299 "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
311 set_qid_depth_thresh(const char *key __rte_unused,
315 struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
316 int first, last, thresh, i;
318 if (value == NULL || opaque == NULL) {
319 DLB2_LOG_ERR("NULL pointer\n");
323 /* command line override may take one of the following 3 forms:
324 * qid_depth_thresh=all:<threshold_value> ... all queues
325 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
326 * qid_depth_thresh=qid:<threshold_value> ... just one queue
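*
* For example (illustrative values, not from the original source):
*   qid_depth_thresh=all:256   ... threshold of 256 for every queue
*   qid_depth_thresh=2-5:128   ... threshold of 128 for queues 2 through 5
*   qid_depth_thresh=7:64      ... threshold of 64 for queue 7 only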
328 if (sscanf(value, "all:%d", &thresh) == 1) {
330 last = DLB2_MAX_NUM_QUEUES - 1;
331 } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
332 /* we have everything we need */
333 } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
336 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
340 if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
341 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
345 if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
346 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
347 DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
351 for (i = first; i <= last; i++)
352 qid_thresh->val[i] = thresh; /* indexed by qid */
358 dlb2_eventdev_info_get(struct rte_eventdev *dev,
359 struct rte_event_dev_info *dev_info)
361 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
364 ret = dlb2_hw_query_resources(dlb2);
366 const struct rte_eventdev_data *data = dev->data;
368 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
370 /* fn is void, so fall through and return values set up in
375 /* Add num resources currently owned by this domain.
376 * These would become available if the scheduling domain were reset due
377 * to the application recalling eventdev_configure to *reconfigure* the
380 evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
381 evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
382 evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
384 evdev_dlb2_default_info.max_event_queues =
385 RTE_MIN(evdev_dlb2_default_info.max_event_queues,
386 RTE_EVENT_MAX_QUEUES_PER_DEV);
388 evdev_dlb2_default_info.max_num_events =
389 RTE_MIN(evdev_dlb2_default_info.max_num_events,
390 dlb2->max_num_events_override);
392 *dev_info = evdev_dlb2_default_info;
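/* Illustrative usage sketch (not part of the PMD): an application retrieves
 * these values through the public eventdev API, e.g.
 *
 *     struct rte_event_dev_info info;
 *     if (rte_event_dev_info_get(dev_id, &info) == 0)
 *         nb_queues = RTE_MIN(nb_queues, info.max_event_queues);
 *
 * where dev_id and nb_queues are application-defined.
 */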
396 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
397 const struct dlb2_hw_rsrcs *resources_asked)
400 struct dlb2_create_sched_domain_args *cfg;
402 if (resources_asked == NULL) {
403 DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
408 /* Map generic qm resources to dlb2 resources */
409 cfg = &handle->cfg.resources;
411 /* DIR ports and queues */
413 cfg->num_dir_ports = resources_asked->num_dir_ports;
415 cfg->num_dir_credits = resources_asked->num_dir_credits;
419 cfg->num_ldb_queues = resources_asked->num_ldb_queues;
423 cfg->cos_strict = 0; /* Best effort */
424 cfg->num_cos_ldb_ports[0] = 0;
425 cfg->num_cos_ldb_ports[1] = 0;
426 cfg->num_cos_ldb_ports[2] = 0;
427 cfg->num_cos_ldb_ports[3] = 0;
429 switch (handle->cos_id) {
431 cfg->num_ldb_ports = 0; /* no don't care ports */
432 cfg->num_cos_ldb_ports[0] =
433 resources_asked->num_ldb_ports;
436 cfg->num_ldb_ports = 0; /* no don't care ports */
437 cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
440 cfg->num_ldb_ports = 0; /* no don't care ports */
441 cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
444 cfg->num_ldb_ports = 0; /* no don't care ports */
445 cfg->num_cos_ldb_ports[3] =
446 resources_asked->num_ldb_ports;
448 case DLB2_COS_DEFAULT:
449 /* all ldb ports are don't care ports from a cos perspective */
451 resources_asked->num_ldb_ports;
455 cfg->num_ldb_credits =
456 resources_asked->num_ldb_credits;
458 cfg->num_atomic_inflights =
459 DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
462 cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
463 DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
465 DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
467 resources_asked->num_ldb_ports,
469 cfg->num_atomic_inflights,
470 cfg->num_hist_list_entries,
471 cfg->num_ldb_credits,
472 cfg->num_dir_credits);
474 /* Configure the QM */
476 ret = dlb2_iface_sched_domain_create(handle, cfg);
478 DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
480 dlb2_error_strings[cfg->response.status]);
485 handle->domain_id = cfg->response.id;
486 handle->cfg.configured = true;
494 dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
496 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
497 enum dlb2_configuration_state config_state;
500 dlb2_iface_domain_reset(dlb2);
502 /* Free all dynamically allocated port memory */
503 for (i = 0; i < dlb2->num_ports; i++)
504 dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
506 /* If reconfiguring, mark the device's queues and ports as "previously
507 * configured." If the user doesn't reconfigure them, the PMD will
508 * reapply their previous configuration when the device is started.
510 config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
513 for (i = 0; i < dlb2->num_ports; i++) {
514 dlb2->ev_ports[i].qm_port.config_state = config_state;
515 /* Reset setup_done so ports can be reconfigured */
516 dlb2->ev_ports[i].setup_done = false;
517 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
518 dlb2->ev_ports[i].link[j].mapped = false;
521 for (i = 0; i < dlb2->num_queues; i++)
522 dlb2->ev_queues[i].qm_queue.config_state = config_state;
524 for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
525 dlb2->ev_queues[i].setup_done = false;
528 dlb2->num_ldb_ports = 0;
529 dlb2->num_dir_ports = 0;
530 dlb2->num_queues = 0;
531 dlb2->num_ldb_queues = 0;
532 dlb2->num_dir_queues = 0;
533 dlb2->configured = false;
536 /* Note: 1 QM instance per QM device, QM instance/device == event device */
538 dlb2_eventdev_configure(const struct rte_eventdev *dev)
540 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
541 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
542 struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
543 const struct rte_eventdev_data *data = dev->data;
544 const struct rte_event_dev_config *config = &data->dev_conf;
547 /* If this eventdev is already configured, we must release the current
548 * scheduling domain before attempting to configure a new one.
550 if (dlb2->configured) {
551 dlb2_hw_reset_sched_domain(dev, true);
553 ret = dlb2_hw_query_resources(dlb2);
555 DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
561 if (config->nb_event_queues > rsrcs->num_queues) {
562 DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
563 config->nb_event_queues,
567 if (config->nb_event_ports > (rsrcs->num_ldb_ports
568 + rsrcs->num_dir_ports)) {
569 DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
570 config->nb_event_ports,
571 (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
574 if (config->nb_events_limit > rsrcs->nb_events_limit) {
575 DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
576 config->nb_events_limit,
577 rsrcs->nb_events_limit);
581 if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
582 dlb2->global_dequeue_wait = false;
586 dlb2->global_dequeue_wait = true;
588 /* note size mismatch of timeout vals in eventdev lib. */
589 timeout32 = config->dequeue_timeout_ns;
591 dlb2->global_dequeue_wait_ticks =
592 timeout32 * (rte_get_timer_hz() / 1E9);
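/* Worked example (illustrative): with a 2.0 GHz timer, rte_get_timer_hz()
 * returns 2e9, so rte_get_timer_hz() / 1E9 = 2 ticks per ns and a
 * dequeue_timeout_ns of 1000 yields global_dequeue_wait_ticks = 2000.
 */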
595 /* Does this platform support umonitor/umwait? */
596 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
597 if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
598 RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
599 DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
600 RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
603 dlb2->umwait_allowed = true;
606 rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
607 rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
608 /* 1 dir queue per dir port */
609 rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
611 /* Scale down nb_events_limit by 4 for directed credits, since there
612 * are 4x as many load-balanced credits.
614 rsrcs->num_ldb_credits = 0;
615 rsrcs->num_dir_credits = 0;
617 if (rsrcs->num_ldb_queues)
618 rsrcs->num_ldb_credits = config->nb_events_limit;
619 if (rsrcs->num_dir_ports)
620 rsrcs->num_dir_credits = config->nb_events_limit / 4;
621 if (dlb2->num_dir_credits_override != -1)
622 rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
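/* Worked example (illustrative): with nb_events_limit = 2048 and at least one
 * LDB queue and one DIR port, the assignments above give
 * num_ldb_credits = 2048 and num_dir_credits = 2048 / 4 = 512, unless the
 * directed-credit override devarg (handled by set_num_dir_credits()) replaces
 * the directed pool.
 */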
624 if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
625 DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
629 dlb2->new_event_limit = config->nb_events_limit;
630 __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
632 /* Save number of ports/queues for this event dev */
633 dlb2->num_ports = config->nb_event_ports;
634 dlb2->num_queues = config->nb_event_queues;
635 dlb2->num_dir_ports = rsrcs->num_dir_ports;
636 dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
637 dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
638 dlb2->num_dir_queues = dlb2->num_dir_ports;
639 dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
640 dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
641 dlb2->dir_credit_pool = rsrcs->num_dir_credits;
642 dlb2->max_dir_credits = rsrcs->num_dir_credits;
644 dlb2->configured = true;
650 dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
652 struct rte_event_port_conf *port_conf)
654 RTE_SET_USED(port_id);
655 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
657 port_conf->new_event_threshold = dlb2->new_event_limit;
658 port_conf->dequeue_depth = 32;
659 port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
660 port_conf->event_port_cfg = 0;
664 dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
666 struct rte_event_queue_conf *queue_conf)
669 RTE_SET_USED(queue_id);
671 queue_conf->nb_atomic_flows = 1024;
672 queue_conf->nb_atomic_order_sequences = 64;
673 queue_conf->event_queue_cfg = 0;
674 queue_conf->priority = 0;
678 dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
680 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
681 struct dlb2_get_sn_allocation_args cfg;
686 ret = dlb2_iface_get_sn_allocation(handle, &cfg);
688 DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
689 ret, dlb2_error_strings[cfg.response.status]);
693 return cfg.response.id;
697 dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
699 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
700 struct dlb2_set_sn_allocation_args cfg;
706 ret = dlb2_iface_set_sn_allocation(handle, &cfg);
708 DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
709 ret, dlb2_error_strings[cfg.response.status]);
717 dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
719 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
720 struct dlb2_get_sn_occupancy_args cfg;
725 ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
727 DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
728 ret, dlb2_error_strings[cfg.response.status]);
732 return cfg.response.id;
735 /* Query the current sequence number allocations and, if they conflict with the
736 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
737 * This is best-effort; if it fails, the PMD will attempt to configure the
738 * load-balanced queue and return an error.
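*
* Illustrative arithmetic: a group whose per-queue allocation is N sequence
* numbers provides DLB2_MAX_LDB_SN_ALLOC / N usable slots (see total_slots
* below), so a group configured for 64-SN queues serves half as many queues
* as one configured for 32-SN queues.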
741 dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
742 const struct rte_event_queue_conf *queue_conf)
744 int grp_occupancy[DLB2_NUM_SN_GROUPS];
745 int grp_alloc[DLB2_NUM_SN_GROUPS];
746 int i, sequence_numbers;
748 sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
750 for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
753 grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
754 if (grp_alloc[i] < 0)
757 total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
759 grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
760 if (grp_occupancy[i] < 0)
763 /* DLB has at least one available slot for the requested
764 * sequence numbers, so no further configuration required.
766 if (grp_alloc[i] == sequence_numbers &&
767 grp_occupancy[i] < total_slots)
771 /* None of the sequence number groups are configured for the requested
772 * sequence numbers, so we have to reconfigure one of them. This is
773 * only possible if a group is not in use.
775 for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
776 if (grp_occupancy[i] == 0)
780 if (i == DLB2_NUM_SN_GROUPS) {
781 DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
782 __func__, sequence_numbers);
786 /* Attempt to configure slot i with the requested number of sequence
787 * numbers. Ignore the return value -- if this fails, the error will be
788 * caught during subsequent queue configuration.
790 dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
794 dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
795 struct dlb2_eventdev_queue *ev_queue,
796 const struct rte_event_queue_conf *evq_conf)
798 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
799 struct dlb2_queue *queue = &ev_queue->qm_queue;
800 struct dlb2_create_ldb_queue_args cfg;
805 if (evq_conf == NULL)
808 if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
809 if (evq_conf->nb_atomic_order_sequences != 0)
810 sched_type = RTE_SCHED_TYPE_ORDERED;
812 sched_type = RTE_SCHED_TYPE_PARALLEL;
814 sched_type = evq_conf->schedule_type;
816 cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
817 cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
818 cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
820 if (sched_type != RTE_SCHED_TYPE_ORDERED) {
821 cfg.num_sequence_numbers = 0;
822 cfg.num_qid_inflights = 2048;
825 /* App should set this to the number of hardware flows they want, not
826 * the overall number of flows they're going to use. E.g. if app is
827 * using 64 flows and sets compression to 64, best-case they'll get
828 * 64 unique hashed flows in hardware.
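*
* Illustrative example: an application that sets nb_atomic_flows = 1024 in
* its queue config selects the 1K compression level in the switch below; any
* value that is not one of the supported levels falls through to the default
* case and disables compression (lock_id_comp_level = 0).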
830 switch (evq_conf->nb_atomic_flows) {
831 /* Valid DLB2 compression levels */
836 case (1 * 1024): /* 1K */
837 case (2 * 1024): /* 2K */
838 case (4 * 1024): /* 4K */
839 case (64 * 1024): /* 64K */
840 cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
843 /* Invalid compression level */
844 cfg.lock_id_comp_level = 0; /* no compression */
847 if (ev_queue->depth_threshold == 0) {
848 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
849 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
851 cfg.depth_threshold = ev_queue->depth_threshold;
853 ret = dlb2_iface_ldb_queue_create(handle, &cfg);
855 DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
856 ret, dlb2_error_strings[cfg.response.status]);
860 qm_qid = cfg.response.id;
862 /* Save off queue config for debug, resource lookups, and reconfig */
863 queue->num_qid_inflights = cfg.num_qid_inflights;
864 queue->num_atm_inflights = cfg.num_atomic_inflights;
866 queue->sched_type = sched_type;
867 queue->config_state = DLB2_CONFIGURED;
869 DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
871 cfg.num_atomic_inflights,
872 cfg.num_sequence_numbers,
873 cfg.num_qid_inflights);
879 dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
880 struct dlb2_eventdev_queue *ev_queue,
881 const struct rte_event_queue_conf *queue_conf)
883 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
886 if (queue_conf->nb_atomic_order_sequences)
887 dlb2_program_sn_allocation(dlb2, queue_conf);
889 qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
891 DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
896 dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
898 ev_queue->qm_queue.id = qm_qid;
903 static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
907 for (i = 0; i < dlb2->num_queues; i++) {
908 if (dlb2->ev_queues[i].setup_done &&
909 dlb2->ev_queues[i].qm_queue.is_directed)
917 dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
918 struct dlb2_eventdev_queue *ev_queue)
920 struct dlb2_eventdev_port *ev_port;
923 for (i = 0; i < dlb2->num_ports; i++) {
924 ev_port = &dlb2->ev_ports[i];
926 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
927 if (!ev_port->link[j].valid ||
928 ev_port->link[j].queue_id != ev_queue->id)
931 ev_port->link[j].valid = false;
932 ev_port->num_links--;
936 ev_queue->num_links = 0;
940 dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
942 const struct rte_event_queue_conf *queue_conf)
944 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
945 struct dlb2_eventdev_queue *ev_queue;
948 if (queue_conf == NULL)
951 if (ev_qid >= dlb2->num_queues)
954 ev_queue = &dlb2->ev_queues[ev_qid];
956 ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
957 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
958 ev_queue->id = ev_qid;
959 ev_queue->conf = *queue_conf;
961 if (!ev_queue->qm_queue.is_directed) {
962 ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
964 /* The directed queue isn't setup until link time, at which
965 * point we know its directed port ID. Directed queue setup
966 * will only fail if this queue is already setup or there are
967 * no directed queues left to configure.
971 ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
973 if (ev_queue->setup_done ||
974 dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
978 /* Tear down pre-existing port->queue links */
979 if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
980 dlb2_queue_link_teardown(dlb2, ev_queue);
983 ev_queue->setup_done = true;
989 dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
991 struct dlb2_cq_pop_qe *qe;
993 qe = rte_zmalloc(mz_name,
994 DLB2_NUM_QES_PER_CACHE_LINE *
995 sizeof(struct dlb2_cq_pop_qe),
996 RTE_CACHE_LINE_SIZE);
999 DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
1002 qm_port->consume_qe = qe;
1008 /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
1011 qe->tokens = 0; /* set at run time */
1014 /* Completion IDs are disabled */
1021 dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
1023 struct dlb2_enqueue_qe *qe;
1025 qe = rte_zmalloc(mz_name,
1026 DLB2_NUM_QES_PER_CACHE_LINE *
1027 sizeof(struct dlb2_enqueue_qe),
1028 RTE_CACHE_LINE_SIZE);
1031 DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
1034 qm_port->int_arm_qe = qe;
1036 /* V2 - INT ARM is CQ_TOKEN + FRAG */
1043 /* Completion IDs are disabled */
1050 dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
1054 sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
1056 qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
1058 if (qm_port->qe4 == NULL) {
1059 DLB2_LOG_ERR("dlb2: no qe4 memory\n");
1064 ret = dlb2_init_int_arm_qe(qm_port, mz_name);
1066 DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
1070 ret = dlb2_init_consume_qe(qm_port, mz_name);
1072 DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
1080 dlb2_free_qe_mem(qm_port);
1085 static inline uint16_t
1086 dlb2_event_enqueue_delayed(void *event_port,
1087 const struct rte_event events[]);
1089 static inline uint16_t
1090 dlb2_event_enqueue_burst_delayed(void *event_port,
1091 const struct rte_event events[],
1094 static inline uint16_t
1095 dlb2_event_enqueue_new_burst_delayed(void *event_port,
1096 const struct rte_event events[],
1099 static inline uint16_t
1100 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
1101 const struct rte_event events[],
1105 dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
1106 struct dlb2_eventdev_port *ev_port,
1107 uint32_t dequeue_depth,
1108 uint32_t enqueue_depth)
1110 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1111 struct dlb2_create_ldb_port_args cfg = { {0} };
1113 struct dlb2_port *qm_port = NULL;
1114 char mz_name[RTE_MEMZONE_NAMESIZE];
1115 uint32_t qm_port_id;
1116 uint16_t ldb_credit_high_watermark;
1117 uint16_t dir_credit_high_watermark;
1122 if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1123 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
1128 if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1129 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1130 DLB2_MIN_ENQUEUE_DEPTH);
1134 rte_spinlock_lock(&handle->resource_lock);
1136 /* We round up to the next power of 2 if necessary */
1137 cfg.cq_depth = rte_align32pow2(dequeue_depth);
1138 cfg.cq_depth_threshold = 1;
1140 cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
1142 if (handle->cos_id == DLB2_COS_DEFAULT)
1145 cfg.cos_id = handle->cos_id;
1149 /* User controls the LDB high watermark via enqueue depth. The DIR high
1150 * watermark is equal, unless the directed credit pool is too small.
1152 ldb_credit_high_watermark = enqueue_depth;
1154 /* If there are no directed ports, the kernel driver will ignore this
1155 * port's directed credit settings. Don't use enqueue_depth if it would
1156 * require more directed credits than are available.
1158 dir_credit_high_watermark =
1159 RTE_MIN(enqueue_depth,
1160 handle->cfg.num_dir_credits / dlb2->num_ports);
1164 ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode);
1166 DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
1167 ret, dlb2_error_strings[cfg.response.status]);
1171 qm_port_id = cfg.response.id;
1173 DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
1174 ev_port->id, qm_port_id);
1176 qm_port = &ev_port->qm_port;
1177 qm_port->ev_port = ev_port; /* back ptr */
1178 qm_port->dlb2 = dlb2; /* back ptr */
1180 * Allocate and init local qe struct(s).
1181 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
1184 snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
1187 ret = dlb2_init_qe_mem(qm_port, mz_name);
1189 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1193 qm_port->id = qm_port_id;
1195 qm_port->cached_ldb_credits = 0;
1196 qm_port->cached_dir_credits = 0;
1197 /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1198 * the effective depth is smaller.
1200 qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1201 qm_port->cq_idx = 0;
1202 qm_port->cq_idx_unmasked = 0;
1204 if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1205 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1207 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1209 qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
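/* Worked example (illustrative): a cq_depth of 8 in sparse poll mode gives
 * cq_depth_mask = (8 * 4) - 1 = 0x1f and gen_bit_shift = popcount(0x1f) = 5;
 * in non-sparse mode the mask is 7 and the shift is 3.
 */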
1210 /* starting value of gen bit - it toggles at wrap time */
1211 qm_port->gen_bit = 1;
1213 qm_port->int_armed = false;
1215 /* Save off for later use in info and lookup APIs. */
1216 qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
1218 qm_port->dequeue_depth = dequeue_depth;
1219 qm_port->token_pop_thresh = dequeue_depth;
1221 /* The default enqueue functions do not include delayed-pop support for
1222 * performance reasons.
1224 if (qm_port->token_pop_mode == DELAYED_POP) {
1225 dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
1226 dlb2->event_dev->enqueue_burst =
1227 dlb2_event_enqueue_burst_delayed;
1228 dlb2->event_dev->enqueue_new_burst =
1229 dlb2_event_enqueue_new_burst_delayed;
1230 dlb2->event_dev->enqueue_forward_burst =
1231 dlb2_event_enqueue_forward_burst_delayed;
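/* Descriptive note (inferred from the token_pop_mode/owed_tokens fields used
 * in this file, not original text): in DELAYED_POP mode CQ token return is
 * deferred to the enqueue path, so the *_delayed variants installed above pop
 * the owed tokens (via dlb2_construct_token_pop_qe()/dlb2_consume_qe_immediate())
 * once enough releases have been issued, instead of popping at dequeue time.
 */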
1234 qm_port->owed_tokens = 0;
1235 qm_port->issued_releases = 0;
1237 /* Save config message too. */
1238 rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
1241 qm_port->state = PORT_STARTED; /* enabled at create time */
1242 qm_port->config_state = DLB2_CONFIGURED;
1244 qm_port->dir_credits = dir_credit_high_watermark;
1245 qm_port->ldb_credits = ldb_credit_high_watermark;
1246 qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1247 qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1249 DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1252 qm_port->ldb_credits,
1253 qm_port->dir_credits);
1255 rte_spinlock_unlock(&handle->resource_lock);
1262 dlb2_free_qe_mem(qm_port);
1264 rte_spinlock_unlock(&handle->resource_lock);
1266 DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
1272 dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
1273 struct dlb2_eventdev_port *ev_port)
1275 struct dlb2_eventdev_queue *ev_queue;
1278 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1279 if (!ev_port->link[i].valid)
1282 ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
1284 ev_port->link[i].valid = false;
1285 ev_port->num_links--;
1286 ev_queue->num_links--;
1291 dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
1292 struct dlb2_eventdev_port *ev_port,
1293 uint32_t dequeue_depth,
1294 uint32_t enqueue_depth)
1296 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1297 struct dlb2_create_dir_port_args cfg = { {0} };
1299 struct dlb2_port *qm_port = NULL;
1300 char mz_name[RTE_MEMZONE_NAMESIZE];
1301 uint32_t qm_port_id;
1302 uint16_t ldb_credit_high_watermark;
1303 uint16_t dir_credit_high_watermark;
1305 if (dlb2 == NULL || handle == NULL)
1308 if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1309 DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
1310 DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
1314 if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1315 DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1316 DLB2_MIN_ENQUEUE_DEPTH);
1320 rte_spinlock_lock(&handle->resource_lock);
1322 /* Directed queues are configured at link time. */
1325 /* We round up to the next power of 2 if necessary */
1326 cfg.cq_depth = rte_align32pow2(dequeue_depth);
1327 cfg.cq_depth_threshold = 1;
1329 /* User controls the LDB high watermark via enqueue depth. The DIR high
1330 * watermark is equal, unless the directed credit pool is too small.
1332 ldb_credit_high_watermark = enqueue_depth;
1334 /* Don't use enqueue_depth if it would require more directed credits
1335 * than are available.
1337 dir_credit_high_watermark =
1338 RTE_MIN(enqueue_depth,
1339 handle->cfg.num_dir_credits / dlb2->num_ports);
1343 ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode);
1345 DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
1346 ret, dlb2_error_strings[cfg.response.status]);
1350 qm_port_id = cfg.response.id;
1352 DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
1353 ev_port->id, qm_port_id);
1355 qm_port = &ev_port->qm_port;
1356 qm_port->ev_port = ev_port; /* back ptr */
1357 qm_port->dlb2 = dlb2; /* back ptr */
1360 * Init local qe struct(s).
1361 * Note: MOVDIR64 requires the enqueue QE to be aligned
1364 snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
1367 ret = dlb2_init_qe_mem(qm_port, mz_name);
1370 DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1374 qm_port->id = qm_port_id;
1376 qm_port->cached_ldb_credits = 0;
1377 qm_port->cached_dir_credits = 0;
1378 /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1379 * the effective depth is smaller.
1381 qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1382 qm_port->cq_idx = 0;
1383 qm_port->cq_idx_unmasked = 0;
1385 if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1386 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1388 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1390 qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1391 /* starting value of gen bit - it toggles at wrap time */
1392 qm_port->gen_bit = 1;
1394 qm_port->int_armed = false;
1396 /* Save off for later use in info and lookup APIs. */
1397 qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
1399 qm_port->dequeue_depth = dequeue_depth;
1401 /* Directed ports are auto-pop, by default. */
1402 qm_port->token_pop_mode = AUTO_POP;
1403 qm_port->owed_tokens = 0;
1404 qm_port->issued_releases = 0;
1406 /* Save config message too. */
1407 rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
1410 qm_port->state = PORT_STARTED; /* enabled at create time */
1411 qm_port->config_state = DLB2_CONFIGURED;
1413 qm_port->dir_credits = dir_credit_high_watermark;
1414 qm_port->ldb_credits = ldb_credit_high_watermark;
1415 qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1416 qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1418 DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
1421 dir_credit_high_watermark,
1422 ldb_credit_high_watermark);
1424 rte_spinlock_unlock(&handle->resource_lock);
1431 dlb2_free_qe_mem(qm_port);
1433 rte_spinlock_unlock(&handle->resource_lock);
1435 DLB2_LOG_ERR("dlb2: create dir port failed!\n");
1441 dlb2_eventdev_port_setup(struct rte_eventdev *dev,
1443 const struct rte_event_port_conf *port_conf)
1445 struct dlb2_eventdev *dlb2;
1446 struct dlb2_eventdev_port *ev_port;
1449 if (dev == NULL || port_conf == NULL) {
1450 DLB2_LOG_ERR("Null parameter\n");
1454 dlb2 = dlb2_pmd_priv(dev);
1456 if (ev_port_id >= DLB2_MAX_NUM_PORTS)
1459 if (port_conf->dequeue_depth >
1460 evdev_dlb2_default_info.max_event_port_dequeue_depth ||
1461 port_conf->enqueue_depth >
1462 evdev_dlb2_default_info.max_event_port_enqueue_depth)
1465 ev_port = &dlb2->ev_ports[ev_port_id];
1467 if (ev_port->setup_done) {
1468 DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
1472 ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1473 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1475 if (!ev_port->qm_port.is_directed) {
1476 ret = dlb2_hw_create_ldb_port(dlb2,
1478 port_conf->dequeue_depth,
1479 port_conf->enqueue_depth);
1481 DLB2_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1487 ret = dlb2_hw_create_dir_port(dlb2,
1489 port_conf->dequeue_depth,
1490 port_conf->enqueue_depth);
1492 DLB2_LOG_ERR("Failed to create the DIR port\n");
1497 /* Save off port config for reconfig */
1498 ev_port->conf = *port_conf;
1500 ev_port->id = ev_port_id;
1501 ev_port->enq_configured = true;
1502 ev_port->setup_done = true;
1503 ev_port->inflight_max = port_conf->new_event_threshold;
1504 ev_port->implicit_release = !(port_conf->event_port_cfg &
1505 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1506 ev_port->outstanding_releases = 0;
1507 ev_port->inflight_credits = 0;
1508 ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
1509 ev_port->dlb2 = dlb2; /* reverse link */
1511 /* Tear down pre-existing port->queue links */
1512 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1513 dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
1515 dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
1521 dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
1522 uint32_t qm_port_id,
1526 struct dlb2_map_qid_args cfg;
1533 cfg.port_id = qm_port_id;
1535 cfg.priority = EV_TO_DLB2_PRIO(priority);
1537 ret = dlb2_iface_map_qid(handle, &cfg);
1539 DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
1540 ret, dlb2_error_strings[cfg.response.status]);
1541 DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1542 handle->domain_id, cfg.port_id,
1546 DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
1547 qm_qid, qm_port_id);
1554 dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
1555 struct dlb2_eventdev_port *ev_port,
1556 struct dlb2_eventdev_queue *ev_queue,
1559 int first_avail = -1;
1562 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1563 if (ev_port->link[i].valid) {
1564 if (ev_port->link[i].queue_id == ev_queue->id &&
1565 ev_port->link[i].priority == priority) {
1566 if (ev_port->link[i].mapped)
1567 return 0; /* already mapped */
1570 } else if (first_avail == -1)
1573 if (first_avail == -1) {
1574 DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
1575 ev_port->qm_port.id);
1579 ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
1580 ev_port->qm_port.id,
1581 ev_queue->qm_queue.id,
1585 ev_port->link[first_avail].mapped = true;
1591 dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
1592 struct dlb2_eventdev_queue *ev_queue,
1595 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1596 struct dlb2_create_dir_queue_args cfg;
1599 /* The directed port is always configured before its queue */
1600 cfg.port_id = qm_port_id;
1602 if (ev_queue->depth_threshold == 0) {
1603 cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1604 ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1606 cfg.depth_threshold = ev_queue->depth_threshold;
1608 ret = dlb2_iface_dir_queue_create(handle, &cfg);
1610 DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
1611 ret, dlb2_error_strings[cfg.response.status]);
1615 return cfg.response.id;
1619 dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
1620 struct dlb2_eventdev_queue *ev_queue,
1621 struct dlb2_eventdev_port *ev_port)
1625 qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
1628 DLB2_LOG_ERR("Failed to create the DIR queue\n");
1632 dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1634 ev_queue->qm_queue.id = qm_qid;
1640 dlb2_do_port_link(struct rte_eventdev *dev,
1641 struct dlb2_eventdev_queue *ev_queue,
1642 struct dlb2_eventdev_port *ev_port,
1645 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1648 /* Don't link until start time. */
1649 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1652 if (ev_queue->qm_queue.is_directed)
1653 err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
1655 err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
1658 DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1659 ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1660 ev_queue->id, ev_port->id);
1670 dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
1675 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
1676 struct dlb2_eventdev_queue *ev_queue;
1677 bool port_is_dir, queue_is_dir;
1679 if (queue_id > dlb2->num_queues) {
1680 rte_errno = -EINVAL;
1684 ev_queue = &dlb2->ev_queues[queue_id];
1686 if (!ev_queue->setup_done &&
1687 ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
1688 rte_errno = -EINVAL;
1692 port_is_dir = ev_port->qm_port.is_directed;
1693 queue_is_dir = ev_queue->qm_queue.is_directed;
1695 if (port_is_dir != queue_is_dir) {
1696 DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
1697 queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1698 port_is_dir ? "DIR" : "LDB", ev_port->id);
1700 rte_errno = -EINVAL;
1704 /* Check if there is space for the requested link */
1705 if (!link_exists && index == -1) {
1706 DLB2_LOG_ERR("no space for new link\n");
1707 rte_errno = -ENOSPC;
1711 /* Check if the directed port is already linked */
1712 if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1714 DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1716 rte_errno = -EINVAL;
1720 /* Check if the directed queue is already linked */
1721 if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1723 DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1725 rte_errno = -EINVAL;
1733 dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1734 const uint8_t queues[], const uint8_t priorities[],
1738 struct dlb2_eventdev_port *ev_port = event_port;
1739 struct dlb2_eventdev *dlb2;
1744 if (ev_port == NULL) {
1745 DLB2_LOG_ERR("dlb2: evport not setup\n");
1746 rte_errno = -EINVAL;
1750 if (!ev_port->setup_done &&
1751 ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
1752 DLB2_LOG_ERR("dlb2: evport not setup\n");
1753 rte_errno = -EINVAL;
1757 /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
1760 if (nb_links == 0) {
1761 DLB2_LOG_DBG("dlb2: nb_links is 0\n");
1762 return 0; /* Ignore and return success */
1765 dlb2 = ev_port->dlb2;
1767 DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
1769 ev_port->qm_port.is_directed ? "DIR" : "LDB",
1772 for (i = 0; i < nb_links; i++) {
1773 struct dlb2_eventdev_queue *ev_queue;
1774 uint8_t queue_id, prio;
1778 queue_id = queues[i];
1779 prio = priorities[i];
1781 /* Check if the link already exists. */
1782 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1783 if (ev_port->link[j].valid) {
1784 if (ev_port->link[j].queue_id == queue_id) {
1789 } else if (index == -1) {
1793 /* could not link */
1797 /* Check if already linked at the requested priority */
1798 if (found && ev_port->link[j].priority == prio)
1801 if (dlb2_validate_port_link(ev_port, queue_id, found, index))
1802 break; /* return index of offending queue */
1804 ev_queue = &dlb2->ev_queues[queue_id];
1806 if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
1807 break; /* return index of offending queue */
1809 ev_queue->num_links++;
1811 ev_port->link[index].queue_id = queue_id;
1812 ev_port->link[index].priority = prio;
1813 ev_port->link[index].valid = true;
1814 /* Entry already exists? If so, then must be prio change */
1816 ev_port->num_links++;
1822 dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
1823 uint32_t qm_port_id,
1826 struct dlb2_unmap_qid_args cfg;
1832 cfg.port_id = qm_port_id;
1835 ret = dlb2_iface_unmap_qid(handle, &cfg);
1837 DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
1838 ret, dlb2_error_strings[cfg.response.status]);
1844 dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
1845 struct dlb2_eventdev_port *ev_port,
1846 struct dlb2_eventdev_queue *ev_queue)
1850 /* Don't unlink until start time. */
1851 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1854 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1855 if (ev_port->link[i].valid &&
1856 ev_port->link[i].queue_id == ev_queue->id)
1860 /* This is expected with the eventdev API, which blindly
1861 * attempts to unmap all queues.
1863 if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1864 DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
1865 ev_queue->qm_queue.id,
1866 ev_port->qm_port.id);
1870 ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
1871 ev_port->qm_port.id,
1872 ev_queue->qm_queue.id);
1874 ev_port->link[i].mapped = false;
1880 dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
1881 uint8_t queues[], uint16_t nb_unlinks)
1883 struct dlb2_eventdev_port *ev_port = event_port;
1884 struct dlb2_eventdev *dlb2;
1889 if (!ev_port->setup_done) {
1890 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1892 rte_errno = -EINVAL;
1896 if (queues == NULL || nb_unlinks == 0) {
1897 DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
1898 return 0; /* Ignore and return success */
1901 if (ev_port->qm_port.is_directed) {
1902 DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
1905 return nb_unlinks; /* as if success */
1908 dlb2 = ev_port->dlb2;
1910 for (i = 0; i < nb_unlinks; i++) {
1911 struct dlb2_eventdev_queue *ev_queue;
1914 if (queues[i] >= dlb2->num_queues) {
1915 DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
1916 rte_errno = -EINVAL;
1917 return i; /* return index of offending queue */
1920 ev_queue = &dlb2->ev_queues[queues[i]];
1922 /* Does a link exist? */
1923 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1924 if (ev_port->link[j].queue_id == queues[i] &&
1925 ev_port->link[j].valid)
1928 if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1931 ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
1933 DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
1934 ret, ev_port->id, queues[i]);
1935 rte_errno = -ENOENT;
1936 return i; /* return index of offending queue */
1939 ev_port->link[j].valid = false;
1940 ev_port->num_links--;
1941 ev_queue->num_links--;
1948 dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
1951 struct dlb2_eventdev_port *ev_port = event_port;
1952 struct dlb2_eventdev *dlb2;
1953 struct dlb2_hw_dev *handle;
1954 struct dlb2_pending_port_unmaps_args cfg;
1959 if (!ev_port->setup_done) {
1960 DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1962 rte_errno = -EINVAL;
1966 cfg.port_id = ev_port->qm_port.id;
1967 dlb2 = ev_port->dlb2;
1968 handle = &dlb2->qm_instance;
1969 ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
1972 DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
1973 ret, dlb2_error_strings[cfg.response.status]);
1977 return cfg.response.id;
1981 dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
1983 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1986 /* If an event queue or port was previously configured, but hasn't been
1987 * reconfigured, reapply its original configuration.
1989 for (i = 0; i < dlb2->num_queues; i++) {
1990 struct dlb2_eventdev_queue *ev_queue;
1992 ev_queue = &dlb2->ev_queues[i];
1994 if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
1997 ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
1999 DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d", i);
2004 for (i = 0; i < dlb2->num_ports; i++) {
2005 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2007 if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
2010 ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
2012 DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d",
2022 dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
2024 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2027 /* Perform requested port->queue links */
2028 for (i = 0; i < dlb2->num_ports; i++) {
2029 struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2032 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
2033 struct dlb2_eventdev_queue *ev_queue;
2034 uint8_t prio, queue_id;
2036 if (!ev_port->link[j].valid)
2039 prio = ev_port->link[j].priority;
2040 queue_id = ev_port->link[j].queue_id;
2042 if (dlb2_validate_port_link(ev_port, queue_id, true, j))
2045 ev_queue = &dlb2->ev_queues[queue_id];
2047 if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
2056 dlb2_eventdev_start(struct rte_eventdev *dev)
2058 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2059 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
2060 struct dlb2_start_domain_args cfg;
2063 rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
2064 if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
2065 DLB2_LOG_ERR("bad state %d for dev_start\n",
2066 (int)dlb2->run_state);
2067 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2070 dlb2->run_state = DLB2_RUN_STATE_STARTING;
2071 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2073 /* If the device was configured more than once, some event ports and/or
2074 * queues may need to be reconfigured.
2076 ret = dlb2_eventdev_reapply_configuration(dev);
2080 /* The DLB PMD delays port links until the device is started. */
2081 ret = dlb2_eventdev_apply_port_links(dev);
2085 for (i = 0; i < dlb2->num_ports; i++) {
2086 if (!dlb2->ev_ports[i].setup_done) {
2087 DLB2_LOG_ERR("dlb2: port %d not setup", i);
2092 for (i = 0; i < dlb2->num_queues; i++) {
2093 if (dlb2->ev_queues[i].num_links == 0) {
2094 DLB2_LOG_ERR("dlb2: queue %d is not linked", i);
2099 ret = dlb2_iface_sched_domain_start(handle, &cfg);
2101 DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
2102 ret, dlb2_error_strings[cfg.response.status]);
2106 dlb2->run_state = DLB2_RUN_STATE_STARTED;
2107 DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
2112 static uint8_t cmd_byte_map[DLB2_NUM_PORT_TYPES][DLB2_NUM_HW_SCHED_TYPES] = {
2114 /* Load-balanced cmd bytes */
2115 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2116 [RTE_EVENT_OP_FORWARD] = DLB2_FWD_CMD_BYTE,
2117 [RTE_EVENT_OP_RELEASE] = DLB2_COMP_CMD_BYTE,
2120 /* Directed cmd bytes */
2121 [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2122 [RTE_EVENT_OP_FORWARD] = DLB2_NEW_CMD_BYTE,
2123 [RTE_EVENT_OP_RELEASE] = DLB2_NOOP_CMD_BYTE,
2127 static inline uint32_t
2128 dlb2_port_credits_get(struct dlb2_port *qm_port,
2129 enum dlb2_hw_queue_types type)
2131 uint32_t credits = *qm_port->credit_pool[type];
2132 uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2134 if (unlikely(credits < batch_size))
2135 batch_size = credits;
2137 if (likely(credits &&
2138 __atomic_compare_exchange_n(
2139 qm_port->credit_pool[type],
2140 &credits, credits - batch_size, false,
2141 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
2148 dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
2149 struct dlb2_eventdev_port *ev_port)
2151 uint16_t quanta = ev_port->credit_update_quanta;
2153 if (ev_port->inflight_credits >= quanta * 2) {
2154 /* Replenish credits, saving one quantum for enqueues */
2155 uint16_t val = ev_port->inflight_credits - quanta;
2157 __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
2158 ev_port->inflight_credits -= val;
2163 dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
2164 struct dlb2_eventdev_port *ev_port)
2166 uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
2170 if (unlikely(ev_port->inflight_max < sw_inflights)) {
2171 DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2172 rte_errno = -ENOSPC;
2176 if (ev_port->inflight_credits < num) {
2177 /* check if event enqueue brings ev_port over max threshold */
2178 uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2180 if (sw_inflights + credit_update_quanta >
2181 dlb2->new_event_limit) {
2183 ev_port->stats.traffic.tx_nospc_new_event_limit,
2185 rte_errno = -ENOSPC;
2189 __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
2191 ev_port->inflight_credits += (credit_update_quanta);
2193 if (ev_port->inflight_credits < num) {
2195 ev_port->stats.traffic.tx_nospc_inflight_credits,
2197 rte_errno = -ENOSPC;
2206 dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
2208 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2209 qm_port->cached_ldb_credits =
2210 dlb2_port_credits_get(qm_port,
2212 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2214 qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2216 DLB2_LOG_DBG("ldb credits exhausted\n");
2217 return 1; /* credits exhausted */
2225 dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
2227 if (unlikely(qm_port->cached_dir_credits == 0)) {
2228 qm_port->cached_dir_credits =
2229 dlb2_port_credits_get(qm_port,
2231 if (unlikely(qm_port->cached_dir_credits == 0)) {
2233 qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2235 DLB2_LOG_DBG("dir credits exhausted\n");
2236 return 1; /* credits exhausted */
2243 static __rte_always_inline void
2244 dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
2245 struct process_local_port_data *port_data)
2247 dlb2_movdir64b(port_data->pp_addr, qe4);
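/* Descriptive note (assumption about QE sizing): dlb2_movdir64b() issues a
 * single 64-byte direct store of the four 16B enqueue QEs to the port's
 * producer-port MMIO address, which is why qe4 is allocated cache-line
 * aligned in dlb2_init_qe_mem() (see the "MOVDIR64 requires the enqueue QE
 * (qe4) to be aligned" comment above).
 */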
2251 dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
2253 struct process_local_port_data *port_data;
2254 struct dlb2_cq_pop_qe *qe;
2256 RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);
2258 qe = qm_port->consume_qe;
2260 qe->tokens = num - 1;
2262 /* No store fence needed since no pointer is being sent, and CQ token
2263 * pops can be safely reordered with other HCWs.
2265 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2267 dlb2_movntdq_single(port_data->pp_addr, qe);
2269 DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
2271 qm_port->owed_tokens = 0;
2277 dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2279 struct process_local_port_data *port_data)
2281 /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2282 * application writes complete before enqueueing the QE.
2287 dlb2_pp_write(qm_port->qe4, port_data);
2291 dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2293 struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2294 int num = qm_port->owed_tokens;
2296 qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2297 qe[idx].tokens = num - 1;
2299 qm_port->owed_tokens = 0;
2303 dlb2_event_build_hcws(struct dlb2_port *qm_port,
2304 const struct rte_event ev[],
2306 uint8_t *sched_type,
2309 struct dlb2_enqueue_qe *qe;
2310 uint16_t sched_word[4];
2316 sse_qe[0] = _mm_setzero_si128();
2317 sse_qe[1] = _mm_setzero_si128();
2321 /* Construct the metadata portion of two HCWs in one 128b SSE
2322 * register. HCW metadata is constructed in the SSE registers
2324 * sse_qe[0][63:0]: qe[0]'s metadata
2325 * sse_qe[0][127:64]: qe[1]'s metadata
2326 * sse_qe[1][63:0]: qe[2]'s metadata
2327 * sse_qe[1][127:64]: qe[3]'s metadata
2330 /* Convert the event operation into a command byte and store it
2332 * sse_qe[0][63:56] = cmd_byte_map[is_directed][ev[0].op]
2333 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2334 * sse_qe[1][63:56] = cmd_byte_map[is_directed][ev[2].op]
2335 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2337 #define DLB2_QE_CMD_BYTE 7
2338 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2339 cmd_byte_map[qm_port->is_directed][ev[0].op],
2341 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2342 cmd_byte_map[qm_port->is_directed][ev[1].op],
2343 DLB2_QE_CMD_BYTE + 8);
2344 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2345 cmd_byte_map[qm_port->is_directed][ev[2].op],
2347 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2348 cmd_byte_map[qm_port->is_directed][ev[3].op],
2349 DLB2_QE_CMD_BYTE + 8);
2351 /* Store priority, scheduling type, and queue ID in the sched
2352 * word array because these values are re-used when the
2353 * destination is a directed queue.
2355 sched_word[0] = EV_TO_DLB2_PRIO(ev[0].priority) << 10 |
2356 sched_type[0] << 8 |
2358 sched_word[1] = EV_TO_DLB2_PRIO(ev[1].priority) << 10 |
2359 sched_type[1] << 8 |
2361 sched_word[2] = EV_TO_DLB2_PRIO(ev[2].priority) << 10 |
2362 sched_type[2] << 8 |
2364 sched_word[3] = EV_TO_DLB2_PRIO(ev[3].priority) << 10 |
2365 sched_type[3] << 8 |
2368 /* Store the event priority, scheduling type, and queue ID in
2370 * sse_qe[0][31:16] = sched_word[0]
2371 * sse_qe[0][95:80] = sched_word[1]
2372 * sse_qe[1][31:16] = sched_word[2]
2373 * sse_qe[1][95:80] = sched_word[3]
2375 #define DLB2_QE_QID_SCHED_WORD 1
2376 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2378 DLB2_QE_QID_SCHED_WORD);
2379 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2381 DLB2_QE_QID_SCHED_WORD + 4);
2382 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2384 DLB2_QE_QID_SCHED_WORD);
2385 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2387 DLB2_QE_QID_SCHED_WORD + 4);
2389 /* If the destination is a load-balanced queue, store the lock
2390 * ID. If it is a directed queue, DLB places this field in
2391 * bytes 10-11 of the received QE, so we format it accordingly:
2392 * sse_qe[0][47:32] = dir queue ? sched_word[0] : flow_id[0]
2393 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2394 * sse_qe[1][47:32] = dir queue ? sched_word[2] : flow_id[2]
2395 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2397 #define DLB2_QE_LOCK_ID_WORD 2
2398 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2399 (sched_type[0] == DLB2_SCHED_DIRECTED) ?
2400 sched_word[0] : ev[0].flow_id,
2401 DLB2_QE_LOCK_ID_WORD);
2402 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2403 (sched_type[1] == DLB2_SCHED_DIRECTED) ?
2404 sched_word[1] : ev[1].flow_id,
2405 DLB2_QE_LOCK_ID_WORD + 4);
2406 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2407 (sched_type[2] == DLB2_SCHED_DIRECTED) ?
2408 sched_word[2] : ev[2].flow_id,
2409 DLB2_QE_LOCK_ID_WORD);
2410 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2411 (sched_type[3] == DLB2_SCHED_DIRECTED) ?
2412 sched_word[3] : ev[3].flow_id,
2413 DLB2_QE_LOCK_ID_WORD + 4);
2415 /* Store the event type and sub event type in the metadata:
2416 * sse_qe[0][15:0] = sub_event_type[0] << 8 | event_type[0]
2417 * sse_qe[0][79:64] = sub_event_type[1] << 8 | event_type[1]
2418 * sse_qe[1][15:0] = sub_event_type[2] << 8 | event_type[2]
2419 * sse_qe[1][79:64] = sub_event_type[3] << 8 | event_type[3]
2421 #define DLB2_QE_EV_TYPE_WORD 0
2422 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2423 ev[0].sub_event_type << 8 |
2425 DLB2_QE_EV_TYPE_WORD);
2426 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2427 ev[1].sub_event_type << 8 |
2429 DLB2_QE_EV_TYPE_WORD + 4);
2430 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2431 ev[2].sub_event_type << 8 |
2433 DLB2_QE_EV_TYPE_WORD);
2434 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2435 ev[3].sub_event_type << 8 |
2437 DLB2_QE_EV_TYPE_WORD + 4);
2439 /* Store the metadata to memory (use the double-precision
2440 * _mm_storeh_pd because there is no integer function for
2441 * storing the upper 64b):
2442 * qe[0] metadata = sse_qe[0][63:0]
2443 * qe[1] metadata = sse_qe[0][127:64]
2444 * qe[2] metadata = sse_qe[1][63:0]
2445 * qe[3] metadata = sse_qe[1][127:64]
2447 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2448 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2449 (__m128d)sse_qe[0]);
2450 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2451 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2452 (__m128d)sse_qe[1]);
2454 qe[0].data = ev[0].u64;
2455 qe[1].data = ev[1].u64;
2456 qe[2].data = ev[2].u64;
2457 qe[3].data = ev[3].u64;
2463 for (i = 0; i < num; i++) {
2465 cmd_byte_map[qm_port->is_directed][ev[i].op];
2466 qe[i].sched_type = sched_type[i];
2467 qe[i].data = ev[i].u64;
2468 qe[i].qid = queue_id[i];
2469 qe[i].priority = EV_TO_DLB2_PRIO(ev[i].priority);
2470 qe[i].lock_id = ev[i].flow_id;
2471 if (sched_type[i] == DLB2_SCHED_DIRECTED) {
2472 struct dlb2_msg_info *info =
2473 (struct dlb2_msg_info *)&qe[i].lock_id;
2475 info->qid = queue_id[i];
2476 info->sched_type = DLB2_SCHED_DIRECTED;
2477 info->priority = qe[i].priority;
2479 qe[i].u.event_type.major = ev[i].event_type;
2480 qe[i].u.event_type.sub = ev[i].sub_event_type;
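/*
 * Illustration only, not driver code: a sketch of the 64-bit metadata
 * word assembled per HCW above, derived from the _mm_insert offsets and
 * comments in this function (field widths are inferred, not a hardware
 * specification):
 *
 *     bits 15:0   sub_event_type << 8 | event_type       (word 0)
 *     bits 31:16  prio << 10 | sched_type << 8 | qid     (word 1)
 *     bits 47:32  flow_id, or a copy of word 1 when the
 *                 destination queue is directed           (word 2)
 *     bits 63:56  command byte                            (byte 7)
 *
 * The data half of each QE (qe[i].data) carries ev[i].u64 unchanged.
 */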
2489 dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2490 struct dlb2_port *qm_port,
2491 const struct rte_event ev[],
2492 uint8_t *sched_type,
2495 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2496 struct dlb2_eventdev_queue *ev_queue;
2497 uint16_t *cached_credits = NULL;
2498 struct dlb2_queue *qm_queue;
2500 ev_queue = &dlb2->ev_queues[ev->queue_id];
2501 qm_queue = &ev_queue->qm_queue;
2502 *queue_id = qm_queue->id;
2504 /* Ignore sched_type and hardware credits on release events */
2505 if (ev->op == RTE_EVENT_OP_RELEASE)
2508 if (!qm_queue->is_directed) {
2509 /* Load balanced destination queue */
2511 if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2512 rte_errno = -ENOSPC;
2515 cached_credits = &qm_port->cached_ldb_credits;
2517 switch (ev->sched_type) {
2518 case RTE_SCHED_TYPE_ORDERED:
2519 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2520 if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2521 DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
2523 rte_errno = -EINVAL;
2526 *sched_type = DLB2_SCHED_ORDERED;
2528 case RTE_SCHED_TYPE_ATOMIC:
2529 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2530 *sched_type = DLB2_SCHED_ATOMIC;
2532 case RTE_SCHED_TYPE_PARALLEL:
2533 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2534 if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2535 *sched_type = DLB2_SCHED_ORDERED;
2537 *sched_type = DLB2_SCHED_UNORDERED;
2540 DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2541 DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2542 rte_errno = -EINVAL;
2546 /* Directed destination queue */
2548 if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2549 rte_errno = -ENOSPC;
2552 cached_credits = &qm_port->cached_dir_credits;
2554 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2556 *sched_type = DLB2_SCHED_DIRECTED;
2561 case RTE_EVENT_OP_NEW:
2562 /* Check that a sw credit is available */
2563 if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2564 rte_errno = -ENOSPC;
2567 ev_port->inflight_credits--;
2568 (*cached_credits)--;
2570 case RTE_EVENT_OP_FORWARD:
2571 /* Check for outstanding_releases underflow. If this occurs,
2572 * the application is not using the EVENT_OPs correctly; for
2573 * example, forwarding or releasing events that were not
2576 RTE_ASSERT(ev_port->outstanding_releases > 0);
2577 ev_port->outstanding_releases--;
2578 qm_port->issued_releases++;
2579 (*cached_credits)--;
2581 case RTE_EVENT_OP_RELEASE:
2582 ev_port->inflight_credits++;
2583 /* Check for outstanding_releases underflow. If this occurs,
2584 * the application is not using the EVENT_OPs correctly; for
2585 * example, forwarding or releasing events that were not
2588 RTE_ASSERT(ev_port->outstanding_releases > 0);
2589 ev_port->outstanding_releases--;
2590 qm_port->issued_releases++;
2592 /* Replenish s/w credits if enough are cached */
2593 dlb2_replenish_sw_credits(dlb2, ev_port);
2597 DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2598 DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2600 #ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
2601 if (ev->op != RTE_EVENT_OP_RELEASE) {
2602 DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
2603 DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
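/*
 * Summary, not driver code: credit accounting performed above per
 * operation type in dlb2_event_enqueue_prep():
 *
 *     OP_NEW:     consumes one software (inflight) credit and one cached
 *                 hardware credit.
 *     OP_FORWARD: consumes one cached hardware credit and converts one
 *                 outstanding release into an issued release.
 *     OP_RELEASE: returns one software credit, converts one outstanding
 *                 release into an issued release, and may hand batched
 *                 software credits back via dlb2_replenish_sw_credits().
 */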
2610 static inline uint16_t
2611 __dlb2_event_enqueue_burst(void *event_port,
2612 const struct rte_event events[],
2616 struct dlb2_eventdev_port *ev_port = event_port;
2617 struct dlb2_port *qm_port = &ev_port->qm_port;
2618 struct process_local_port_data *port_data;
2621 RTE_ASSERT(ev_port->enq_configured);
2622 RTE_ASSERT(events != NULL);
2626 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2629 uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
2630 uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
2634 memset(qm_port->qe4,
2636 DLB2_NUM_QES_PER_CACHE_LINE *
2637 sizeof(struct dlb2_enqueue_qe));
2639 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2640 const struct rte_event *ev = &events[i + j];
2641 int16_t thresh = qm_port->token_pop_thresh;
2644 qm_port->token_pop_mode == DELAYED_POP &&
2645 (ev->op == RTE_EVENT_OP_FORWARD ||
2646 ev->op == RTE_EVENT_OP_RELEASE) &&
2647 qm_port->issued_releases >= thresh - 1) {
2648 /* Insert the token pop QE and break out. This
2649 * may result in a partial HCW, but that is
2650 * simpler than supporting arbitrary QE
2653 dlb2_construct_token_pop_qe(qm_port, j);
2655 /* Reset the releases for the next QE batch */
2656 qm_port->issued_releases -= thresh;
2663 if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
2672 dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
2673 sched_types, queue_ids);
2675 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2677 /* Don't include the token pop QE in the enqueue count */
2680 /* Don't interpret j < DLB2_NUM_... as out-of-credits if
2683 if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2691 dlb2_event_enqueue_burst(void *event_port,
2692 const struct rte_event events[],
2695 return __dlb2_event_enqueue_burst(event_port, events, num, false);
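/*
 * Note, not driver code: dlb2_event_enqueue_burst() and the *_delayed
 * variants below share __dlb2_event_enqueue_burst() and differ only in
 * the final boolean argument. In DELAYED_POP mode the shared loop stops
 * filling the current 4-QE cache line once issued_releases reaches
 * token_pop_thresh - 1, emits a token pop QE in the remaining slot, and
 * subtracts the threshold so counting restarts with the next batch; the
 * pop QE itself is excluded from the reported enqueue count.
 */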
2699 dlb2_event_enqueue_burst_delayed(void *event_port,
2700 const struct rte_event events[],
2703 return __dlb2_event_enqueue_burst(event_port, events, num, true);
2706 static inline uint16_t
2707 dlb2_event_enqueue(void *event_port,
2708 const struct rte_event events[])
2710 return __dlb2_event_enqueue_burst(event_port, events, 1, false);
2713 static inline uint16_t
2714 dlb2_event_enqueue_delayed(void *event_port,
2715 const struct rte_event events[])
2717 return __dlb2_event_enqueue_burst(event_port, events, 1, true);
2721 dlb2_event_enqueue_new_burst(void *event_port,
2722 const struct rte_event events[],
2725 return __dlb2_event_enqueue_burst(event_port, events, num, false);
2729 dlb2_event_enqueue_new_burst_delayed(void *event_port,
2730 const struct rte_event events[],
2733 return __dlb2_event_enqueue_burst(event_port, events, num, true);
2737 dlb2_event_enqueue_forward_burst(void *event_port,
2738 const struct rte_event events[],
2741 return __dlb2_event_enqueue_burst(event_port, events, num, false);
2745 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
2746 const struct rte_event events[],
2749 return __dlb2_event_enqueue_burst(event_port, events, num, true);
2753 dlb2_event_release(struct dlb2_eventdev *dlb2,
2757 struct process_local_port_data *port_data;
2758 struct dlb2_eventdev_port *ev_port;
2759 struct dlb2_port *qm_port;
2762 if (port_id > dlb2->num_ports) {
2763 DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
2765 rte_errno = -EINVAL;
2769 ev_port = &dlb2->ev_ports[port_id];
2770 qm_port = &ev_port->qm_port;
2771 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2775 if (qm_port->is_directed) {
2777 goto sw_credit_update;
2785 qm_port->qe4[0].cmd_byte = 0;
2786 qm_port->qe4[1].cmd_byte = 0;
2787 qm_port->qe4[2].cmd_byte = 0;
2788 qm_port->qe4[3].cmd_byte = 0;
2790 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
2791 int16_t thresh = qm_port->token_pop_thresh;
2793 if (qm_port->token_pop_mode == DELAYED_POP &&
2794 qm_port->issued_releases >= thresh - 1) {
2795 /* Insert the token pop QE */
2796 dlb2_construct_token_pop_qe(qm_port, j);
2798 /* Reset the releases for the next QE batch */
2799 qm_port->issued_releases -= thresh;
2806 qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
2807 qm_port->issued_releases++;
2810 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2812 /* Don't include the token pop QE in the release count */
2817 /* each release returns one credit */
2818 if (!ev_port->outstanding_releases) {
2819 DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
2823 ev_port->outstanding_releases -= i;
2824 ev_port->inflight_credits += i;
2826 /* Replenish s/w credits if enough releases are performed */
2827 dlb2_replenish_sw_credits(dlb2, ev_port);
2831 dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
2833 uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2835 /* increment port credits, and return to pool if exceeds threshold */
2836 if (!qm_port->is_directed) {
2837 qm_port->cached_ldb_credits += num;
2838 if (qm_port->cached_ldb_credits >= 2 * batch_size) {
2840 qm_port->credit_pool[DLB2_LDB_QUEUE],
2841 batch_size, __ATOMIC_SEQ_CST);
2842 qm_port->cached_ldb_credits -= batch_size;
2845 qm_port->cached_dir_credits += num;
2846 if (qm_port->cached_dir_credits >= 2 * batch_size) {
2848 qm_port->credit_pool[DLB2_DIR_QUEUE],
2849 batch_size, __ATOMIC_SEQ_CST);
2850 qm_port->cached_dir_credits -= batch_size;
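/*
 * Illustration only, not driver code: cached credits are returned to the
 * shared pool one batch at a time once the local cache holds at least two
 * batches. Assuming DLB2_SW_CREDIT_BATCH_SZ == 32 (an assumed value):
 *
 *     cached_ldb_credits == 60, num == 8  ->  68 >= 64, so 32 credits are
 *     pushed back to credit_pool[DLB2_LDB_QUEUE] and the cache drops to 36.
 */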
2856 dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
2857 struct dlb2_eventdev_port *ev_port,
2858 struct dlb2_port *qm_port,
2860 uint64_t start_ticks)
2862 struct process_local_port_data *port_data;
2863 uint64_t elapsed_ticks;
2865 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2867 elapsed_ticks = rte_get_timer_cycles() - start_ticks;
2869 /* Wait/poll time expired */
2870 if (elapsed_ticks >= timeout) {
2872 } else if (dlb2->umwait_allowed) {
2873 volatile struct dlb2_dequeue_qe *cq_base;
2876 struct dlb2_dequeue_qe qe;
2878 uint64_t expected_value;
2879 volatile uint64_t *monitor_addr;
2881 qe_mask.qe.cq_gen = 1; /* set mask */
2883 cq_base = port_data->cq_base;
2884 monitor_addr = (volatile uint64_t *)(volatile void *)
2885 &cq_base[qm_port->cq_idx];
2886 monitor_addr++; /* cq_gen bit is in second 64bit location */
2888 if (qm_port->gen_bit)
2889 expected_value = qe_mask.raw_qe[1];
2893 rte_power_monitor(monitor_addr, expected_value,
2894 qe_mask.raw_qe[1], timeout + start_ticks,
2897 DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
2899 uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
2900 uint64_t curr_ticks = rte_get_timer_cycles();
2901 uint64_t init_ticks = curr_ticks;
2903 while ((curr_ticks - start_ticks < timeout) &&
2904 (curr_ticks - init_ticks < poll_interval))
2905 curr_ticks = rte_get_timer_cycles();
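/*
 * Note, not driver code: the umwait branch above monitors the second
 * 64-bit word of the next CQ entry, the word holding the cq_gen bit, and
 * qe_mask/expected_value select just that bit with the polarity a newly
 * written QE carries on the current pass. The core can then sleep on the
 * cache line and resume either when hardware writes the entry or when the
 * deadline (timeout + start_ticks) passes; the fallback polling loop
 * spins for up to RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL cycles, or until the
 * overall timeout, before returning to the caller.
 */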
2912 dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
2913 struct dlb2_port *qm_port,
2914 struct rte_event *events,
2915 struct dlb2_dequeue_qe *qes,
2918 uint8_t *qid_mappings = qm_port->qid_mappings;
2921 for (i = 0, num = 0; i < cnt; i++) {
2922 struct dlb2_dequeue_qe *qe = &qes[i];
2923 int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
2924 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2925 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2926 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2927 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2930 /* Fill in event information.
2931 * Note that flow_id must be embedded in the data by
2932 * the app, such as the mbuf RSS hash field if the data
2935 if (unlikely(qe->error)) {
2936 DLB2_LOG_ERR("QE error bit ON\n");
2937 DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
2938 dlb2_consume_qe_immediate(qm_port, 1);
2939 continue; /* Ignore */
2942 events[num].u64 = qe->data;
2943 events[num].flow_id = qe->flow_id;
2944 events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
2945 events[num].event_type = qe->u.event_type.major;
2946 events[num].sub_event_type = qe->u.event_type.sub;
2947 events[num].sched_type = sched_type_map[qe->sched_type];
2948 events[num].impl_opaque = qe->qid_depth;
2950 /* qid not preserved for directed queues */
2951 if (qm_port->is_directed)
2952 evq_id = ev_port->link[0].queue_id;
2954 evq_id = qid_mappings[qe->qid];
2956 events[num].queue_id = evq_id;
2958 ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
2960 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
2964 DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
2970 dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
2971 struct dlb2_port *qm_port,
2972 struct rte_event *events,
2973 struct dlb2_dequeue_qe *qes)
2975 int sched_type_map[] = {
2976 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2977 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2978 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2979 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2981 const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
2982 uint8_t *qid_mappings = qm_port->qid_mappings;
2985 /* In the unlikely case that any of the QE error bits are set, process
2986 * them one at a time.
2988 if (unlikely(qes[0].error || qes[1].error ||
2989 qes[2].error || qes[3].error))
2990 return dlb2_process_dequeue_qes(ev_port, qm_port, events,
2993 events[0].u64 = qes[0].data;
2994 events[1].u64 = qes[1].data;
2995 events[2].u64 = qes[2].data;
2996 events[3].u64 = qes[3].data;
2998 /* Construct the metadata portion of two struct rte_events
2999 * in one 128b SSE register. Event metadata is constructed in the SSE
3000 * registers like so:
3001 * sse_evt[0][63:0]: event[0]'s metadata
3002 * sse_evt[0][127:64]: event[1]'s metadata
3003 * sse_evt[1][63:0]: event[2]'s metadata
3004 * sse_evt[1][127:64]: event[3]'s metadata
3006 sse_evt[0] = _mm_setzero_si128();
3007 sse_evt[1] = _mm_setzero_si128();
3009 /* Convert the hardware queue ID to an event queue ID and store it in
3011 * sse_evt[0][47:40] = qid_mappings[qes[0].qid]
3012 * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
3013 * sse_evt[1][47:40] = qid_mappings[qes[2].qid]
3014 * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
3016 #define DLB_EVENT_QUEUE_ID_BYTE 5
3017 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3018 qid_mappings[qes[0].qid],
3019 DLB_EVENT_QUEUE_ID_BYTE);
3020 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3021 qid_mappings[qes[1].qid],
3022 DLB_EVENT_QUEUE_ID_BYTE + 8);
3023 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3024 qid_mappings[qes[2].qid],
3025 DLB_EVENT_QUEUE_ID_BYTE);
3026 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3027 qid_mappings[qes[3].qid],
3028 DLB_EVENT_QUEUE_ID_BYTE + 8);
3030 /* Convert the hardware priority to an event priority and store it in
3031 * the metadata, while also returning the queue depth status
3032 * value captured by the hardware, storing it in impl_opaque, which can
3033 * be read by the application but not modified
3034 * sse_evt[0][55:48] = DLB2_TO_EV_PRIO(qes[0].priority)
3035 * sse_evt[0][63:56] = qes[0].qid_depth
3036 * sse_evt[0][119:112] = DLB2_TO_EV_PRIO(qes[1].priority)
3037 * sse_evt[0][127:120] = qes[1].qid_depth
3038 * sse_evt[1][55:48] = DLB2_TO_EV_PRIO(qes[2].priority)
3039 * sse_evt[1][63:56] = qes[2].qid_depth
3040 * sse_evt[1][119:112] = DLB2_TO_EV_PRIO(qes[3].priority)
3041 * sse_evt[1][127:120] = qes[3].qid_depth
3043 #define DLB_EVENT_PRIO_IMPL_OPAQUE_WORD 3
3044 #define DLB_BYTE_SHIFT 8
3046 _mm_insert_epi16(sse_evt[0],
3047 DLB2_TO_EV_PRIO((uint8_t)qes[0].priority) |
3048 (qes[0].qid_depth << DLB_BYTE_SHIFT),
3049 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3051 _mm_insert_epi16(sse_evt[0],
3052 DLB2_TO_EV_PRIO((uint8_t)qes[1].priority) |
3053 (qes[1].qid_depth << DLB_BYTE_SHIFT),
3054 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3056 _mm_insert_epi16(sse_evt[1],
3057 DLB2_TO_EV_PRIO((uint8_t)qes[2].priority) |
3058 (qes[2].qid_depth << DLB_BYTE_SHIFT),
3059 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3061 _mm_insert_epi16(sse_evt[1],
3062 DLB2_TO_EV_PRIO((uint8_t)qes[3].priority) |
3063 (qes[3].qid_depth << DLB_BYTE_SHIFT),
3064 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3066 /* Write the event type, sub event type, and flow_id to the event
3068 * sse_evt[0][31:0] = qes[0].flow_id |
3069 * qes[0].u.event_type.major << 28 |
3070 * qes[0].u.event_type.sub << 20;
3071 * sse_evt[0][95:64] = qes[1].flow_id |
3072 * qes[1].u.event_type.major << 28 |
3073 * qes[1].u.event_type.sub << 20;
3074 * sse_evt[1][31:0] = qes[2].flow_id |
3075 * qes[2].u.event_type.major << 28 |
3076 * qes[2].u.event_type.sub << 20;
3077 * sse_evt[1][95:64] = qes[3].flow_id |
3078 * qes[3].u.event_type.major << 28 |
3079 * qes[3].u.event_type.sub << 20;
3081 #define DLB_EVENT_EV_TYPE_DW 0
3082 #define DLB_EVENT_EV_TYPE_SHIFT 28
3083 #define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
3084 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3086 qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3087 qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3088 DLB_EVENT_EV_TYPE_DW);
3089 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3091 qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3092 qes[1].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3093 DLB_EVENT_EV_TYPE_DW + 2);
3094 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3096 qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3097 qes[2].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3098 DLB_EVENT_EV_TYPE_DW);
3099 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3101 qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3102 qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3103 DLB_EVENT_EV_TYPE_DW + 2);
3105 /* Write the sched type to the event metadata. 'op' and 'rsvd' are not
3107 * sse_evt[0][39:32] = sched_type_map[qes[0].sched_type] << 6
3108 * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
3109 * sse_evt[1][39:32] = sched_type_map[qes[2].sched_type] << 6
3110 * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
3112 #define DLB_EVENT_SCHED_TYPE_BYTE 4
3113 #define DLB_EVENT_SCHED_TYPE_SHIFT 6
3114 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3115 sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3116 DLB_EVENT_SCHED_TYPE_BYTE);
3117 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3118 sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3119 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3120 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3121 sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3122 DLB_EVENT_SCHED_TYPE_BYTE);
3123 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3124 sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3125 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3127 /* Store the metadata to the event (use the double-precision
3128 * _mm_storeh_pd because there is no integer function for storing the
3130 * events[0].event = sse_evt[0][63:0]
3131 * events[1].event = sse_evt[0][127:64]
3132 * events[2].event = sse_evt[1][63:0]
3133 * events[3].event = sse_evt[1][127:64]
3135 _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3136 _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3137 _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3138 _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3140 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3141 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3142 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3143 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3146 ev_port->stats.queue[events[0].queue_id].
3147 qid_depth[qes[0].qid_depth],
3150 ev_port->stats.queue[events[1].queue_id].
3151 qid_depth[qes[1].qid_depth],
3154 ev_port->stats.queue[events[2].queue_id].
3155 qid_depth[qes[2].qid_depth],
3158 ev_port->stats.queue[events[3].queue_id].
3159 qid_depth[qes[3].qid_depth],
3162 DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
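/*
 * Illustration only, not driver code: a sketch of the 64-bit rte_event
 * metadata word assembled by the SSE path above, derived from the insert
 * offsets and comments in this function:
 *
 *     bits 31:0   flow_id | event_type << 28 | sub_event_type << 20
 *     bits 39:32  sched_type << 6 (op and rsvd left as zero)
 *     bits 47:40  queue_id, after qid_mappings[] translation
 *     bits 55:48  priority, via DLB2_TO_EV_PRIO()
 *     bits 63:56  impl_opaque, carrying the hardware qid_depth snapshot
 */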
3167 static __rte_always_inline int
3168 dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3170 volatile struct dlb2_dequeue_qe *cq_addr;
3171 uint8_t xor_mask[2] = {0x0F, 0x00};
3172 const uint8_t and_mask = 0x0F;
3173 __m128i *qes = (__m128i *)qe;
3174 uint8_t gen_bits, gen_bit;
3178 cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3180 idx = qm_port->cq_idx;
3182 /* Load the next 4 QEs */
3183 addr[0] = (uintptr_t)&cq_addr[idx];
3184 addr[1] = (uintptr_t)&cq_addr[(idx + 4) & qm_port->cq_depth_mask];
3185 addr[2] = (uintptr_t)&cq_addr[(idx + 8) & qm_port->cq_depth_mask];
3186 addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3188 /* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
3189 rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3190 rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3191 rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3192 rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3194 /* Correct the xor_mask for wrap-around QEs */
3195 gen_bit = qm_port->gen_bit;
3196 xor_mask[gen_bit] ^= !!((idx + 4) > qm_port->cq_depth_mask) << 1;
3197 xor_mask[gen_bit] ^= !!((idx + 8) > qm_port->cq_depth_mask) << 2;
3198 xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3200 /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3201 * valid, then QEs[0:N-1] are too.
3203 qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3204 rte_compiler_barrier();
3205 qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3206 rte_compiler_barrier();
3207 qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3208 rte_compiler_barrier();
3209 qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3211 /* Extract and combine the gen bits */
3212 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3213 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3214 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3215 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3217 /* XOR the combined bits such that a 1 represents a valid QE */
3218 gen_bits ^= xor_mask[gen_bit];
3220 /* Mask off gen bits we don't care about */
3221 gen_bits &= and_mask;
3223 return __builtin_popcount(gen_bits);
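/*
 * Illustration only, not driver code: with gen_bit == 1 and no CQ
 * wrap-around, xor_mask[1] == 0x00, so gen_bits is just the collected
 * cq_gen bits. If only the first two of the four QEs have been written,
 * gen_bits == 0b0011, the XOR and AND leave 0b0011, and the popcount
 * reports 2 valid QEs. Because the loads run from qes[3] down to qes[0],
 * a set bit N implies bits 0..N-1 are valid as well.
 */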
3227 dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
3229 uint16_t idx = qm_port->cq_idx_unmasked + cnt;
3231 qm_port->cq_idx_unmasked = idx;
3232 qm_port->cq_idx = idx & qm_port->cq_depth_mask;
3233 qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
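/*
 * Illustration only, not driver code, assuming gen_bit_shift is
 * log2(cq_depth): with cq_depth == 8 (depth_mask 0x7, shift 3), advancing
 * cq_idx_unmasked from 6 by 4 gives 10, so cq_idx wraps to 2 and
 * gen_bit = ~(10 >> 3) & 1 == 0, where it was ~(6 >> 3) & 1 == 1 before.
 * Each full pass over the CQ therefore flips the expected cq_gen polarity.
 */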
3236 static inline int16_t
3237 dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
3238 struct dlb2_eventdev_port *ev_port,
3239 struct rte_event *events,
3241 uint64_t dequeue_timeout_ticks)
3244 uint64_t start_ticks = 0ULL;
3245 struct dlb2_port *qm_port;
3248 qm_port = &ev_port->qm_port;
3250 /* We have a special implementation for waiting. Wait can be:
3251 * 1) no waiting at all
3253 * 3) wait for interrupt. If wakeup and poll time
3254 * has expired, then return to caller
3255 * 4) umonitor/umwait repeatedly up to poll time
3258 /* If configured for per dequeue wait, then use wait value provided
3259 * to this API. Otherwise we must use the global
3260 * value from eventdev config time.
3262 if (!dlb2->global_dequeue_wait)
3263 timeout = dequeue_timeout_ticks;
3265 timeout = dlb2->global_dequeue_wait_ticks;
3267 start_ticks = rte_get_timer_cycles();
3269 while (num < max_num) {
3270 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3273 /* Copy up to 4 QEs from the current cache line into qes */
3274 num_avail = dlb2_recv_qe_sparse(qm_port, qes);
3276 /* But don't process more than the user requested */
3277 num_avail = RTE_MIN(num_avail, max_num - num);
3279 dlb2_inc_cq_idx(qm_port, num_avail << 2);
3281 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3282 num += dlb2_process_dequeue_four_qes(ev_port,
3287 num += dlb2_process_dequeue_qes(ev_port,
3292 else if ((timeout == 0) || (num > 0))
3293 /* Not waiting in any form, or 1+ events received? */
3295 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3296 timeout, start_ticks))
3300 qm_port->owed_tokens += num;
3303 if (qm_port->token_pop_mode == AUTO_POP)
3304 dlb2_consume_qe_immediate(qm_port, num);
3306 ev_port->outstanding_releases += num;
3308 dlb2_port_credits_inc(qm_port, num);
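/*
 * Note, not driver code: in sparse CQ mode each 16-byte QE occupies its
 * own 64-byte cache line, which is why the loop above advances the CQ
 * index by num_avail << 2 (four entry slots per QE) rather than by
 * num_avail as in the dense dlb2_hw_dequeue() path further below.
 */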
3314 static __rte_always_inline int
3315 dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
3318 uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
3319 {0x00, 0x01, 0x03, 0x07} };
3320 uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
3321 volatile struct dlb2_dequeue_qe *cq_addr;
3322 __m128i *qes = (__m128i *)qe;
3323 uint64_t *cache_line_base;
3326 cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3327 cq_addr = &cq_addr[qm_port->cq_idx];
3329 cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
3330 *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
3332 /* Load the next CQ cache line from memory. Pack these reads as tight
3333 * as possible to reduce the chance that DLB invalidates the line while
3334 * the CPU is reading it. Read the cache line backwards to ensure that
3335 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
3337 * (Valid QEs start at &qe[offset])
3339 qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
3340 qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
3341 qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
3342 qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
3344 /* Evict the cache line ASAP */
3345 rte_cldemote(cache_line_base);
3347 /* Extract and combine the gen bits */
3348 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3349 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3350 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3351 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3353 /* XOR the combined bits such that a 1 represents a valid QE */
3354 gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
3356 /* Mask off gen bits we don't care about */
3357 gen_bits &= and_mask[*offset];
3359 return __builtin_popcount(gen_bits);
3362 static inline int16_t
3363 dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
3364 struct dlb2_eventdev_port *ev_port,
3365 struct rte_event *events,
3367 uint64_t dequeue_timeout_ticks)
3370 uint64_t start_ticks = 0ULL;
3371 struct dlb2_port *qm_port;
3374 qm_port = &ev_port->qm_port;
3376 /* We have a special implementation for waiting. Wait can be:
3377 * 1) no waiting at all
3379 * 3) wait for interrupt. If wakeup and poll time
3380 * has expired, then return to caller
3381 * 4) umonitor/umwait repeatedly up to poll time
3384 /* If configured for per dequeue wait, then use wait value provided
3385 * to this API. Otherwise we must use the global
3386 * value from eventdev config time.
3388 if (!dlb2->global_dequeue_wait)
3389 timeout = dequeue_timeout_ticks;
3391 timeout = dlb2->global_dequeue_wait_ticks;
3393 start_ticks = rte_get_timer_cycles();
3395 while (num < max_num) {
3396 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3400 /* Copy up to 4 QEs from the current cache line into qes */
3401 num_avail = dlb2_recv_qe(qm_port, qes, &offset);
3403 /* But don't process more than the user requested */
3404 num_avail = RTE_MIN(num_avail, max_num - num);
3406 dlb2_inc_cq_idx(qm_port, num_avail);
3408 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3409 num += dlb2_process_dequeue_four_qes(ev_port,
3414 num += dlb2_process_dequeue_qes(ev_port,
3419 else if ((timeout == 0) || (num > 0))
3420 /* Not waiting in any form, or 1+ events received? */
3422 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3423 timeout, start_ticks))
3427 qm_port->owed_tokens += num;
3430 if (qm_port->token_pop_mode == AUTO_POP)
3431 dlb2_consume_qe_immediate(qm_port, num);
3433 ev_port->outstanding_releases += num;
3435 dlb2_port_credits_inc(qm_port, num);
3442 dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3445 struct dlb2_eventdev_port *ev_port = event_port;
3446 struct dlb2_port *qm_port = &ev_port->qm_port;
3447 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3450 RTE_ASSERT(ev_port->setup_done);
3451 RTE_ASSERT(ev != NULL);
3453 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3454 uint16_t out_rels = ev_port->outstanding_releases;
3456 dlb2_event_release(dlb2, ev_port->id, out_rels);
3458 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3461 if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3462 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3464 cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
3466 DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3467 DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3473 dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3475 return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
3479 dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3480 uint16_t num, uint64_t wait)
3482 struct dlb2_eventdev_port *ev_port = event_port;
3483 struct dlb2_port *qm_port = &ev_port->qm_port;
3484 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3487 RTE_ASSERT(ev_port->setup_done);
3488 RTE_ASSERT(ev != NULL);
3490 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3491 uint16_t out_rels = ev_port->outstanding_releases;
3493 dlb2_event_release(dlb2, ev_port->id, out_rels);
3495 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3498 if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3499 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3501 cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
3503 DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3504 DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3509 dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
3512 return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3516 dlb2_flush_port(struct rte_eventdev *dev, int port_id)
3518 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3519 eventdev_stop_flush_t flush;
3520 struct rte_event ev;
3525 flush = dev->dev_ops->dev_stop_flush;
3526 dev_id = dev->data->dev_id;
3527 arg = dev->data->dev_stop_flush_arg;
3529 while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
3531 flush(dev_id, ev, arg);
3533 if (dlb2->ev_ports[port_id].qm_port.is_directed)
3536 ev.op = RTE_EVENT_OP_RELEASE;
3538 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3541 /* Enqueue any additional outstanding releases */
3542 ev.op = RTE_EVENT_OP_RELEASE;
3544 for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
3545 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3549 dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
3550 struct dlb2_eventdev_queue *queue)
3552 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3553 struct dlb2_get_ldb_queue_depth_args cfg;
3556 cfg.queue_id = queue->qm_queue.id;
3558 ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
3560 DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
3561 ret, dlb2_error_strings[cfg.response.status]);
3565 return cfg.response.id;
3569 dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
3570 struct dlb2_eventdev_queue *queue)
3572 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3573 struct dlb2_get_dir_queue_depth_args cfg;
3576 cfg.queue_id = queue->qm_queue.id;
3578 ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
3580 DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
3581 ret, dlb2_error_strings[cfg.response.status]);
3585 return cfg.response.id;
3589 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
3590 struct dlb2_eventdev_queue *queue)
3592 if (queue->qm_queue.is_directed)
3593 return dlb2_get_dir_queue_depth(dlb2, queue);
3595 return dlb2_get_ldb_queue_depth(dlb2, queue);
3599 dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
3600 struct dlb2_eventdev_queue *queue)
3602 return dlb2_get_queue_depth(dlb2, queue) == 0;
3606 dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
3610 for (i = 0; i < dlb2->num_queues; i++) {
3611 if (dlb2->ev_queues[i].num_links == 0)
3613 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3621 dlb2_queues_empty(struct dlb2_eventdev *dlb2)
3625 for (i = 0; i < dlb2->num_queues; i++) {
3626 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3634 dlb2_drain(struct rte_eventdev *dev)
3636 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3637 struct dlb2_eventdev_port *ev_port = NULL;
3641 dev_id = dev->data->dev_id;
3643 while (!dlb2_linked_queues_empty(dlb2)) {
3644 /* Flush all the ev_ports, which will drain all their connected
3647 for (i = 0; i < dlb2->num_ports; i++)
3648 dlb2_flush_port(dev, i);
3651 /* The queues are empty, but there may be events left in the ports. */
3652 for (i = 0; i < dlb2->num_ports; i++)
3653 dlb2_flush_port(dev, i);
3655 /* If the domain's queues are empty, we're done. */
3656 if (dlb2_queues_empty(dlb2))
3659 /* Else, there must be at least one unlinked load-balanced queue.
3660 * Select a load-balanced port with which to drain the unlinked
3663 for (i = 0; i < dlb2->num_ports; i++) {
3664 ev_port = &dlb2->ev_ports[i];
3666 if (!ev_port->qm_port.is_directed)
3670 if (i == dlb2->num_ports) {
3671 DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
3676 rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
3679 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
3684 for (i = 0; i < dlb2->num_queues; i++) {
3688 if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3694 /* Link the ev_port to the queue */
3695 ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
3697 DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
3702 /* Flush the queue */
3703 while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3704 dlb2_flush_port(dev, ev_port->id);
3706 /* Drain any extant events in the ev_port. */
3707 dlb2_flush_port(dev, ev_port->id);
3709 /* Unlink the ev_port from the queue */
3710 ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
3712 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
3720 dlb2_eventdev_stop(struct rte_eventdev *dev)
3722 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3724 rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
3726 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
3727 DLB2_LOG_DBG("Internal error: already stopped\n");
3728 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3730 } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
3731 DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
3732 (int)dlb2->run_state);
3733 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3737 dlb2->run_state = DLB2_RUN_STATE_STOPPING;
3739 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3743 dlb2->run_state = DLB2_RUN_STATE_STOPPED;
3747 dlb2_eventdev_close(struct rte_eventdev *dev)
3749 dlb2_hw_reset_sched_domain(dev, false);
3755 dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
3760 /* This function intentionally left blank. */
3764 dlb2_eventdev_port_release(void *port)
3766 struct dlb2_eventdev_port *ev_port = port;
3767 struct dlb2_port *qm_port;
3770 qm_port = &ev_port->qm_port;
3771 if (qm_port->config_state == DLB2_CONFIGURED)
3772 dlb2_free_qe_mem(qm_port);
3777 dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
3778 uint64_t *timeout_ticks)
3781 uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
3783 *timeout_ticks = ns * cycles_per_ns;
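/*
 * Illustration only, not driver code: the conversion above uses an
 * integer cycles-per-nanosecond factor, so with a 2 GHz timer
 * cycles_per_ns == 2 and a 1000 ns request becomes 2000 ticks. On clock
 * rates that are not a whole multiple of 1 GHz the fractional part is
 * truncated; 2.5 GHz also yields cycles_per_ns == 2.
 */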
3789 dlb2_entry_points_init(struct rte_eventdev *dev)
3791 struct dlb2_eventdev *dlb2;
3793 /* Expose PMD's eventdev interface */
3794 static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
3795 .dev_infos_get = dlb2_eventdev_info_get,
3796 .dev_configure = dlb2_eventdev_configure,
3797 .dev_start = dlb2_eventdev_start,
3798 .dev_stop = dlb2_eventdev_stop,
3799 .dev_close = dlb2_eventdev_close,
3800 .queue_def_conf = dlb2_eventdev_queue_default_conf_get,
3801 .queue_setup = dlb2_eventdev_queue_setup,
3802 .queue_release = dlb2_eventdev_queue_release,
3803 .port_def_conf = dlb2_eventdev_port_default_conf_get,
3804 .port_setup = dlb2_eventdev_port_setup,
3805 .port_release = dlb2_eventdev_port_release,
3806 .port_link = dlb2_eventdev_port_link,
3807 .port_unlink = dlb2_eventdev_port_unlink,
3808 .port_unlinks_in_progress =
3809 dlb2_eventdev_port_unlinks_in_progress,
3810 .timeout_ticks = dlb2_eventdev_timeout_ticks,
3811 .dump = dlb2_eventdev_dump,
3812 .xstats_get = dlb2_eventdev_xstats_get,
3813 .xstats_get_names = dlb2_eventdev_xstats_get_names,
3814 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
3815 .xstats_reset = dlb2_eventdev_xstats_reset,
3816 .dev_selftest = test_dlb2_eventdev,
3821 dev->dev_ops = &dlb2_eventdev_entry_ops;
3822 dev->enqueue = dlb2_event_enqueue;
3823 dev->enqueue_burst = dlb2_event_enqueue_burst;
3824 dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
3825 dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
3827 dlb2 = dev->data->dev_private;
3828 if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
3829 dev->dequeue = dlb2_event_dequeue_sparse;
3830 dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
3832 dev->dequeue = dlb2_event_dequeue;
3833 dev->dequeue_burst = dlb2_event_dequeue_burst;
3838 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
3840 struct dlb2_devargs *dlb2_args)
3842 struct dlb2_eventdev *dlb2;
3845 dlb2 = dev->data->dev_private;
3847 dlb2->event_dev = dev; /* backlink */
3849 evdev_dlb2_default_info.driver_name = name;
3851 dlb2->max_num_events_override = dlb2_args->max_num_events;
3852 dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
3853 dlb2->qm_instance.cos_id = dlb2_args->cos_id;
3855 err = dlb2_iface_open(&dlb2->qm_instance, name);
3857 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3862 err = dlb2_iface_get_device_version(&dlb2->qm_instance,
3865 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
3870 err = dlb2_hw_query_resources(dlb2);
3872 DLB2_LOG_ERR("get resources err=%d for %s\n",
3877 dlb2_iface_hardware_init(&dlb2->qm_instance);
3879 err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
3881 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
3886 /* Complete xstats runtime initialization */
3887 err = dlb2_xstats_init(dlb2);
3889 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
3893 /* Initialize each port's token pop mode */
3894 for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
3895 dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
3897 rte_spinlock_init(&dlb2->qm_instance.resource_lock);
3899 dlb2_iface_low_level_io_init();
3901 dlb2_entry_points_init(dev);
3903 dlb2_init_queue_depth_thresholds(dlb2,
3904 dlb2_args->qid_depth_thresholds.val);
3910 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
3913 struct dlb2_eventdev *dlb2;
3916 dlb2 = dev->data->dev_private;
3918 evdev_dlb2_default_info.driver_name = name;
3920 err = dlb2_iface_open(&dlb2->qm_instance, name);
3922 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3927 err = dlb2_hw_query_resources(dlb2);
3929 DLB2_LOG_ERR("get resources err=%d for %s\n",
3934 dlb2_iface_low_level_io_init();
3936 dlb2_entry_points_init(dev);
3942 dlb2_parse_params(const char *params,
3944 struct dlb2_devargs *dlb2_args)
3947 static const char * const args[] = { NUMA_NODE_ARG,
3948 DLB2_MAX_NUM_EVENTS,
3949 DLB2_NUM_DIR_CREDITS,
3951 DLB2_QID_DEPTH_THRESH_ARG,
3955 if (params != NULL && params[0] != '\0') {
3956 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
3958 if (kvlist == NULL) {
3960 "Ignoring unsupported parameters when creating device '%s'\n",
3963 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
3965 &dlb2_args->socket_id);
3967 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
3969 rte_kvargs_free(kvlist);
3973 ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
3975 &dlb2_args->max_num_events);
3977 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
3979 rte_kvargs_free(kvlist);
3983 ret = rte_kvargs_process(kvlist,
3984 DLB2_NUM_DIR_CREDITS,
3985 set_num_dir_credits,
3986 &dlb2_args->num_dir_credits_override);
3988 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
3990 rte_kvargs_free(kvlist);
3994 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
3996 &dlb2_args->dev_id);
3998 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
4000 rte_kvargs_free(kvlist);
4004 ret = rte_kvargs_process(
4006 DLB2_QID_DEPTH_THRESH_ARG,
4007 set_qid_depth_thresh,
4008 &dlb2_args->qid_depth_thresholds);
4010 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
4012 rte_kvargs_free(kvlist);
4016 ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
4018 &dlb2_args->cos_id);
4020 DLB2_LOG_ERR("%s: Error parsing cos parameter",
4022 rte_kvargs_free(kvlist);
4026 rte_kvargs_free(kvlist);
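/*
 * Hypothetical usage, not driver code: dlb2_parse_params() accepts
 * comma-separated key=value devargs named by the macros in args[] (NUMA
 * node, maximum events, directed credits, device id, per-queue depth
 * thresholds, class of service). Assuming the macros expand to the key
 * spellings used in the DLB2 PMD guide, a devargs string might look like:
 *
 *     "max_num_events=4096,num_dir_credits=256,qid_depth_thresh=all:128"
 *
 * The authoritative key names come from dlb2_priv.h, not from this file.
 */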
4031 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);