/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <nmmintrin.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_power_intrinsics.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "dlb2_priv.h"
#include "dlb2_iface.h"
#include "dlb2_inline_fns.h"

/*
 * Resources exposed to eventdev. Some values overridden at runtime using
 * values returned by the DLB kernel driver.
 */
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
#endif

static struct rte_event_dev_info evdev_dlb2_default_info = {
	.driver_name = "", /* probe will set */
	.min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
	.max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
#else
	.max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
#endif
	.max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
	.max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
	.max_event_priority_levels = DLB2_QID_PRIORITIES,
	.max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
	.max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
	.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
	.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
	.max_single_link_event_port_queue_pairs =
		DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
			  RTE_EVENT_DEV_CAP_EVENT_QOS |
			  RTE_EVENT_DEV_CAP_BURST_MODE |
			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
};
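/*
 * Illustrative sketch (not part of the PMD): an application reads these
 * limits through the standard eventdev info API before configuring the
 * device. The device ID 0 is a placeholder.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(0, &info) == 0)
 *		printf("max queues=%u max ports=%u max events=%d\n",
 *		       info.max_event_queues, info.max_event_ports,
 *		       info.max_num_events);
 */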
struct process_local_port_data
dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];

static void
dlb2_free_qe_mem(struct dlb2_port *qm_port)
{
	if (qm_port == NULL)
		return;

	rte_free(qm_port->qe4);
	qm_port->qe4 = NULL;

	rte_free(qm_port->int_arm_qe);
	qm_port->int_arm_qe = NULL;

	rte_free(qm_port->consume_qe);
	qm_port->consume_qe = NULL;

	rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
	dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
}

/* Override default values with value(s) provided on the command line. */
static void
dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
				 int *qid_depth_thresholds)
{
	int q;

	for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
		if (qid_depth_thresholds[q] != 0)
			dlb2->ev_queues[q].depth_threshold =
				qid_depth_thresholds[q];
	}
}

static int
dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_hw_resource_info *dlb2_info = &handle->info;
	int ret;

	/* Query driver resources provisioned for this device */

	ret = dlb2_iface_get_num_resources(handle,
					   &dlb2->hw_rsrc_query_results);
	if (ret) {
		DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
		return ret;
	}

	/* Complete filling in device resource info returned to evdev app,
	 * overriding any default values.
	 * The capabilities (CAPs) were set at compile time.
	 */

	evdev_dlb2_default_info.max_event_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues;

	evdev_dlb2_default_info.max_event_ports =
		dlb2->hw_rsrc_query_results.num_ldb_ports;

	if (dlb2->version == DLB2_HW_V2_5) {
		evdev_dlb2_default_info.max_num_events =
			dlb2->hw_rsrc_query_results.num_credits;
	} else {
		evdev_dlb2_default_info.max_num_events =
			dlb2->hw_rsrc_query_results.num_ldb_credits;
	}

	/* Save off values used when creating the scheduling domain. */

	handle->info.num_sched_domains =
		dlb2->hw_rsrc_query_results.num_sched_domains;

	if (dlb2->version == DLB2_HW_V2_5) {
		handle->info.hw_rsrc_max.nb_events_limit =
			dlb2->hw_rsrc_query_results.num_credits;
	} else {
		handle->info.hw_rsrc_max.nb_events_limit =
			dlb2->hw_rsrc_query_results.num_ldb_credits;
	}

	handle->info.hw_rsrc_max.num_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues +
		dlb2->hw_rsrc_query_results.num_dir_ports;

	handle->info.hw_rsrc_max.num_ldb_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues;

	handle->info.hw_rsrc_max.num_ldb_ports =
		dlb2->hw_rsrc_query_results.num_ldb_ports;

	handle->info.hw_rsrc_max.num_dir_ports =
		dlb2->hw_rsrc_query_results.num_dir_ports;

	handle->info.hw_rsrc_max.reorder_window_size =
		dlb2->hw_rsrc_query_results.num_hist_list_entries;

	rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));

	return 0;
}

#define DLB2_BASE_10 10

static int
dlb2_string_to_int(int *result, const char *str)
{
	long ret;
	char *endptr;

	if (str == NULL || result == NULL)
		return -EINVAL;

	errno = 0;
	ret = strtol(str, &endptr, DLB2_BASE_10);
	if (errno)
		return -errno;

	/* long int and int may be different width for some architectures */
	if (ret < INT_MIN || ret > INT_MAX || endptr == str)
		return -EINVAL;

	*result = ret;
	return 0;
}
static int
set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	int ret;

	ret = dlb2_string_to_int(socket_id, value);
	if (ret < 0)
		return ret;

	if (*socket_id > RTE_MAX_NUMA_NODES)
		return -EINVAL;
	return 0;
}

static int
set_max_num_events(const char *key __rte_unused,
		   const char *value,
		   void *opaque)
{
	int *max_num_events = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(max_num_events, value);
	if (ret < 0)
		return ret;

	if (*max_num_events < 0 || *max_num_events >
			DLB2_MAX_NUM_LDB_CREDITS) {
		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
			     DLB2_MAX_NUM_LDB_CREDITS);
		return -EINVAL;
	}

	return 0;
}

static int
set_num_dir_credits(const char *key __rte_unused,
		    const char *value,
		    void *opaque)
{
	int *num_dir_credits = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(num_dir_credits, value);
	if (ret < 0)
		return ret;

	if (*num_dir_credits < 0 ||
	    *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
		DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
			     DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
		return -EINVAL;
	}

	return 0;
}

static int
set_dev_id(const char *key __rte_unused,
	   const char *value,
	   void *opaque)
{
	int *dev_id = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(dev_id, value);
	if (ret < 0)
		return ret;

	return 0;
}

static int
set_cos(const char *key __rte_unused,
	const char *value,
	void *opaque)
{
	enum dlb2_cos *cos_id = opaque;
	int x = 0;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(&x, value);
	if (ret < 0)
		return ret;

	if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
		DLB2_LOG_ERR(
			"COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
			x);
		return -EINVAL;
	}

	*cos_id = x;

	return 0;
}

static int
set_qid_depth_thresh(const char *key __rte_unused,
		     const char *value,
		     void *opaque)
{
	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
	int first, last, thresh, i;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	/* The command line override may take one of the following 3 forms:
	 * qid_depth_thresh=all:<threshold_value> ... all queues
	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
	 */
	if (sscanf(value, "all:%d", &thresh) == 1) {
		first = 0;
		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
		/* we have everything we need */
	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
		last = first;
	} else {
		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
		return -EINVAL;
	}

	if (first > last || first < 0 ||
	    last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
		return -EINVAL;
	}

	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
		return -EINVAL;
	}

	for (i = first; i <= last; i++)
		qid_thresh->val[i] = thresh; /* indexed by qid */

	return 0;
}

static int
set_qid_depth_thresh_v2_5(const char *key __rte_unused,
			  const char *value,
			  void *opaque)
{
	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
	int first, last, thresh, i;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	/* The command line override may take one of the following 3 forms:
	 * qid_depth_thresh=all:<threshold_value> ... all queues
	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
	 */
	if (sscanf(value, "all:%d", &thresh) == 1) {
		first = 0;
		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
		/* we have everything we need */
	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
		last = first;
	} else {
		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
		return -EINVAL;
	}

	if (first > last || first < 0 ||
	    last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
		return -EINVAL;
	}

	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
		return -EINVAL;
	}

	for (i = first; i <= last; i++)
		qid_thresh->val[i] = thresh; /* indexed by qid */

	return 0;
}
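/*
 * Example devargs for the override above (the PCI address is a
 * placeholder):
 *
 *	qid_depth_thresh=all:128   ... threshold of 128 for every queue
 *	qid_depth_thresh=3-5:1024  ... queues 3 through 5 only
 *	qid_depth_thresh=7:512     ... queue 7 only
 *
 * e.g. -a e0:00.0,qid_depth_thresh=all:128
 */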
static void
dlb2_eventdev_info_get(struct rte_eventdev *dev,
		       struct rte_event_dev_info *dev_info)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int ret;

	ret = dlb2_hw_query_resources(dlb2);
	if (ret) {
		const struct rte_eventdev_data *data = dev->data;

		DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
			     ret, data->dev_id);
		/* fn is void, so fall through and return values set up in
		 * probe
		 */
	}

	/* Add num resources currently owned by this domain.
	 * These would become available if the scheduling domain were reset due
	 * to the application recalling eventdev_configure to *reconfigure* the
	 * domain.
	 */
	evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
	evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
	if (dlb2->version == DLB2_HW_V2_5) {
		evdev_dlb2_default_info.max_num_events +=
			dlb2->max_credits;
	} else {
		evdev_dlb2_default_info.max_num_events +=
			dlb2->max_ldb_credits;
	}

	evdev_dlb2_default_info.max_event_queues =
		RTE_MIN(evdev_dlb2_default_info.max_event_queues,
			RTE_EVENT_MAX_QUEUES_PER_DEV);

	evdev_dlb2_default_info.max_num_events =
		RTE_MIN(evdev_dlb2_default_info.max_num_events,
			dlb2->max_num_events_override);

	*dev_info = evdev_dlb2_default_info;
}

static int
dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
			    const struct dlb2_hw_rsrcs *resources_asked,
			    uint8_t device_version)
{
	int ret = 0;
	struct dlb2_create_sched_domain_args *cfg;

	if (resources_asked == NULL) {
		DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
		ret = EINVAL;
		goto error_exit;
	}

	/* Map generic qm resources to dlb2 resources */
	cfg = &handle->cfg.resources;

	/* DIR ports and queues */

	cfg->num_dir_ports = resources_asked->num_dir_ports;
	if (device_version == DLB2_HW_V2_5)
		cfg->num_credits = resources_asked->num_credits;
	else
		cfg->num_dir_credits = resources_asked->num_dir_credits;

	/* LDB queues */

	cfg->num_ldb_queues = resources_asked->num_ldb_queues;

	/* LDB ports */

	cfg->cos_strict = 0; /* Best effort */
	cfg->num_cos_ldb_ports[0] = 0;
	cfg->num_cos_ldb_ports[1] = 0;
	cfg->num_cos_ldb_ports[2] = 0;
	cfg->num_cos_ldb_ports[3] = 0;

	switch (handle->cos_id) {
	case DLB2_COS_0:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[0] =
			resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_1:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_2:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_3:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[3] =
			resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_DEFAULT:
	default:
		/* all ldb ports are don't care ports from a cos perspective */
		cfg->num_ldb_ports =
			resources_asked->num_ldb_ports;
		break;
	}

	if (device_version == DLB2_HW_V2)
		cfg->num_ldb_credits = resources_asked->num_ldb_credits;

	cfg->num_atomic_inflights =
		DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
		cfg->num_ldb_queues;

	cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
		DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	if (device_version == DLB2_HW_V2_5) {
		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
			     cfg->num_ldb_queues,
			     resources_asked->num_ldb_ports,
			     cfg->num_dir_ports,
			     cfg->num_atomic_inflights,
			     cfg->num_hist_list_entries,
			     cfg->num_credits);
	} else {
		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
			     cfg->num_ldb_queues,
			     resources_asked->num_ldb_ports,
			     cfg->num_dir_ports,
			     cfg->num_atomic_inflights,
			     cfg->num_hist_list_entries,
			     cfg->num_ldb_credits,
			     cfg->num_dir_credits);
	}

	/* Configure the QM */

	ret = dlb2_iface_sched_domain_create(handle, cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
			     ret,
			     dlb2_error_strings[cfg->response.status]);

		goto error_exit;
	}

	handle->domain_id = cfg->response.id;
	handle->cfg.configured = true;

error_exit:

	return ret;
}

static void
dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	enum dlb2_configuration_state config_state;
	int i, j;

	dlb2_iface_domain_reset(dlb2);

	/* Free all dynamically allocated port memory */
	for (i = 0; i < dlb2->num_ports; i++)
		dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);

	/* If reconfiguring, mark the device's queues and ports as "previously
	 * configured." If the user doesn't reconfigure them, the PMD will
	 * reapply their previous configuration when the device is started.
	 */
	config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
		DLB2_NOT_CONFIGURED;

	for (i = 0; i < dlb2->num_ports; i++) {
		dlb2->ev_ports[i].qm_port.config_state = config_state;
		/* Reset setup_done so ports can be reconfigured */
		dlb2->ev_ports[i].setup_done = false;
		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
			dlb2->ev_ports[i].link[j].mapped = false;
	}

	for (i = 0; i < dlb2->num_queues; i++)
		dlb2->ev_queues[i].qm_queue.config_state = config_state;

	for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
		dlb2->ev_queues[i].setup_done = false;

	dlb2->num_ports = 0;
	dlb2->num_ldb_ports = 0;
	dlb2->num_dir_ports = 0;
	dlb2->num_queues = 0;
	dlb2->num_ldb_queues = 0;
	dlb2->num_dir_queues = 0;
	dlb2->configured = false;
}

/* Note: 1 QM instance per QM device, QM instance/device == event device */
static int
dlb2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *config = &data->dev_conf;
	int ret;

	/* If this eventdev is already configured, we must release the current
	 * scheduling domain before attempting to configure a new one.
	 */
	if (dlb2->configured) {
		dlb2_hw_reset_sched_domain(dev, true);
		ret = dlb2_hw_query_resources(dlb2);
		if (ret) {
			DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
				     ret, data->dev_id);
			return ret;
		}
	}

	if (config->nb_event_queues > rsrcs->num_queues) {
		DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_event_queues,
			     rsrcs->num_queues);
		return -EINVAL;
	}
	if (config->nb_event_ports > (rsrcs->num_ldb_ports
			+ rsrcs->num_dir_ports)) {
		DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_event_ports,
			     (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
		return -EINVAL;
	}
	if (config->nb_events_limit > rsrcs->nb_events_limit) {
		DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_events_limit,
			     rsrcs->nb_events_limit);
		return -EINVAL;
	}

	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dlb2->global_dequeue_wait = false;
	else {
		uint32_t timeout32;

		dlb2->global_dequeue_wait = true;

		/* note size mismatch of timeout vals in eventdev lib. */
		timeout32 = config->dequeue_timeout_ns;

		dlb2->global_dequeue_wait_ticks =
			timeout32 * (rte_get_timer_hz() / 1E9);
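		/* Worked example: with a 2 GHz timer (rte_get_timer_hz() ==
		 * 2e9) the scale factor is 2.0, so a dequeue_timeout_ns of
		 * 1000 becomes 2000 ticks.
		 */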
	}

	/* Does this platform support umonitor/umwait? */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
		if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
		    RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
			DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
				     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
			return -EINVAL;
		}
		dlb2->umwait_allowed = true;
	}

	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
	/* 1 dir queue per dir port */
	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;

	if (dlb2->version == DLB2_HW_V2_5) {
		rsrcs->num_credits = 0;
		if (rsrcs->num_ldb_queues || rsrcs->num_dir_ports)
			rsrcs->num_credits = config->nb_events_limit;
	} else {
		/* Scale down nb_events_limit by 4 for directed credits,
		 * since there are 4x as many load-balanced credits.
		 */
		rsrcs->num_ldb_credits = 0;
		rsrcs->num_dir_credits = 0;

		if (rsrcs->num_ldb_queues)
			rsrcs->num_ldb_credits = config->nb_events_limit;
		if (rsrcs->num_dir_ports)
			rsrcs->num_dir_credits = config->nb_events_limit / 4;
		if (dlb2->num_dir_credits_override != -1)
			rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
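		/* Worked example: nb_events_limit = 2048 yields 2048
		 * load-balanced credits and 2048 / 4 = 512 directed credits,
		 * unless the num_dir_credits devarg overrides the latter.
		 */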
	}

	if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) {
		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
		return -ENODEV;
	}

	dlb2->new_event_limit = config->nb_events_limit;
	__atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);

	/* Save number of ports/queues for this event dev */
	dlb2->num_ports = config->nb_event_ports;
	dlb2->num_queues = config->nb_event_queues;
	dlb2->num_dir_ports = rsrcs->num_dir_ports;
	dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
	dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
	dlb2->num_dir_queues = dlb2->num_dir_ports;
	if (dlb2->version == DLB2_HW_V2_5) {
		dlb2->credit_pool = rsrcs->num_credits;
		dlb2->max_credits = rsrcs->num_credits;
	} else {
		dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
		dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
		dlb2->dir_credit_pool = rsrcs->num_dir_credits;
		dlb2->max_dir_credits = rsrcs->num_dir_credits;
	}

	dlb2->configured = true;

	return 0;
}
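/*
 * Illustrative sketch (not part of the PMD): a minimal application-side
 * configuration that exercises the logic above; all counts are
 * hypothetical.
 *
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 4,
 *		.nb_event_ports = 4,
 *		.nb_single_link_event_port_queues = 1,
 *		.nb_events_limit = 2048,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 32,
 *		.nb_event_port_enqueue_depth = 32,
 *	};
 *
 *	ret = rte_event_dev_configure(0, &cfg);
 *
 * With these values the PMD reserves one directed port/queue pair plus
 * three load-balanced ports and queues, and on V2 hardware requests
 * 2048 LDB credits and 512 DIR credits.
 */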
static void
dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
				    uint8_t port_id,
				    struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(port_id);
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);

	port_conf->new_event_threshold = dlb2->new_event_limit;
	port_conf->dequeue_depth = 32;
	port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
	port_conf->event_port_cfg = 0;
}

static void
dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
				     uint8_t queue_id,
				     struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = 1024;
	queue_conf->nb_atomic_order_sequences = 64;
	queue_conf->event_queue_cfg = 0;
	queue_conf->priority = 0;
}

static int32_t
dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_get_sn_allocation_args cfg;
	int ret;

	cfg.group = group;

	ret = dlb2_iface_get_sn_allocation(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return ret;
	}

	return cfg.response.id;
}

static int
dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_set_sn_allocation_args cfg;
	int ret;

	cfg.num = num;
	cfg.group = group;

	ret = dlb2_iface_set_sn_allocation(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return ret;
	}

	return ret;
}

static int32_t
dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_get_sn_occupancy_args cfg;
	int ret;

	cfg.group = group;

	ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return ret;
	}

	return cfg.response.id;
}

/* Query the current sequence number allocations and, if they conflict with the
 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
 * This is best-effort; if it fails, the PMD will attempt to configure the
 * load-balanced queue and return an error.
 */
static void
dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
			   const struct rte_event_queue_conf *queue_conf)
{
	int grp_occupancy[DLB2_NUM_SN_GROUPS];
	int grp_alloc[DLB2_NUM_SN_GROUPS];
	int i, sequence_numbers;

	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;

	for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
		int total_slots;

		grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
		if (grp_alloc[i] < 0)
			return;

		total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];

		grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
		if (grp_occupancy[i] < 0)
			return;

		/* DLB has at least one available slot for the requested
		 * sequence numbers, so no further configuration required.
		 */
		if (grp_alloc[i] == sequence_numbers &&
		    grp_occupancy[i] < total_slots)
			return;
	}

	/* None of the sequence number groups are configured for the requested
	 * sequence numbers, so we have to reconfigure one of them. This is
	 * only possible if a group is not in use.
	 */
	for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
		if (grp_occupancy[i] == 0)
			break;
	}

	if (i == DLB2_NUM_SN_GROUPS) {
		DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
			     __func__, sequence_numbers);
		return;
	}

	/* Attempt to configure slot i with the requested number of sequence
	 * numbers. Ignore the return value -- if this fails, the error will be
	 * caught during subsequent queue configuration.
	 */
	dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
}
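/*
 * Worked example (sizes are illustrative): if DLB2_MAX_LDB_SN_ALLOC were
 * 1024 and a group were allocated 64 sequence numbers per slot, the group
 * would provide 1024 / 64 = 16 slots, and a queue requesting 64 SNs fits
 * as long as fewer than 16 slots are occupied.
 */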
static int32_t
dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
			 struct dlb2_eventdev_queue *ev_queue,
			 const struct rte_event_queue_conf *evq_conf)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_queue *queue = &ev_queue->qm_queue;
	struct dlb2_create_ldb_queue_args cfg;
	int32_t ret;
	uint32_t qm_qid;
	int sched_type = -1;

	if (evq_conf == NULL)
		return -EINVAL;

	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
		if (evq_conf->nb_atomic_order_sequences != 0)
			sched_type = RTE_SCHED_TYPE_ORDERED;
		else
			sched_type = RTE_SCHED_TYPE_PARALLEL;
	} else
		sched_type = evq_conf->schedule_type;

	cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;

	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
		cfg.num_sequence_numbers = 0;
		cfg.num_qid_inflights = 2048;
	}

	/* App should set this to the number of hardware flows they want, not
	 * the overall number of flows they're going to use. E.g. if app is
	 * using 64 flows and sets compression to 64, best-case they'll get
	 * 64 unique hashed flows in hardware.
	 */
	switch (evq_conf->nb_atomic_flows) {
	/* Valid DLB2 compression levels */
	case 64:
	case 128:
	case 256:
	case 512:
	case (1 * 1024): /* 1K */
	case (2 * 1024): /* 2K */
	case (4 * 1024): /* 4K */
	case (64 * 1024): /* 64K */
		cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
		break;
	default:
		/* Invalid compression level */
		cfg.lock_id_comp_level = 0; /* no compression */
	}

	if (ev_queue->depth_threshold == 0) {
		cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
		ev_queue->depth_threshold =
			RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
	} else
		cfg.depth_threshold = ev_queue->depth_threshold;

	ret = dlb2_iface_ldb_queue_create(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return -EINVAL;
	}

	qm_qid = cfg.response.id;

	/* Save off queue config for debug, resource lookups, and reconfig */
	queue->num_qid_inflights = cfg.num_qid_inflights;
	queue->num_atm_inflights = cfg.num_atomic_inflights;

	queue->sched_type = sched_type;
	queue->config_state = DLB2_CONFIGURED;

	DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
		     qm_qid,
		     cfg.num_atomic_inflights,
		     cfg.num_sequence_numbers,
		     cfg.num_qid_inflights);

	return qm_qid;
}
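/*
 * Illustrative sketch (not part of the PMD): an ordered queue whose
 * nb_atomic_flows lands on a valid DLB2 compression level. Values are
 * hypothetical.
 *
 *	struct rte_event_queue_conf qconf = {
 *		.schedule_type = RTE_SCHED_TYPE_ORDERED,
 *		.nb_atomic_flows = 1024,
 *		.nb_atomic_order_sequences = 64,
 *	};
 *
 *	ret = rte_event_queue_setup(0, 0, &qconf);
 *
 * An nb_atomic_flows value that is not one of the levels listed above
 * (e.g. 1000) falls through to lock_id_comp_level = 0, i.e. no
 * compression.
 */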
static int
dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
			      struct dlb2_eventdev_queue *ev_queue,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int32_t qm_qid;

	if (queue_conf->nb_atomic_order_sequences)
		dlb2_program_sn_allocation(dlb2, queue_conf);

	qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
	if (qm_qid < 0) {
		DLB2_LOG_ERR("Failed to create the load-balanced queue\n");

		return qm_qid;
	}

	dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;

	ev_queue->qm_queue.id = qm_qid;

	return 0;
}

static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
{
	int i, num = 0;

	for (i = 0; i < dlb2->num_queues; i++) {
		if (dlb2->ev_queues[i].setup_done &&
		    dlb2->ev_queues[i].qm_queue.is_directed)
			num++;
	}

	return num;
}

static void
dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
			 struct dlb2_eventdev_queue *ev_queue)
{
	struct dlb2_eventdev_port *ev_port;
	int i, j;

	for (i = 0; i < dlb2->num_ports; i++) {
		ev_port = &dlb2->ev_ports[i];

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			if (!ev_port->link[j].valid ||
			    ev_port->link[j].queue_id != ev_queue->id)
				continue;

			ev_port->link[j].valid = false;
			ev_port->num_links--;
		}
	}

	ev_queue->num_links = 0;
}

static int
dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
			  uint8_t ev_qid,
			  const struct rte_event_queue_conf *queue_conf)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	struct dlb2_eventdev_queue *ev_queue;
	int ret;

	if (queue_conf == NULL)
		return -EINVAL;

	if (ev_qid >= dlb2->num_queues)
		return -EINVAL;

	ev_queue = &dlb2->ev_queues[ev_qid];

	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	ev_queue->id = ev_qid;
	ev_queue->conf = *queue_conf;

	if (!ev_queue->qm_queue.is_directed) {
		ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
	} else {
		/* The directed queue isn't setup until link time, at which
		 * point we know its directed port ID. Directed queue setup
		 * will only fail if this queue is already setup or there are
		 * no directed queues left to configure.
		 */
		ret = 0;

		ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;

		if (ev_queue->setup_done ||
		    dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
			ret = -EINVAL;
	}

	/* Tear down pre-existing port->queue links */
	if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		dlb2_queue_link_teardown(dlb2, ev_queue);

	if (!ret)
		ev_queue->setup_done = true;

	return ret;
}

static int
dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
{
	struct dlb2_cq_pop_qe *qe;

	qe = rte_zmalloc(mz_name,
			 DLB2_NUM_QES_PER_CACHE_LINE *
				sizeof(struct dlb2_cq_pop_qe),
			 RTE_CACHE_LINE_SIZE);

	if (qe == NULL) {
		DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
		return -ENOMEM;
	}
	qm_port->consume_qe = qe;

	qe->qe_valid = 0;
	qe->qe_frag = 0;
	qe->qe_comp = 0;
	qe->cq_token = 1;
	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
	 * and so on.
	 */
	qe->tokens = 0; /* set at run time */
	qe->meas_lat = 0;
	qe->no_dec = 0;
	/* Completion IDs are disabled */
	qe->cmp_id = 0;

	return 0;
}

static int
dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
{
	struct dlb2_enqueue_qe *qe;

	qe = rte_zmalloc(mz_name,
			 DLB2_NUM_QES_PER_CACHE_LINE *
				sizeof(struct dlb2_enqueue_qe),
			 RTE_CACHE_LINE_SIZE);

	if (qe == NULL) {
		DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
		return -ENOMEM;
	}
	qm_port->int_arm_qe = qe;

	/* V2 - INT ARM is CQ_TOKEN + FRAG */
	qe->qe_valid = 0;
	qe->qe_frag = 1;
	qe->qe_comp = 0;
	qe->cq_token = 1;
	qe->meas_lat = 0;
	qe->no_dec = 0;
	/* Completion IDs are disabled */
	qe->cmp_id = 0;

	return 0;
}

static int
dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
{
	int ret, sz;

	sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);

	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);

	if (qm_port->qe4 == NULL) {
		DLB2_LOG_ERR("dlb2: no qe4 memory\n");
		ret = -ENOMEM;
		goto error_exit;
	}

	ret = dlb2_init_int_arm_qe(qm_port, mz_name);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
		goto error_exit;
	}

	ret = dlb2_init_consume_qe(qm_port, mz_name);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
		goto error_exit;
	}

	return 0;

error_exit:

	dlb2_free_qe_mem(qm_port);

	return ret;
}

static inline uint16_t
dlb2_event_enqueue_delayed(void *event_port,
			   const struct rte_event events[]);

static inline uint16_t
dlb2_event_enqueue_burst_delayed(void *event_port,
				 const struct rte_event events[],
				 uint16_t num);

static inline uint16_t
dlb2_event_enqueue_new_burst_delayed(void *event_port,
				     const struct rte_event events[],
				     uint16_t num);

static inline uint16_t
dlb2_event_enqueue_forward_burst_delayed(void *event_port,
					 const struct rte_event events[],
					 uint16_t num);
static int
dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
			struct dlb2_eventdev_port *ev_port,
			uint32_t dequeue_depth,
			uint32_t enqueue_depth)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_create_ldb_port_args cfg = { {0} };
	int ret;
	struct dlb2_port *qm_port = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t qm_port_id;
	uint16_t ldb_credit_high_watermark = 0;
	uint16_t dir_credit_high_watermark = 0;
	uint16_t credit_high_watermark = 0;

	if (handle == NULL)
		return -EINVAL;

	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
		DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
			     DLB2_MIN_CQ_DEPTH);
		return -EINVAL;
	}

	if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
		DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
			     DLB2_MIN_ENQUEUE_DEPTH);
		return -EINVAL;
	}

	rte_spinlock_lock(&handle->resource_lock);

	/* We round up to the next power of 2 if necessary */
	cfg.cq_depth = rte_align32pow2(dequeue_depth);
	cfg.cq_depth_threshold = 1;

	cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	if (handle->cos_id == DLB2_COS_DEFAULT)
		cfg.cos_id = 0;
	else
		cfg.cos_id = handle->cos_id;

	cfg.cos_strict = 0;

	/* User controls the LDB high watermark via enqueue depth. The DIR high
	 * watermark is equal, unless the directed credit pool is too small.
	 */
	if (dlb2->version == DLB2_HW_V2) {
		ldb_credit_high_watermark = enqueue_depth;
		/* If there are no directed ports, the kernel driver will
		 * ignore this port's directed credit settings. Don't use
		 * enqueue_depth if it would require more directed credits
		 * than are available.
		 */
		dir_credit_high_watermark =
			RTE_MIN(enqueue_depth,
				handle->cfg.num_dir_credits / dlb2->num_ports);
	} else {
		credit_high_watermark = enqueue_depth;
	}

	ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		goto error_exit;
	}

	qm_port_id = cfg.response.id;

	DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
		     ev_port->id, qm_port_id);

	qm_port = &ev_port->qm_port;
	qm_port->ev_port = ev_port; /* back ptr */
	qm_port->dlb2 = dlb2; /* back ptr */

	/*
	 * Allocate and init local qe struct(s).
	 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
	 */

	snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
		 ev_port->id);

	ret = dlb2_init_qe_mem(qm_port, mz_name);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
		goto error_exit;
	}

	qm_port->id = qm_port_id;

	if (dlb2->version == DLB2_HW_V2) {
		qm_port->cached_ldb_credits = 0;
		qm_port->cached_dir_credits = 0;
	} else {
		qm_port->cached_credits = 0;
	}

	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
	 * the effective depth is smaller.
	 */
	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
	qm_port->cq_idx = 0;
	qm_port->cq_idx_unmasked = 0;

	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
		qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
	else
		qm_port->cq_depth_mask = qm_port->cq_depth - 1;

	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
	/* starting value of gen bit - it toggles at wrap time */
	qm_port->gen_bit = 1;
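	/* Worked example: an 8-entry CQ in sparse mode uses a 32-slot index
	 * space, so cq_depth_mask = 31 and gen_bit_shift = popcount(31) = 5;
	 * in dense mode the mask is 7 and the shift is 3.
	 */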
	qm_port->int_armed = false;

	/* Save off for later use in info and lookup APIs. */
	qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];

	qm_port->dequeue_depth = dequeue_depth;
	qm_port->token_pop_thresh = dequeue_depth;

	/* The default enqueue functions do not include delayed-pop support for
	 * performance reasons.
	 */
	if (qm_port->token_pop_mode == DELAYED_POP) {
		dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
		dlb2->event_dev->enqueue_burst =
			dlb2_event_enqueue_burst_delayed;
		dlb2->event_dev->enqueue_new_burst =
			dlb2_event_enqueue_new_burst_delayed;
		dlb2->event_dev->enqueue_forward_burst =
			dlb2_event_enqueue_forward_burst_delayed;
	}

	qm_port->owed_tokens = 0;
	qm_port->issued_releases = 0;

	/* Save config message too. */
	rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));

	/* update state */
	qm_port->state = PORT_STARTED; /* enabled at create time */
	qm_port->config_state = DLB2_CONFIGURED;

	if (dlb2->version == DLB2_HW_V2) {
		qm_port->dir_credits = dir_credit_high_watermark;
		qm_port->ldb_credits = ldb_credit_high_watermark;
		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;

		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
			     qm_port_id,
			     dequeue_depth,
			     qm_port->ldb_credits,
			     qm_port->dir_credits);
	} else {
		qm_port->credits = credit_high_watermark;
		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;

		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
			     qm_port_id,
			     dequeue_depth,
			     qm_port->credits);
	}

	rte_spinlock_unlock(&handle->resource_lock);

	return 0;

error_exit:

	if (qm_port)
		dlb2_free_qe_mem(qm_port);

	rte_spinlock_unlock(&handle->resource_lock);

	DLB2_LOG_ERR("dlb2: create ldb port failed!\n");

	return ret;
}
static void
dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
			struct dlb2_eventdev_port *ev_port)
{
	struct dlb2_eventdev_queue *ev_queue;
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (!ev_port->link[i].valid)
			continue;

		ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];

		ev_port->link[i].valid = false;
		ev_port->num_links--;
		ev_queue->num_links--;
	}
}

static int
dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
			struct dlb2_eventdev_port *ev_port,
			uint32_t dequeue_depth,
			uint32_t enqueue_depth)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_create_dir_port_args cfg = { {0} };
	int ret;
	struct dlb2_port *qm_port = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t qm_port_id;
	uint16_t ldb_credit_high_watermark = 0;
	uint16_t dir_credit_high_watermark = 0;
	uint16_t credit_high_watermark = 0;

	if (dlb2 == NULL || handle == NULL)
		return -EINVAL;

	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
		DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
			     DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
		return -EINVAL;
	}

	if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
		DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
			     DLB2_MIN_ENQUEUE_DEPTH);
		return -EINVAL;
	}

	rte_spinlock_lock(&handle->resource_lock);

	/* Directed queues are configured at link time. */
	cfg.queue_id = -1;

	/* We round up to the next power of 2 if necessary */
	cfg.cq_depth = rte_align32pow2(dequeue_depth);
	cfg.cq_depth_threshold = 1;

	/* User controls the LDB high watermark via enqueue depth. The DIR high
	 * watermark is equal, unless the directed credit pool is too small.
	 */
	if (dlb2->version == DLB2_HW_V2) {
		ldb_credit_high_watermark = enqueue_depth;
		/* Don't use enqueue_depth if it would require more directed
		 * credits than are available.
		 */
		dir_credit_high_watermark =
			RTE_MIN(enqueue_depth,
				handle->cfg.num_dir_credits / dlb2->num_ports);
	} else {
		credit_high_watermark = enqueue_depth;
	}

	ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		goto error_exit;
	}

	qm_port_id = cfg.response.id;

	DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
		     ev_port->id, qm_port_id);

	qm_port = &ev_port->qm_port;
	qm_port->ev_port = ev_port; /* back ptr */
	qm_port->dlb2 = dlb2; /* back ptr */

	/*
	 * Init local qe struct(s).
	 * Note: MOVDIR64 requires the enqueue QE to be aligned
	 */

	snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
		 ev_port->id);

	ret = dlb2_init_qe_mem(qm_port, mz_name);

	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
		goto error_exit;
	}

	qm_port->id = qm_port_id;

	if (dlb2->version == DLB2_HW_V2) {
		qm_port->cached_ldb_credits = 0;
		qm_port->cached_dir_credits = 0;
	} else {
		qm_port->cached_credits = 0;
	}

	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
	 * the effective depth is smaller.
	 */
	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
	qm_port->cq_idx = 0;
	qm_port->cq_idx_unmasked = 0;

	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
		qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
	else
		qm_port->cq_depth_mask = cfg.cq_depth - 1;

	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
	/* starting value of gen bit - it toggles at wrap time */
	qm_port->gen_bit = 1;

	qm_port->int_armed = false;

	/* Save off for later use in info and lookup APIs. */
	qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];

	qm_port->dequeue_depth = dequeue_depth;

	/* Directed ports are auto-pop, by default. */
	qm_port->token_pop_mode = AUTO_POP;
	qm_port->owed_tokens = 0;
	qm_port->issued_releases = 0;

	/* Save config message too. */
	rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));

	/* update state */
	qm_port->state = PORT_STARTED; /* enabled at create time */
	qm_port->config_state = DLB2_CONFIGURED;

	if (dlb2->version == DLB2_HW_V2) {
		qm_port->dir_credits = dir_credit_high_watermark;
		qm_port->ldb_credits = ldb_credit_high_watermark;
		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;

		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
			     qm_port_id,
			     dequeue_depth,
			     dir_credit_high_watermark,
			     ldb_credit_high_watermark);
	} else {
		qm_port->credits = credit_high_watermark;
		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;

		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
			     qm_port_id,
			     dequeue_depth,
			     credit_high_watermark);
	}

	rte_spinlock_unlock(&handle->resource_lock);

	return 0;

error_exit:

	if (qm_port)
		dlb2_free_qe_mem(qm_port);

	rte_spinlock_unlock(&handle->resource_lock);

	DLB2_LOG_ERR("dlb2: create dir port failed!\n");

	return ret;
}
static int
dlb2_eventdev_port_setup(struct rte_eventdev *dev,
			 uint8_t ev_port_id,
			 const struct rte_event_port_conf *port_conf)
{
	struct dlb2_eventdev *dlb2;
	struct dlb2_eventdev_port *ev_port;
	int ret;

	if (dev == NULL || port_conf == NULL) {
		DLB2_LOG_ERR("Null parameter\n");
		return -EINVAL;
	}

	dlb2 = dlb2_pmd_priv(dev);

	if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
		return -EINVAL;

	if (port_conf->dequeue_depth >
	    evdev_dlb2_default_info.max_event_port_dequeue_depth ||
	    port_conf->enqueue_depth >
	    evdev_dlb2_default_info.max_event_port_enqueue_depth)
		return -EINVAL;

	ev_port = &dlb2->ev_ports[ev_port_id];
	/* configured? */
	if (ev_port->setup_done) {
		DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
		return -EINVAL;
	}

	ev_port->qm_port.is_directed = port_conf->event_port_cfg &
		RTE_EVENT_PORT_CFG_SINGLE_LINK;

	if (!ev_port->qm_port.is_directed) {
		ret = dlb2_hw_create_ldb_port(dlb2,
					      ev_port,
					      port_conf->dequeue_depth,
					      port_conf->enqueue_depth);
		if (ret < 0) {
			DLB2_LOG_ERR("Failed to create the LB port, ev_port_id=%d\n",
				     ev_port_id);
			return ret;
		}
	} else {
		ret = dlb2_hw_create_dir_port(dlb2,
					      ev_port,
					      port_conf->dequeue_depth,
					      port_conf->enqueue_depth);
		if (ret < 0) {
			DLB2_LOG_ERR("Failed to create the DIR port\n");
			return ret;
		}
	}

	/* Save off port config for reconfig */
	ev_port->conf = *port_conf;

	ev_port->id = ev_port_id;
	ev_port->enq_configured = true;
	ev_port->setup_done = true;
	ev_port->inflight_max = port_conf->new_event_threshold;
	ev_port->implicit_release = !(port_conf->event_port_cfg &
		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
	ev_port->outstanding_releases = 0;
	ev_port->inflight_credits = 0;
	ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
	ev_port->dlb2 = dlb2; /* reverse link */

	/* Tear down pre-existing port->queue links */
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);

	dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];

	return 0;
}

static int16_t
dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
			    uint32_t qm_port_id,
			    uint16_t qm_qid,
			    uint8_t priority)
{
	struct dlb2_map_qid_args cfg;
	int32_t ret;

	if (handle == NULL)
		return -EINVAL;

	/* Build message */
	cfg.port_id = qm_port_id;
	cfg.qid = qm_qid;
	cfg.priority = EV_TO_DLB2_PRIO(priority);

	ret = dlb2_iface_map_qid(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
			     handle->domain_id, cfg.port_id,
			     cfg.qid,
			     cfg.priority);
	} else {
		DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
			     qm_qid, qm_port_id);
	}

	return ret;
}

static int
dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
			  struct dlb2_eventdev_port *ev_port,
			  struct dlb2_eventdev_queue *ev_queue,
			  uint8_t priority)
{
	int first_avail = -1;
	int ret, i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (ev_port->link[i].valid) {
			if (ev_port->link[i].queue_id == ev_queue->id &&
			    ev_port->link[i].priority == priority) {
				if (ev_port->link[i].mapped)
					return 0; /* already mapped */
				first_avail = i;
			}
		} else if (first_avail == -1)
			first_avail = i;
	}
	if (first_avail == -1) {
		DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
			     ev_port->qm_port.id);
		return -EINVAL;
	}

	ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
					  ev_port->qm_port.id,
					  ev_queue->qm_queue.id,
					  priority);

	if (!ret)
		ev_port->link[first_avail].mapped = true;

	return ret;
}

static int32_t
dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
			 struct dlb2_eventdev_queue *ev_queue,
			 int32_t qm_port_id)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_create_dir_queue_args cfg;
	int32_t ret;

	/* The directed port is always configured before its queue */
	cfg.port_id = qm_port_id;

	if (ev_queue->depth_threshold == 0) {
		cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
		ev_queue->depth_threshold =
			RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
	} else {
		cfg.depth_threshold = ev_queue->depth_threshold;
	}

	ret = dlb2_iface_dir_queue_create(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return -EINVAL;
	}

	return cfg.response.id;
}

static int
dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
			      struct dlb2_eventdev_queue *ev_queue,
			      struct dlb2_eventdev_port *ev_port)
{
	int32_t qm_qid;

	qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);

	if (qm_qid < 0) {
		DLB2_LOG_ERR("Failed to create the DIR queue\n");
		return qm_qid;
	}

	dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;

	ev_queue->qm_queue.id = qm_qid;

	return 0;
}

static int
dlb2_do_port_link(struct rte_eventdev *dev,
		  struct dlb2_eventdev_queue *ev_queue,
		  struct dlb2_eventdev_port *ev_port,
		  uint8_t prio)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int err;

	/* Don't link until start time. */
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		return 0;

	if (ev_queue->qm_queue.is_directed)
		err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
	else
		err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);

	if (err) {
		DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
			     ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
			     ev_queue->id, ev_port->id);

		rte_errno = err;
		return -1;
	}

	return 0;
}

static int
dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
			uint8_t queue_id,
			bool link_exists,
			int index)
{
	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
	struct dlb2_eventdev_queue *ev_queue;
	bool port_is_dir, queue_is_dir;

	if (queue_id > dlb2->num_queues) {
		rte_errno = -EINVAL;
		return -1;
	}

	ev_queue = &dlb2->ev_queues[queue_id];

	if (!ev_queue->setup_done &&
	    ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
		rte_errno = -EINVAL;
		return -1;
	}

	port_is_dir = ev_port->qm_port.is_directed;
	queue_is_dir = ev_queue->qm_queue.is_directed;

	if (port_is_dir != queue_is_dir) {
		DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
			     queue_is_dir ? "DIR" : "LDB", ev_queue->id,
			     port_is_dir ? "DIR" : "LDB", ev_port->id);

		rte_errno = -EINVAL;
		return -1;
	}

	/* Check if there is space for the requested link */
	if (!link_exists && index == -1) {
		DLB2_LOG_ERR("no space for new link\n");
		rte_errno = -ENOSPC;
		return -1;
	}

	/* Check if the directed port is already linked */
	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
	    !link_exists) {
		DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
			     ev_port->id);
		rte_errno = -EINVAL;
		return -1;
	}

	/* Check if the directed queue is already linked */
	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
	    !link_exists) {
		DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
			     ev_queue->id);
		rte_errno = -EINVAL;
		return -1;
	}

	return 0;
}
static int
dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct dlb2_eventdev_port *ev_port = event_port;
	struct dlb2_eventdev *dlb2;
	int i, j;

	if (ev_port == NULL) {
		DLB2_LOG_ERR("dlb2: evport not setup\n");
		rte_errno = -EINVAL;
		return 0;
	}

	if (!ev_port->setup_done &&
	    ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
		DLB2_LOG_ERR("dlb2: evport not setup\n");
		rte_errno = -EINVAL;
		return 0;
	}

	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
	 * queues pointer.
	 */
	if (nb_links == 0) {
		DLB2_LOG_DBG("dlb2: nb_links is 0\n");
		return 0; /* Ignore and return success */
	}

	dlb2 = ev_port->dlb2;

	DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
		     nb_links,
		     ev_port->qm_port.is_directed ? "DIR" : "LDB",
		     ev_port->id);

	for (i = 0; i < nb_links; i++) {
		struct dlb2_eventdev_queue *ev_queue;
		uint8_t queue_id, prio;
		bool found = false;
		int index = -1;

		queue_id = queues[i];
		prio = priorities[i];

		/* Check if the link already exists. */
		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
			if (ev_port->link[j].valid) {
				if (ev_port->link[j].queue_id == queue_id) {
					found = true;
					index = j;
					break;
				}
			} else if (index == -1) {
				index = j;
			}

		/* could not link */
		if (index == -1)
			break;

		/* Check if already linked at the requested priority */
		if (found && ev_port->link[j].priority == prio)
			continue;

		if (dlb2_validate_port_link(ev_port, queue_id, found, index))
			break; /* return index of offending queue */

		ev_queue = &dlb2->ev_queues[queue_id];

		if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
			break; /* return index of offending queue */

		ev_queue->num_links++;

		ev_port->link[index].queue_id = queue_id;
		ev_port->link[index].priority = prio;
		ev_port->link[index].valid = true;
		/* Entry already exists? If so, then must be prio change */
		if (!found)
			ev_port->num_links++;
	}
	return i;
}

static int16_t
dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
				uint32_t qm_port_id,
				uint16_t qm_qid)
{
	struct dlb2_unmap_qid_args cfg;
	int32_t ret;

	if (handle == NULL)
		return -EINVAL;

	/* Build message */
	cfg.port_id = qm_port_id;
	cfg.qid = qm_qid;

	ret = dlb2_iface_unmap_qid(handle, &cfg);
	if (ret < 0)
		DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);

	return ret;
}

static int
dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
			    struct dlb2_eventdev_port *ev_port,
			    struct dlb2_eventdev_queue *ev_queue)
{
	int ret, i;

	/* Don't unlink until start time. */
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		return 0;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (ev_port->link[i].valid &&
		    ev_port->link[i].queue_id == ev_queue->id)
			break; /* found */
	}

	/* This is expected with the eventdev API!
	 * It blindly attempts to unmap all queues.
	 */
	if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
			     ev_queue->qm_queue.id,
			     ev_port->qm_port.id);
		return 0;
	}

	ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
					      ev_port->qm_port.id,
					      ev_queue->qm_queue.id);
	if (!ret)
		ev_port->link[i].mapped = false;

	return ret;
}

static int
dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
			  uint8_t queues[], uint16_t nb_unlinks)
{
	struct dlb2_eventdev_port *ev_port = event_port;
	struct dlb2_eventdev *dlb2;
	int i;

	RTE_SET_USED(dev);

	if (!ev_port->setup_done) {
		DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
			     ev_port->id);
		rte_errno = -EINVAL;
		return 0;
	}

	if (queues == NULL || nb_unlinks == 0) {
		DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
		return 0; /* Ignore and return success */
	}

	if (ev_port->qm_port.is_directed) {
		DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
			     ev_port->id);
		rte_errno = 0;
		return nb_unlinks; /* as if success */
	}

	dlb2 = ev_port->dlb2;

	for (i = 0; i < nb_unlinks; i++) {
		struct dlb2_eventdev_queue *ev_queue;
		int ret, j;

		if (queues[i] >= dlb2->num_queues) {
			DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
			rte_errno = -EINVAL;
			return i; /* return index of offending queue */
		}

		ev_queue = &dlb2->ev_queues[queues[i]];

		/* Does a link exist? */
		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
			if (ev_port->link[j].queue_id == queues[i] &&
			    ev_port->link[j].valid)
				break;

		if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
			continue;

		ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
		if (ret) {
			DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
				     ret, ev_port->id, queues[i]);
			rte_errno = -ENOENT;
			return i; /* return index of offending queue */
		}

		ev_port->link[j].valid = false;
		ev_port->num_links--;
		ev_queue->num_links--;
	}

	return nb_unlinks;
}
static int
dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
				       void *event_port)
{
	struct dlb2_eventdev_port *ev_port = event_port;
	struct dlb2_eventdev *dlb2;
	struct dlb2_hw_dev *handle;
	struct dlb2_pending_port_unmaps_args cfg;
	int ret;

	RTE_SET_USED(dev);

	if (!ev_port->setup_done) {
		DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
			     ev_port->id);
		rte_errno = -EINVAL;
		return 0;
	}

	cfg.port_id = ev_port->qm_port.id;
	dlb2 = ev_port->dlb2;
	handle = &dlb2->qm_instance;
	ret = dlb2_iface_pending_port_unmaps(handle, &cfg);

	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return ret;
	}

	return cfg.response.id;
}

static int
dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int ret, i;

	/* If an event queue or port was previously configured, but hasn't been
	 * reconfigured, reapply its original configuration.
	 */
	for (i = 0; i < dlb2->num_queues; i++) {
		struct dlb2_eventdev_queue *ev_queue;

		ev_queue = &dlb2->ev_queues[i];

		if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
			continue;

		ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
		if (ret < 0) {
			DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d", i);
			return ret;
		}
	}

	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];

		if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
			continue;

		ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
		if (ret < 0) {
			DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d",
				     i);
			return ret;
		}
	}

	return 0;
}

static int
dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int i;

	/* Perform requested port->queue links */
	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
		int j;

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			struct dlb2_eventdev_queue *ev_queue;
			uint8_t prio, queue_id;

			if (!ev_port->link[j].valid)
				continue;

			prio = ev_port->link[j].priority;
			queue_id = ev_port->link[j].queue_id;

			if (dlb2_validate_port_link(ev_port, queue_id, true, j))
				return -EINVAL;

			ev_queue = &dlb2->ev_queues[queue_id];

			if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
				return -EINVAL;
		}
	}

	return 0;
}

static int
dlb2_eventdev_start(struct rte_eventdev *dev)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_start_domain_args cfg;
	int ret, i;

	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
	if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
		DLB2_LOG_ERR("bad state %d for dev_start\n",
			     (int)dlb2->run_state);
		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
		return -EINVAL;
	}
	dlb2->run_state = DLB2_RUN_STATE_STARTING;
	rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);

	/* If the device was configured more than once, some event ports and/or
	 * queues may need to be reconfigured.
	 */
	ret = dlb2_eventdev_reapply_configuration(dev);
	if (ret)
		return ret;

	/* The DLB PMD delays port links until the device is started. */
	ret = dlb2_eventdev_apply_port_links(dev);
	if (ret)
		return ret;

	for (i = 0; i < dlb2->num_ports; i++) {
		if (!dlb2->ev_ports[i].setup_done) {
			DLB2_LOG_ERR("dlb2: port %d not setup", i);
			return -ESTALE;
		}
	}

	for (i = 0; i < dlb2->num_queues; i++) {
		if (dlb2->ev_queues[i].num_links == 0) {
			DLB2_LOG_ERR("dlb2: queue %d is not linked", i);
			return -ENOLINK;
		}
	}

	ret = dlb2_iface_sched_domain_start(handle, &cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
			     ret, dlb2_error_strings[cfg.response.status]);
		return ret;
	}

	dlb2->run_state = DLB2_RUN_STATE_STARTED;
	DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");

	return 0;
}

static uint8_t cmd_byte_map[DLB2_NUM_PORT_TYPES][DLB2_NUM_HW_SCHED_TYPES] = {
	{
		/* Load-balanced cmd bytes */
		[RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
		[RTE_EVENT_OP_FORWARD] = DLB2_FWD_CMD_BYTE,
		[RTE_EVENT_OP_RELEASE] = DLB2_COMP_CMD_BYTE,
	},
	{
		/* Directed cmd bytes */
		[RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
		[RTE_EVENT_OP_FORWARD] = DLB2_NEW_CMD_BYTE,
		[RTE_EVENT_OP_RELEASE] = DLB2_NOOP_CMD_BYTE,
	},
};
static inline uint32_t
dlb2_port_credits_get(struct dlb2_port *qm_port,
		      enum dlb2_hw_queue_types type)
{
	uint32_t credits = *qm_port->credit_pool[type];
	uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;

	if (unlikely(credits < batch_size))
		batch_size = credits;

	if (likely(credits &&
		   __atomic_compare_exchange_n(
			qm_port->credit_pool[type],
			&credits, credits - batch_size, false,
			__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
		return batch_size;
	else
		return 0;
}
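/*
 * Worked example (assuming a 32-credit batch size): a pool holding 100
 * credits satisfies the full batch and drops to 68; a pool holding 10
 * yields a partial batch of 10; an empty pool, or a lost CAS race against
 * another port, returns 0 and the caller retries later.
 */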
static inline void
dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
			  struct dlb2_eventdev_port *ev_port)
{
	uint16_t quanta = ev_port->credit_update_quanta;

	if (ev_port->inflight_credits >= quanta * 2) {
		/* Replenish credits, saving one quanta for enqueues */
		uint16_t val = ev_port->inflight_credits - quanta;

		__atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
		ev_port->inflight_credits -= val;
	}
}
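/*
 * Worked example: with a quanta of 32, a port holding 70 local credits
 * returns 70 - 32 = 38 to the global pool and keeps 32; a port holding 63
 * (less than 2 * quanta) returns nothing yet.
 */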
static inline int
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
			      struct dlb2_eventdev_port *ev_port)
{
	uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
						__ATOMIC_SEQ_CST);
	const int num = 1;

	if (unlikely(ev_port->inflight_max < sw_inflights)) {
		DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
		rte_errno = -ENOSPC;
		return 1;
	}

	if (ev_port->inflight_credits < num) {
		/* check if event enqueue brings ev_port over max threshold */
		uint32_t credit_update_quanta = ev_port->credit_update_quanta;

		if (sw_inflights + credit_update_quanta >
				dlb2->new_event_limit) {
			DLB2_INC_STAT(
			ev_port->stats.traffic.tx_nospc_new_event_limit,
			1);
			rte_errno = -ENOSPC;
			return 1;
		}

		__atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
				   __ATOMIC_SEQ_CST);
		ev_port->inflight_credits += (credit_update_quanta);

		if (ev_port->inflight_credits < num) {
			DLB2_INC_STAT(
			ev_port->stats.traffic.tx_nospc_inflight_credits,
			1);
			rte_errno = -ENOSPC;
			return 1;
		}
	}

	return 0;
}

static inline int
dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
{
	if (unlikely(qm_port->cached_ldb_credits == 0)) {
		qm_port->cached_ldb_credits =
			dlb2_port_credits_get(qm_port,
					      DLB2_LDB_QUEUE);
		if (unlikely(qm_port->cached_ldb_credits == 0)) {
			DLB2_INC_STAT(
			qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
			1);
			DLB2_LOG_DBG("ldb credits exhausted\n");
			return 1; /* credits exhausted */
		}
	}

	return 0;
}

static inline int
dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
{
	if (unlikely(qm_port->cached_dir_credits == 0)) {
		qm_port->cached_dir_credits =
			dlb2_port_credits_get(qm_port,
					      DLB2_DIR_QUEUE);
		if (unlikely(qm_port->cached_dir_credits == 0)) {
			DLB2_INC_STAT(
			qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
			1);
			DLB2_LOG_DBG("dir credits exhausted\n");
			return 1; /* credits exhausted */
		}
	}

	return 0;
}

static inline int
dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
{
	if (unlikely(qm_port->cached_credits == 0)) {
		qm_port->cached_credits =
			dlb2_port_credits_get(qm_port,
					      DLB2_COMBINED_POOL);
		if (unlikely(qm_port->cached_credits == 0)) {
			DLB2_INC_STAT(
			qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
			DLB2_LOG_DBG("credits exhausted\n");
			return 1; /* credits exhausted */
		}
	}

	return 0;
}

static __rte_always_inline void
dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
	      struct process_local_port_data *port_data)
{
	dlb2_movdir64b(port_data->pp_addr, qe4);
}
static inline void
dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
{
	struct process_local_port_data *port_data;
	struct dlb2_cq_pop_qe *qe;

	RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);

	qe = qm_port->consume_qe;

	qe->tokens = num - 1;

	/* No store fence needed since no pointer is being sent, and CQ token
	 * pops can be safely reordered with other HCWs.
	 */
	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];

	dlb2_movntdq_single(port_data->pp_addr, qe);

	DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);

	qm_port->owed_tokens = 0;
}
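/*
 * Worked example: because the tokens field is 0-based, returning four CQ
 * tokens writes qe->tokens = 3, so dlb2_consume_qe_immediate(qm_port, 4)
 * pops four entries with a single HCW.
 */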
2413 dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2415 struct process_local_port_data *port_data)
2417 /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2418 * application writes complete before enqueueing the QE.
2423 dlb2_pp_write(qm_port->qe4, port_data);
2427 dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2429 struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2430 int num = qm_port->owed_tokens;
2432 qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2433 qe[idx].tokens = num - 1;
2435 qm_port->owed_tokens = 0;
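/* In DELAYED_POP mode the pop is not issued at dequeue time; instead it
 * is folded into a later enqueue by overwriting slot 'idx' of the 4-QE
 * enqueue line with this pop command once enough releases have been
 * issued (see the token_pop_thresh checks in the enqueue path below).
 */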
2439 dlb2_event_build_hcws(struct dlb2_port *qm_port,
2440 const struct rte_event ev[],
2442 uint8_t *sched_type,
2445 struct dlb2_enqueue_qe *qe;
2446 uint16_t sched_word[4];
2452 sse_qe[0] = _mm_setzero_si128();
2453 sse_qe[1] = _mm_setzero_si128();
2457 /* Construct the metadata portion of two HCWs in one 128b SSE
2458 * register. HCW metadata is constructed in the SSE registers
2460 * sse_qe[0][63:0]: qe[0]'s metadata
2461 * sse_qe[0][127:64]: qe[1]'s metadata
2462 * sse_qe[1][63:0]: qe[2]'s metadata
2463 * sse_qe[1][127:64]: qe[3]'s metadata
2466 /* Convert the event operation into a command byte and store it
2468 * sse_qe[0][63:56] = cmd_byte_map[is_directed][ev[0].op]
2469 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2470 * sse_qe[1][63:56] = cmd_byte_map[is_directed][ev[2].op]
2471 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2473 #define DLB2_QE_CMD_BYTE 7
2474 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2475 cmd_byte_map[qm_port->is_directed][ev[0].op],
2477 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2478 cmd_byte_map[qm_port->is_directed][ev[1].op],
2479 DLB2_QE_CMD_BYTE + 8);
2480 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2481 cmd_byte_map[qm_port->is_directed][ev[2].op],
2483 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2484 cmd_byte_map[qm_port->is_directed][ev[3].op],
2485 DLB2_QE_CMD_BYTE + 8);
2487 /* Store priority, scheduling type, and queue ID in the sched
2488 * word array because these values are re-used when the
2489 * destination is a directed queue.
2491 sched_word[0] = EV_TO_DLB2_PRIO(ev[0].priority) << 10 |
2492 sched_type[0] << 8 |
2494 sched_word[1] = EV_TO_DLB2_PRIO(ev[1].priority) << 10 |
2495 sched_type[1] << 8 |
2497 sched_word[2] = EV_TO_DLB2_PRIO(ev[2].priority) << 10 |
2498 sched_type[2] << 8 |
2500 sched_word[3] = EV_TO_DLB2_PRIO(ev[3].priority) << 10 |
2501 sched_type[3] << 8 |
2504 /* Store the event priority, scheduling type, and queue ID in
2506 * sse_qe[0][31:16] = sched_word[0]
2507 * sse_qe[0][95:80] = sched_word[1]
2508 * sse_qe[1][31:16] = sched_word[2]
2509 * sse_qe[1][95:80] = sched_word[3]
2511 #define DLB2_QE_QID_SCHED_WORD 1
2512 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2514 DLB2_QE_QID_SCHED_WORD);
2515 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2517 DLB2_QE_QID_SCHED_WORD + 4);
2518 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2520 DLB2_QE_QID_SCHED_WORD);
2521 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2523 DLB2_QE_QID_SCHED_WORD + 4);
2525 /* If the destination is a load-balanced queue, store the lock
2526 * ID. If it is a directed queue, DLB places this field in
2527 * bytes 10-11 of the received QE, so we format it accordingly:
2528 * sse_qe[0][47:32] = dir queue ? sched_word[0] : flow_id[0]
2529 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2530 * sse_qe[1][47:32] = dir queue ? sched_word[2] : flow_id[2]
2531 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2533 #define DLB2_QE_LOCK_ID_WORD 2
2534 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2535 (sched_type[0] == DLB2_SCHED_DIRECTED) ?
2536 sched_word[0] : ev[0].flow_id,
2537 DLB2_QE_LOCK_ID_WORD);
2538 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2539 (sched_type[1] == DLB2_SCHED_DIRECTED) ?
2540 sched_word[1] : ev[1].flow_id,
2541 DLB2_QE_LOCK_ID_WORD + 4);
2542 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2543 (sched_type[2] == DLB2_SCHED_DIRECTED) ?
2544 sched_word[2] : ev[2].flow_id,
2545 DLB2_QE_LOCK_ID_WORD);
2546 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2547 (sched_type[3] == DLB2_SCHED_DIRECTED) ?
2548 sched_word[3] : ev[3].flow_id,
2549 DLB2_QE_LOCK_ID_WORD + 4);
2551 /* Store the event type and sub event type in the metadata:
2552 * sse_qe[0][15:0] = sub_event_type[0] << 8 | event_type[0]
2553 * sse_qe[0][79:64] = sub_event_type[1] << 8 | event_type[1]
2554 * sse_qe[1][15:0] = sub_event_type[2] << 8 | event_type[2]
2555 * sse_qe[1][79:64] = sub_event_type[3] << 8 | event_type[3]
2557 #define DLB2_QE_EV_TYPE_WORD 0
2558 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2559 ev[0].sub_event_type << 8 |
2561 DLB2_QE_EV_TYPE_WORD);
2562 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2563 ev[1].sub_event_type << 8 |
2565 DLB2_QE_EV_TYPE_WORD + 4);
2566 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2567 ev[2].sub_event_type << 8 |
2569 DLB2_QE_EV_TYPE_WORD);
2570 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2571 ev[3].sub_event_type << 8 |
2573 DLB2_QE_EV_TYPE_WORD + 4);
2575 /* Store the metadata to memory (use the double-precision
2576 * _mm_storeh_pd because there is no integer function for
2577 * storing the upper 64b):
2578 * qe[0] metadata = sse_qe[0][63:0]
2579 * qe[1] metadata = sse_qe[0][127:64]
2580 * qe[2] metadata = sse_qe[1][63:0]
2581 * qe[3] metadata = sse_qe[1][127:64]
2583 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2584 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2585 (__m128d)sse_qe[0]);
2586 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2587 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2588 (__m128d)sse_qe[1]);
2590 qe[0].data = ev[0].u64;
2591 qe[1].data = ev[1].u64;
2592 qe[2].data = ev[2].u64;
2593 qe[3].data = ev[3].u64;
2599 for (i = 0; i < num; i++) {
2601 cmd_byte_map[qm_port->is_directed][ev[i].op];
2602 qe[i].sched_type = sched_type[i];
2603 qe[i].data = ev[i].u64;
2604 qe[i].qid = queue_id[i];
2605 qe[i].priority = EV_TO_DLB2_PRIO(ev[i].priority);
2606 qe[i].lock_id = ev[i].flow_id;
2607 if (sched_type[i] == DLB2_SCHED_DIRECTED) {
2608 struct dlb2_msg_info *info =
2609 (struct dlb2_msg_info *)&qe[i].lock_id;
2611 info->qid = queue_id[i];
2612 info->sched_type = DLB2_SCHED_DIRECTED;
2613 info->priority = qe[i].priority;
2615 qe[i].u.event_type.major = ev[i].event_type;
2616 qe[i].u.event_type.sub = ev[i].sub_event_type;
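/* Sketch of the 8B HCW metadata layout implied by the insert offsets
 * used above (field names are illustrative, not the driver's own):
 *
 *   word 0 (bytes 0-1): sub_event_type << 8 | event_type
 *   word 1 (bytes 2-3): priority << 10 | sched_type << 8 | qid
 *   word 2 (bytes 4-5): flow_id (lock ID), or a copy of word 1 when
 *                       the destination is a directed queue
 *   byte 7:             cmd_byte_map[is_directed][ev->op]
 *
 * The remaining 8B of each 16B QE carry ev->u64 untouched.
 */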
2625 dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2626 struct dlb2_port *qm_port,
2627 const struct rte_event ev[],
2628 uint8_t *sched_type,
2631 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2632 struct dlb2_eventdev_queue *ev_queue;
2633 uint16_t *cached_credits = NULL;
2634 struct dlb2_queue *qm_queue;
2636 ev_queue = &dlb2->ev_queues[ev->queue_id];
2637 qm_queue = &ev_queue->qm_queue;
2638 *queue_id = qm_queue->id;
2640 /* Ignore sched_type and hardware credits on release events */
2641 if (ev->op == RTE_EVENT_OP_RELEASE)
2644 if (!qm_queue->is_directed) {
2645 /* Load balanced destination queue */
2647 if (dlb2->version == DLB2_HW_V2) {
2648 if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2649 rte_errno = -ENOSPC;
2652 cached_credits = &qm_port->cached_ldb_credits;
2654 if (dlb2_check_enqueue_hw_credits(qm_port)) {
2655 rte_errno = -ENOSPC;
2658 cached_credits = &qm_port->cached_credits;
2660 switch (ev->sched_type) {
2661 case RTE_SCHED_TYPE_ORDERED:
2662 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2663 if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2664 DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
2666 rte_errno = -EINVAL;
2669 *sched_type = DLB2_SCHED_ORDERED;
2671 case RTE_SCHED_TYPE_ATOMIC:
2672 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2673 *sched_type = DLB2_SCHED_ATOMIC;
2675 case RTE_SCHED_TYPE_PARALLEL:
2676 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2677 if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2678 *sched_type = DLB2_SCHED_ORDERED;
2680 *sched_type = DLB2_SCHED_UNORDERED;
2683 DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2684 DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2685 rte_errno = -EINVAL;
2689 /* Directed destination queue */
2691 if (dlb2->version == DLB2_HW_V2) {
2692 if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2693 rte_errno = -ENOSPC;
2696 cached_credits = &qm_port->cached_dir_credits;
2698 if (dlb2_check_enqueue_hw_credits(qm_port)) {
2699 rte_errno = -ENOSPC;
2702 cached_credits = &qm_port->cached_credits;
2704 DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2706 *sched_type = DLB2_SCHED_DIRECTED;
2711 case RTE_EVENT_OP_NEW:
2712 /* Check that a sw credit is available */
2713 if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2714 rte_errno = -ENOSPC;
2717 ev_port->inflight_credits--;
2718 (*cached_credits)--;
2720 case RTE_EVENT_OP_FORWARD:
2721 /* Check for outstanding_releases underflow. If this occurs,
2722 * the application is not using the EVENT_OPs correctly; for
2723 * example, forwarding or releasing events that were not dequeued.
2726 RTE_ASSERT(ev_port->outstanding_releases > 0);
2727 ev_port->outstanding_releases--;
2728 qm_port->issued_releases++;
2729 (*cached_credits)--;
2731 case RTE_EVENT_OP_RELEASE:
2732 ev_port->inflight_credits++;
2733 /* Check for outstanding_releases underflow. If this occurs,
2734 * the application is not using the EVENT_OPs correctly; for
2735 * example, forwarding or releasing events that were not dequeued.
2738 RTE_ASSERT(ev_port->outstanding_releases > 0);
2739 ev_port->outstanding_releases--;
2740 qm_port->issued_releases++;
2742 /* Replenish s/w credits if enough are cached */
2743 dlb2_replenish_sw_credits(dlb2, ev_port);
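/* Credit accounting per op, as implemented above: NEW consumes one
 * software inflight credit and one hardware credit; FORWARD consumes
 * one hardware credit and retires an outstanding release; RELEASE
 * consumes no hardware credit and hands a software credit back,
 * triggering a pool replenish once enough have accumulated.
 */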
2747 DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2748 DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2750 #ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
2751 if (ev->op != RTE_EVENT_OP_RELEASE) {
2752 DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
2753 DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
2760 static inline uint16_t
2761 __dlb2_event_enqueue_burst(void *event_port,
2762 const struct rte_event events[],
2766 struct dlb2_eventdev_port *ev_port = event_port;
2767 struct dlb2_port *qm_port = &ev_port->qm_port;
2768 struct process_local_port_data *port_data;
2771 RTE_ASSERT(ev_port->enq_configured);
2772 RTE_ASSERT(events != NULL);
2776 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2779 uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
2780 uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
2784 memset(qm_port->qe4,
2786 DLB2_NUM_QES_PER_CACHE_LINE *
2787 sizeof(struct dlb2_enqueue_qe));
2789 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2790 const struct rte_event *ev = &events[i + j];
2791 int16_t thresh = qm_port->token_pop_thresh;
2794 qm_port->token_pop_mode == DELAYED_POP &&
2795 (ev->op == RTE_EVENT_OP_FORWARD ||
2796 ev->op == RTE_EVENT_OP_RELEASE) &&
2797 qm_port->issued_releases >= thresh - 1) {
2798 /* Insert the token pop QE and break out. This
2799 * may result in a partial HCW, but that is
2800 * simpler than supporting arbitrary QE insertion.
2803 dlb2_construct_token_pop_qe(qm_port, j);
2805 /* Reset the releases for the next QE batch */
2806 qm_port->issued_releases -= thresh;
2813 if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
2822 dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
2823 sched_types, queue_ids);
2825 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2827 /* Don't include the token pop QE in the enqueue count */
2830 /* Don't interpret j < DLB2_NUM_... as out-of-credits if a token pop QE was inserted above (pop_offs != 0). */
2833 if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2841 dlb2_event_enqueue_burst(void *event_port,
2842 const struct rte_event events[],
2845 return __dlb2_event_enqueue_burst(event_port, events, num, false);
2849 dlb2_event_enqueue_burst_delayed(void *event_port,
2850 const struct rte_event events[],
2853 return __dlb2_event_enqueue_burst(event_port, events, num, true);
2856 static inline uint16_t
2857 dlb2_event_enqueue(void *event_port,
2858 const struct rte_event events[])
2860 return __dlb2_event_enqueue_burst(event_port, events, 1, false);
2863 static inline uint16_t
2864 dlb2_event_enqueue_delayed(void *event_port,
2865 const struct rte_event events[])
2867 return __dlb2_event_enqueue_burst(event_port, events, 1, true);
2871 dlb2_event_enqueue_new_burst(void *event_port,
2872 const struct rte_event events[],
2875 return __dlb2_event_enqueue_burst(event_port, events, num, false);
2879 dlb2_event_enqueue_new_burst_delayed(void *event_port,
2880 const struct rte_event events[],
2883 return __dlb2_event_enqueue_burst(event_port, events, num, true);
2887 dlb2_event_enqueue_forward_burst(void *event_port,
2888 const struct rte_event events[],
2891 return __dlb2_event_enqueue_burst(event_port, events, num, false);
2895 dlb2_event_enqueue_forward_burst_delayed(void *event_port,
2896 const struct rte_event events[],
2899 return __dlb2_event_enqueue_burst(event_port, events, num, true);
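/* Application-side usage sketch (hypothetical snippet; dev_id, port_id,
 * flow_hash, mbuf and handle_backpressure() are assumptions, not part
 * of this driver):
 *
 *   struct rte_event ev = {
 *           .op = RTE_EVENT_OP_NEW,
 *           .queue_id = 0,
 *           .sched_type = RTE_SCHED_TYPE_ATOMIC,
 *           .flow_id = flow_hash,
 *           .u64 = (uintptr_t)mbuf,
 *   };
 *   if (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0)
 *           handle_backpressure(rte_errno);
 *
 * All of the variants above funnel into __dlb2_event_enqueue_burst()
 * with a compile-time flag selecting the delayed token-pop path, so the
 * op-specific entry points cost nothing extra here; they exist so that
 * PMDs which can exploit the op hint may do so.
 */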
2903 dlb2_event_release(struct dlb2_eventdev *dlb2,
2907 struct process_local_port_data *port_data;
2908 struct dlb2_eventdev_port *ev_port;
2909 struct dlb2_port *qm_port;
2912 if (port_id >= dlb2->num_ports) {
2913 DLB2_LOG_ERR("Invalid port id %d in dlb2_event_release\n",
2915 rte_errno = -EINVAL;
2919 ev_port = &dlb2->ev_ports[port_id];
2920 qm_port = &ev_port->qm_port;
2921 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2925 if (qm_port->is_directed) {
2927 goto sw_credit_update;
2935 qm_port->qe4[0].cmd_byte = 0;
2936 qm_port->qe4[1].cmd_byte = 0;
2937 qm_port->qe4[2].cmd_byte = 0;
2938 qm_port->qe4[3].cmd_byte = 0;
2940 for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
2941 int16_t thresh = qm_port->token_pop_thresh;
2943 if (qm_port->token_pop_mode == DELAYED_POP &&
2944 qm_port->issued_releases >= thresh - 1) {
2945 /* Insert the token pop QE */
2946 dlb2_construct_token_pop_qe(qm_port, j);
2948 /* Reset the releases for the next QE batch */
2949 qm_port->issued_releases -= thresh;
2956 qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
2957 qm_port->issued_releases++;
2960 dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2962 /* Don't include the token pop QE in the release count */
2967 /* each release returns one credit */
2968 if (!ev_port->outstanding_releases) {
2969 DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
2973 ev_port->outstanding_releases -= i;
2974 ev_port->inflight_credits += i;
2976 /* Replenish s/w credits if enough releases are performed */
2977 dlb2_replenish_sw_credits(dlb2, ev_port);
2981 dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
2983 uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2985 /* Increment port credits; return a batch to the pool if the cached count exceeds twice the batch size */
2986 if (!qm_port->is_directed) {
2987 if (qm_port->dlb2->version == DLB2_HW_V2) {
2988 qm_port->cached_ldb_credits += num;
2989 if (qm_port->cached_ldb_credits >= 2 * batch_size) {
2991 qm_port->credit_pool[DLB2_LDB_QUEUE],
2992 batch_size, __ATOMIC_SEQ_CST);
2993 qm_port->cached_ldb_credits -= batch_size;
2996 qm_port->cached_credits += num;
2997 if (qm_port->cached_credits >= 2 * batch_size) {
2999 qm_port->credit_pool[DLB2_COMBINED_POOL],
3000 batch_size, __ATOMIC_SEQ_CST);
3001 qm_port->cached_credits -= batch_size;
3005 if (qm_port->dlb2->version == DLB2_HW_V2) {
3006 qm_port->cached_dir_credits += num;
3007 if (qm_port->cached_dir_credits >= 2 * batch_size) {
3009 qm_port->credit_pool[DLB2_DIR_QUEUE],
3010 batch_size, __ATOMIC_SEQ_CST);
3011 qm_port->cached_dir_credits -= batch_size;
3014 qm_port->cached_credits += num;
3015 if (qm_port->cached_credits >= 2 * batch_size) {
3017 qm_port->credit_pool[DLB2_COMBINED_POOL],
3018 batch_size, __ATOMIC_SEQ_CST);
3019 qm_port->cached_credits -= batch_size;
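/* Mirror image of the enqueue-side caching: credits freed by dequeues
 * accumulate locally and are only returned to the shared pool in
 * DLB2_SW_CREDIT_BATCH_SZ chunks once the cache holds at least two
 * batches, again trading a little credit slack for fewer atomic
 * operations on the hot path.
 */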
3026 dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
3027 struct dlb2_eventdev_port *ev_port,
3028 struct dlb2_port *qm_port,
3030 uint64_t start_ticks)
3032 struct process_local_port_data *port_data;
3033 uint64_t elapsed_ticks;
3035 port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
3037 elapsed_ticks = rte_get_timer_cycles() - start_ticks;
3039 /* Wait/poll time expired */
3040 if (elapsed_ticks >= timeout) {
3042 } else if (dlb2->umwait_allowed) {
3043 struct rte_power_monitor_cond pmc;
3044 volatile struct dlb2_dequeue_qe *cq_base;
3047 struct dlb2_dequeue_qe qe;
3049 uint64_t expected_value;
3050 volatile uint64_t *monitor_addr;
3052 qe_mask.qe.cq_gen = 1; /* set mask */
3054 cq_base = port_data->cq_base;
3055 monitor_addr = (volatile uint64_t *)(volatile void *)
3056 &cq_base[qm_port->cq_idx];
3057 monitor_addr++; /* cq_gen bit is in the second 64-bit word */
3059 if (qm_port->gen_bit)
3060 expected_value = qe_mask.raw_qe[1];
3064 pmc.addr = monitor_addr;
3065 pmc.val = expected_value;
3066 pmc.mask = qe_mask.raw_qe[1];
3067 pmc.size = sizeof(uint64_t);
3069 rte_power_monitor(&pmc, timeout + start_ticks);
3071 DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
3073 uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
3074 uint64_t curr_ticks = rte_get_timer_cycles();
3075 uint64_t init_ticks = curr_ticks;
3077 while ((curr_ticks - start_ticks < timeout) &&
3078 (curr_ticks - init_ticks < poll_interval))
3079 curr_ticks = rte_get_timer_cycles();
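/* dlb2_dequeue_wait() thus implements two wait strategies: when umwait
 * is allowed it arms rte_power_monitor() on the next QE's cq_gen word,
 * so the core sleeps until the device writes that cache line or the
 * timeout expires; otherwise it busy-polls the TSC in
 * RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL slices.
 */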
3086 dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
3087 struct dlb2_port *qm_port,
3088 struct rte_event *events,
3089 struct dlb2_dequeue_qe *qes,
3092 uint8_t *qid_mappings = qm_port->qid_mappings;
3095 for (i = 0, num = 0; i < cnt; i++) {
3096 struct dlb2_dequeue_qe *qe = &qes[i];
3097 int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
3098 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3099 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3100 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3101 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3104 /* Fill in event information.
3105 * Note that flow_id must be embedded in the data by
3106 * the app, such as the mbuf RSS hash field if the data buffer is an mbuf.
3109 if (unlikely(qe->error)) {
3110 DLB2_LOG_ERR("QE error bit ON\n");
3111 DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
3112 dlb2_consume_qe_immediate(qm_port, 1);
3113 continue; /* Ignore */
3116 events[num].u64 = qe->data;
3117 events[num].flow_id = qe->flow_id;
3118 events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
3119 events[num].event_type = qe->u.event_type.major;
3120 events[num].sub_event_type = qe->u.event_type.sub;
3121 events[num].sched_type = sched_type_map[qe->sched_type];
3122 events[num].impl_opaque = qe->qid_depth;
3124 /* qid not preserved for directed queues */
3125 if (qm_port->is_directed)
3126 evq_id = ev_port->link[0].queue_id;
3128 evq_id = qid_mappings[qe->qid];
3130 events[num].queue_id = evq_id;
3132 ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
3134 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
3138 DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
3144 dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
3145 struct dlb2_port *qm_port,
3146 struct rte_event *events,
3147 struct dlb2_dequeue_qe *qes)
3149 int sched_type_map[] = {
3150 [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3151 [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3152 [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3153 [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3155 const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
3156 uint8_t *qid_mappings = qm_port->qid_mappings;
3159 /* In the unlikely case that any of the QE error bits are set, process
3160 * them one at a time.
3162 if (unlikely(qes[0].error || qes[1].error ||
3163 qes[2].error || qes[3].error))
3164 return dlb2_process_dequeue_qes(ev_port, qm_port, events,
3167 events[0].u64 = qes[0].data;
3168 events[1].u64 = qes[1].data;
3169 events[2].u64 = qes[2].data;
3170 events[3].u64 = qes[3].data;
3172 /* Construct the metadata portion of two struct rte_events
3173 * in one 128b SSE register. Event metadata is constructed in the SSE
3174 * registers like so:
3175 * sse_evt[0][63:0]: event[0]'s metadata
3176 * sse_evt[0][127:64]: event[1]'s metadata
3177 * sse_evt[1][63:0]: event[2]'s metadata
3178 * sse_evt[1][127:64]: event[3]'s metadata
3180 sse_evt[0] = _mm_setzero_si128();
3181 sse_evt[1] = _mm_setzero_si128();
3183 /* Convert the hardware queue ID to an event queue ID and store it in
3185 * sse_evt[0][47:40] = qid_mappings[qes[0].qid]
3186 * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
3187 * sse_evt[1][47:40] = qid_mappings[qes[2].qid]
3188 * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
3190 #define DLB_EVENT_QUEUE_ID_BYTE 5
3191 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3192 qid_mappings[qes[0].qid],
3193 DLB_EVENT_QUEUE_ID_BYTE);
3194 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3195 qid_mappings[qes[1].qid],
3196 DLB_EVENT_QUEUE_ID_BYTE + 8);
3197 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3198 qid_mappings[qes[2].qid],
3199 DLB_EVENT_QUEUE_ID_BYTE);
3200 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3201 qid_mappings[qes[3].qid],
3202 DLB_EVENT_QUEUE_ID_BYTE + 8);
3204 /* Convert the hardware priority to an event priority and store it in
3205 * the metadata, while also returning the queue depth status
3206 * value captured by the hardware, storing it in impl_opaque, which can
3207 * be read by the application but not modified
3208 * sse_evt[0][55:48] = DLB2_TO_EV_PRIO(qes[0].priority)
3209 * sse_evt[0][63:56] = qes[0].qid_depth
3210 * sse_evt[0][119:112] = DLB2_TO_EV_PRIO(qes[1].priority)
3211 * sse_evt[0][127:120] = qes[1].qid_depth
3212 * sse_evt[1][55:48] = DLB2_TO_EV_PRIO(qes[2].priority)
3213 * sse_evt[1][63:56] = qes[2].qid_depth
3214 * sse_evt[1][119:112] = DLB2_TO_EV_PRIO(qes[3].priority)
3215 * sse_evt[1][127:120] = qes[3].qid_depth
3217 #define DLB_EVENT_PRIO_IMPL_OPAQUE_WORD 3
3218 #define DLB_BYTE_SHIFT 8
3220 _mm_insert_epi16(sse_evt[0],
3221 DLB2_TO_EV_PRIO((uint8_t)qes[0].priority) |
3222 (qes[0].qid_depth << DLB_BYTE_SHIFT),
3223 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3225 _mm_insert_epi16(sse_evt[0],
3226 DLB2_TO_EV_PRIO((uint8_t)qes[1].priority) |
3227 (qes[1].qid_depth << DLB_BYTE_SHIFT),
3228 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3230 _mm_insert_epi16(sse_evt[1],
3231 DLB2_TO_EV_PRIO((uint8_t)qes[2].priority) |
3232 (qes[2].qid_depth << DLB_BYTE_SHIFT),
3233 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3235 _mm_insert_epi16(sse_evt[1],
3236 DLB2_TO_EV_PRIO((uint8_t)qes[3].priority) |
3237 (qes[3].qid_depth << DLB_BYTE_SHIFT),
3238 DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3240 /* Write the event type, sub event type, and flow_id to the event
3242 * sse_evt[0][31:0] = qes[0].flow_id |
3243 * qes[0].u.event_type.major << 28 |
3244 * qes[0].u.event_type.sub << 20;
3245 * sse_evt[0][95:64] = qes[1].flow_id |
3246 * qes[1].u.event_type.major << 28 |
3247 * qes[1].u.event_type.sub << 20;
3248 * sse_evt[1][31:0] = qes[2].flow_id |
3249 * qes[2].u.event_type.major << 28 |
3250 * qes[2].u.event_type.sub << 20;
3251 * sse_evt[1][95:64] = qes[3].flow_id |
3252 * qes[3].u.event_type.major << 28 |
3253 * qes[3].u.event_type.sub << 20;
3255 #define DLB_EVENT_EV_TYPE_DW 0
3256 #define DLB_EVENT_EV_TYPE_SHIFT 28
3257 #define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
3258 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3260 qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3261 qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3262 DLB_EVENT_EV_TYPE_DW);
3263 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3265 qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3266 qes[1].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3267 DLB_EVENT_EV_TYPE_DW + 2);
3268 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3270 qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3271 qes[2].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3272 DLB_EVENT_EV_TYPE_DW);
3273 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3275 qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3276 qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3277 DLB_EVENT_EV_TYPE_DW + 2);
3279 /* Write the sched type to the event metadata; 'op' and 'rsvd' are not set (they remain zero):
3281 * sse_evt[0][39:32] = sched_type_map[qes[0].sched_type] << 6
3282 * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
3283 * sse_evt[1][39:32] = sched_type_map[qes[2].sched_type] << 6
3284 * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
3286 #define DLB_EVENT_SCHED_TYPE_BYTE 4
3287 #define DLB_EVENT_SCHED_TYPE_SHIFT 6
3288 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3289 sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3290 DLB_EVENT_SCHED_TYPE_BYTE);
3291 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3292 sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3293 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3294 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3295 sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3296 DLB_EVENT_SCHED_TYPE_BYTE);
3297 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3298 sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3299 DLB_EVENT_SCHED_TYPE_BYTE + 8);
3301 /* Store the metadata to the event (use the double-precision
3302 * _mm_storeh_pd because there is no integer function for storing the
3304 * events[0].event = sse_evt[0][63:0]
3305 * events[1].event = sse_evt[0][127:64]
3306 * events[2].event = sse_evt[1][63:0]
3307 * events[3].event = sse_evt[1][127:64]
3309 _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3310 _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3311 _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3312 _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3314 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3315 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3316 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3317 DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3320 ev_port->stats.queue[events[0].queue_id].
3321 qid_depth[qes[0].qid_depth],
3324 ev_port->stats.queue[events[1].queue_id].
3325 qid_depth[qes[1].qid_depth],
3328 ev_port->stats.queue[events[2].queue_id].
3329 qid_depth[qes[2].qid_depth],
3332 ev_port->stats.queue[events[3].queue_id].
3333 qid_depth[qes[3].qid_depth],
3336 DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3341 static __rte_always_inline int
3342 dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3344 volatile struct dlb2_dequeue_qe *cq_addr;
3345 uint8_t xor_mask[2] = {0x0F, 0x00};
3346 const uint8_t and_mask = 0x0F;
3347 __m128i *qes = (__m128i *)qe;
3348 uint8_t gen_bits, gen_bit;
3352 cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3354 idx = qm_port->cq_idx;
3356 /* Load the next 4 QEs */
3357 addr[0] = (uintptr_t)&cq_addr[idx];
3358 addr[1] = (uintptr_t)&cq_addr[(idx + 4) & qm_port->cq_depth_mask];
3359 addr[2] = (uintptr_t)&cq_addr[(idx + 8) & qm_port->cq_depth_mask];
3360 addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3362 /* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
3363 rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3364 rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3365 rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3366 rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3368 /* Correct the xor_mask for wrap-around QEs */
3369 gen_bit = qm_port->gen_bit;
3370 xor_mask[gen_bit] ^= !!((idx + 4) > qm_port->cq_depth_mask) << 1;
3371 xor_mask[gen_bit] ^= !!((idx + 8) > qm_port->cq_depth_mask) << 2;
3372 xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3374 /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3375 * valid, then QEs[0:N-1] are too.
3377 qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3378 rte_compiler_barrier();
3379 qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3380 rte_compiler_barrier();
3381 qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3382 rte_compiler_barrier();
3383 qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3385 /* Extract and combine the gen bits */
3386 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3387 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3388 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3389 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3391 /* XOR the combined bits such that a 1 represents a valid QE */
3392 gen_bits ^= xor_mask[gen_bit];
3394 /* Mask off gen bits we don't care about */
3395 gen_bits &= and_mask;
3397 return __builtin_popcount(gen_bits);
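/* Worked example (numbers hypothetical): cq_depth_mask = 0xF, cq_idx = 12,
 * gen_bit = 1. The four loads hit slots 12, 0, 4 and 8; the last three lie
 * past the wrap, so bits 1-3 of xor_mask[1] are flipped, turning 0x00 into
 * 0x0E. Slots 0/4/8 still hold gen = 1 from the pass just consumed, so
 * gen_bits = 0b1111 ^ 0x0E = 0b0001: one valid QE (slot 12). After the
 * device writes slot 0 with the flipped gen bit (0), gen_bits = 0b1101,
 * and 0b1101 ^ 0x0E = 0b0011: two valid QEs.
 */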
3401 dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
3403 uint16_t idx = qm_port->cq_idx_unmasked + cnt;
3405 qm_port->cq_idx_unmasked = idx;
3406 qm_port->cq_idx = idx & qm_port->cq_depth_mask;
3407 qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
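/* Example (illustrative): with a 16-entry CQ, gen_bit_shift is 4, so
 * unmasked indices 0-15 yield gen_bit = 1 (the value hardware writes on
 * the first pass), 16-31 yield 0, and so on, flipping on every wrap.
 */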
3410 static inline int16_t
3411 dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
3412 struct dlb2_eventdev_port *ev_port,
3413 struct rte_event *events,
3415 uint64_t dequeue_timeout_ticks)
3418 uint64_t start_ticks = 0ULL;
3419 struct dlb2_port *qm_port;
3422 qm_port = &ev_port->qm_port;
3424 /* We have a special implementation for waiting. Wait can be:
3425 * 1) no waiting at all
3426 * 2) busy poll only
3427 * 3) wait for interrupt. If wakeup and poll time
3428 * has expired, then return to caller
3429 * 4) umonitor/umwait repeatedly up to poll time
3432 /* If configured for per dequeue wait, then use wait value provided
3433 * to this API. Otherwise we must use the global
3434 * value from eventdev config time.
3436 if (!dlb2->global_dequeue_wait)
3437 timeout = dequeue_timeout_ticks;
3439 timeout = dlb2->global_dequeue_wait_ticks;
3441 start_ticks = rte_get_timer_cycles();
3443 while (num < max_num) {
3444 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3447 /* Copy up to 4 QEs from the current cache line into qes */
3448 num_avail = dlb2_recv_qe_sparse(qm_port, qes);
3450 /* But don't process more than the user requested */
3451 num_avail = RTE_MIN(num_avail, max_num - num);
3453 dlb2_inc_cq_idx(qm_port, num_avail << 2);
3455 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3456 num += dlb2_process_dequeue_four_qes(ev_port,
3461 num += dlb2_process_dequeue_qes(ev_port,
3466 else if ((timeout == 0) || (num > 0))
3467 /* Not waiting in any form, or 1+ events received? */
3469 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3470 timeout, start_ticks))
3474 qm_port->owed_tokens += num;
3477 if (qm_port->token_pop_mode == AUTO_POP)
3478 dlb2_consume_qe_immediate(qm_port, num);
3480 ev_port->outstanding_releases += num;
3482 dlb2_port_credits_inc(qm_port, num);
3488 static __rte_always_inline int
3489 dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
3492 uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
3493 {0x00, 0x01, 0x03, 0x07} };
3494 uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
3495 volatile struct dlb2_dequeue_qe *cq_addr;
3496 __m128i *qes = (__m128i *)qe;
3497 uint64_t *cache_line_base;
3500 cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3501 cq_addr = &cq_addr[qm_port->cq_idx];
3503 cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
3504 *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
3506 /* Load the next CQ cache line from memory. Pack these reads as tight
3507 * as possible to reduce the chance that DLB invalidates the line while
3508 * the CPU is reading it. Read the cache line backwards to ensure that
3509 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
3511 * (Valid QEs start at &qe[offset])
3513 qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
3514 qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
3515 qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
3516 qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
3518 /* Evict the cache line ASAP */
3519 rte_cldemote(cache_line_base);
3521 /* Extract and combine the gen bits */
3522 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3523 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3524 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3525 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3527 /* XOR the combined bits such that a 1 represents a valid QE */
3528 gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
3530 /* Mask off gen bits we don't care about */
3531 gen_bits &= and_mask[*offset];
3533 return __builtin_popcount(gen_bits);
3536 static inline int16_t
3537 dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
3538 struct dlb2_eventdev_port *ev_port,
3539 struct rte_event *events,
3541 uint64_t dequeue_timeout_ticks)
3544 uint64_t start_ticks = 0ULL;
3545 struct dlb2_port *qm_port;
3548 qm_port = &ev_port->qm_port;
3550 /* We have a special implementation for waiting. Wait can be:
3551 * 1) no waiting at all
3552 * 2) busy poll only
3553 * 3) wait for interrupt. If wakeup and poll time
3554 * has expired, then return to caller
3555 * 4) umonitor/umwait repeatedly up to poll time
3558 /* If configured for per dequeue wait, then use wait value provided
3559 * to this API. Otherwise we must use the global
3560 * value from eventdev config time.
3562 if (!dlb2->global_dequeue_wait)
3563 timeout = dequeue_timeout_ticks;
3565 timeout = dlb2->global_dequeue_wait_ticks;
3567 start_ticks = rte_get_timer_cycles();
3569 while (num < max_num) {
3570 struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3574 /* Copy up to 4 QEs from the current cache line into qes */
3575 num_avail = dlb2_recv_qe(qm_port, qes, &offset);
3577 /* But don't process more than the user requested */
3578 num_avail = RTE_MIN(num_avail, max_num - num);
3580 dlb2_inc_cq_idx(qm_port, num_avail);
3582 if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3583 num += dlb2_process_dequeue_four_qes(ev_port,
3588 num += dlb2_process_dequeue_qes(ev_port,
3593 else if ((timeout == 0) || (num > 0))
3594 /* Not waiting in any form, or 1+ events received? */
3596 else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3597 timeout, start_ticks))
3601 qm_port->owed_tokens += num;
3604 if (qm_port->token_pop_mode == AUTO_POP)
3605 dlb2_consume_qe_immediate(qm_port, num);
3607 ev_port->outstanding_releases += num;
3609 dlb2_port_credits_inc(qm_port, num);
3616 dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3619 struct dlb2_eventdev_port *ev_port = event_port;
3620 struct dlb2_port *qm_port = &ev_port->qm_port;
3621 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3624 RTE_ASSERT(ev_port->setup_done);
3625 RTE_ASSERT(ev != NULL);
3627 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3628 uint16_t out_rels = ev_port->outstanding_releases;
3630 dlb2_event_release(dlb2, ev_port->id, out_rels);
3632 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3635 if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3636 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3638 cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
3640 DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3641 DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3647 dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3649 return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
3653 dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3654 uint16_t num, uint64_t wait)
3656 struct dlb2_eventdev_port *ev_port = event_port;
3657 struct dlb2_port *qm_port = &ev_port->qm_port;
3658 struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3661 RTE_ASSERT(ev_port->setup_done);
3662 RTE_ASSERT(ev != NULL);
3664 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3665 uint16_t out_rels = ev_port->outstanding_releases;
3667 dlb2_event_release(dlb2, ev_port->id, out_rels);
3669 DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3672 if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3673 dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3675 cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
3677 DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3678 DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3683 dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
3686 return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3690 dlb2_flush_port(struct rte_eventdev *dev, int port_id)
3692 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3693 eventdev_stop_flush_t flush;
3694 struct rte_event ev;
3699 flush = dev->dev_ops->dev_stop_flush;
3700 dev_id = dev->data->dev_id;
3701 arg = dev->data->dev_stop_flush_arg;
3703 while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
3705 flush(dev_id, ev, arg);
3707 if (dlb2->ev_ports[port_id].qm_port.is_directed)
3710 ev.op = RTE_EVENT_OP_RELEASE;
3712 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3715 /* Enqueue any additional outstanding releases */
3716 ev.op = RTE_EVENT_OP_RELEASE;
3718 for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
3719 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3723 dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
3724 struct dlb2_eventdev_queue *queue)
3726 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3727 struct dlb2_get_ldb_queue_depth_args cfg;
3730 cfg.queue_id = queue->qm_queue.id;
3732 ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
3734 DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
3735 ret, dlb2_error_strings[cfg.response.status]);
3739 return cfg.response.id;
3743 dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
3744 struct dlb2_eventdev_queue *queue)
3746 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3747 struct dlb2_get_dir_queue_depth_args cfg;
3750 cfg.queue_id = queue->qm_queue.id;
3752 ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
3754 DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
3755 ret, dlb2_error_strings[cfg.response.status]);
3759 return cfg.response.id;
3763 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
3764 struct dlb2_eventdev_queue *queue)
3766 if (queue->qm_queue.is_directed)
3767 return dlb2_get_dir_queue_depth(dlb2, queue);
3769 return dlb2_get_ldb_queue_depth(dlb2, queue);
3773 dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
3774 struct dlb2_eventdev_queue *queue)
3776 return dlb2_get_queue_depth(dlb2, queue) == 0;
3780 dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
3784 for (i = 0; i < dlb2->num_queues; i++) {
3785 if (dlb2->ev_queues[i].num_links == 0)
3787 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3795 dlb2_queues_empty(struct dlb2_eventdev *dlb2)
3799 for (i = 0; i < dlb2->num_queues; i++) {
3800 if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3808 dlb2_drain(struct rte_eventdev *dev)
3810 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3811 struct dlb2_eventdev_port *ev_port = NULL;
3815 dev_id = dev->data->dev_id;
3817 while (!dlb2_linked_queues_empty(dlb2)) {
3818 /* Flush all the ev_ports, which will drain all their connected
3821 for (i = 0; i < dlb2->num_ports; i++)
3822 dlb2_flush_port(dev, i);
3825 /* The queues are empty, but there may be events left in the ports. */
3826 for (i = 0; i < dlb2->num_ports; i++)
3827 dlb2_flush_port(dev, i);
3829 /* If the domain's queues are empty, we're done. */
3830 if (dlb2_queues_empty(dlb2))
3833 /* Else, there must be at least one unlinked load-balanced queue.
3834 * Select a load-balanced port with which to drain the unlinked
3837 for (i = 0; i < dlb2->num_ports; i++) {
3838 ev_port = &dlb2->ev_ports[i];
3840 if (!ev_port->qm_port.is_directed)
3844 if (i == dlb2->num_ports) {
3845 DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
3850 rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
3853 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
3858 for (i = 0; i < dlb2->num_queues; i++) {
3862 if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3868 /* Link the ev_port to the queue */
3869 ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
3871 DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
3876 /* Flush the queue */
3877 while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3878 dlb2_flush_port(dev, ev_port->id);
3880 /* Drain any extant events in the ev_port. */
3881 dlb2_flush_port(dev, ev_port->id);
3883 /* Unlink the ev_port from the queue */
3884 ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
3886 DLB2_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
3894 dlb2_eventdev_stop(struct rte_eventdev *dev)
3896 struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3898 rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
3900 if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
3901 DLB2_LOG_DBG("Internal error: already stopped\n");
3902 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3904 } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
3905 DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
3906 (int)dlb2->run_state);
3907 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3911 dlb2->run_state = DLB2_RUN_STATE_STOPPING;
3913 rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3917 dlb2->run_state = DLB2_RUN_STATE_STOPPED;
3921 dlb2_eventdev_close(struct rte_eventdev *dev)
3923 dlb2_hw_reset_sched_domain(dev, false);
3929 dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
3934 /* This function intentionally left blank. */
3938 dlb2_eventdev_port_release(void *port)
3940 struct dlb2_eventdev_port *ev_port = port;
3941 struct dlb2_port *qm_port;
3944 qm_port = &ev_port->qm_port;
3945 if (qm_port->config_state == DLB2_CONFIGURED)
3946 dlb2_free_qe_mem(qm_port);
3951 dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
3952 uint64_t *timeout_ticks)
3955 uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
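/* Note: the ratio truncates toward zero (e.g. a 2.5 GHz TSC gives
 * cycles_per_ns = 2), so computed timeouts can run up to ~20% short on
 * such clocks; tolerable because eventdev timeouts are advisory.
 */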
3957 *timeout_ticks = ns * cycles_per_ns;
3963 dlb2_entry_points_init(struct rte_eventdev *dev)
3965 struct dlb2_eventdev *dlb2;
3967 /* Expose PMD's eventdev interface */
3968 static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
3969 .dev_infos_get = dlb2_eventdev_info_get,
3970 .dev_configure = dlb2_eventdev_configure,
3971 .dev_start = dlb2_eventdev_start,
3972 .dev_stop = dlb2_eventdev_stop,
3973 .dev_close = dlb2_eventdev_close,
3974 .queue_def_conf = dlb2_eventdev_queue_default_conf_get,
3975 .queue_setup = dlb2_eventdev_queue_setup,
3976 .queue_release = dlb2_eventdev_queue_release,
3977 .port_def_conf = dlb2_eventdev_port_default_conf_get,
3978 .port_setup = dlb2_eventdev_port_setup,
3979 .port_release = dlb2_eventdev_port_release,
3980 .port_link = dlb2_eventdev_port_link,
3981 .port_unlink = dlb2_eventdev_port_unlink,
3982 .port_unlinks_in_progress =
3983 dlb2_eventdev_port_unlinks_in_progress,
3984 .timeout_ticks = dlb2_eventdev_timeout_ticks,
3985 .dump = dlb2_eventdev_dump,
3986 .xstats_get = dlb2_eventdev_xstats_get,
3987 .xstats_get_names = dlb2_eventdev_xstats_get_names,
3988 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
3989 .xstats_reset = dlb2_eventdev_xstats_reset,
3990 .dev_selftest = test_dlb2_eventdev,
3995 dev->dev_ops = &dlb2_eventdev_entry_ops;
3996 dev->enqueue = dlb2_event_enqueue;
3997 dev->enqueue_burst = dlb2_event_enqueue_burst;
3998 dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
3999 dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
4001 dlb2 = dev->data->dev_private;
4002 if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
4003 dev->dequeue = dlb2_event_dequeue_sparse;
4004 dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
4006 dev->dequeue = dlb2_event_dequeue;
4007 dev->dequeue_burst = dlb2_event_dequeue_burst;
4012 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
4014 struct dlb2_devargs *dlb2_args)
4016 struct dlb2_eventdev *dlb2;
4019 dlb2 = dev->data->dev_private;
4021 dlb2->event_dev = dev; /* backlink */
4023 evdev_dlb2_default_info.driver_name = name;
4025 dlb2->max_num_events_override = dlb2_args->max_num_events;
4026 dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
4027 dlb2->qm_instance.cos_id = dlb2_args->cos_id;
4029 err = dlb2_iface_open(&dlb2->qm_instance, name);
4031 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
4036 err = dlb2_iface_get_device_version(&dlb2->qm_instance,
4039 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
4044 err = dlb2_hw_query_resources(dlb2);
4046 DLB2_LOG_ERR("get resources err=%d for %s\n",
4051 dlb2_iface_hardware_init(&dlb2->qm_instance);
4053 err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
4055 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
4060 /* Complete xstats runtime initialization */
4061 err = dlb2_xstats_init(dlb2);
4063 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
4067 /* Initialize each port's token pop mode */
4068 for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
4069 dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
4071 rte_spinlock_init(&dlb2->qm_instance.resource_lock);
4073 dlb2_iface_low_level_io_init();
4075 dlb2_entry_points_init(dev);
4077 dlb2_init_queue_depth_thresholds(dlb2,
4078 dlb2_args->qid_depth_thresholds.val);
4084 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
4087 struct dlb2_eventdev *dlb2;
4090 dlb2 = dev->data->dev_private;
4092 evdev_dlb2_default_info.driver_name = name;
4094 err = dlb2_iface_open(&dlb2->qm_instance, name);
4096 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
4101 err = dlb2_hw_query_resources(dlb2);
4103 DLB2_LOG_ERR("get resources err=%d for %s\n",
4108 dlb2_iface_low_level_io_init();
4110 dlb2_entry_points_init(dev);
4116 dlb2_parse_params(const char *params,
4118 struct dlb2_devargs *dlb2_args,
4122 static const char * const args[] = { NUMA_NODE_ARG,
4123 DLB2_MAX_NUM_EVENTS,
4124 DLB2_NUM_DIR_CREDITS,
4126 DLB2_QID_DEPTH_THRESH_ARG,
4130 if (params != NULL && params[0] != '\0') {
4131 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
4133 if (kvlist == NULL) {
4135 "Ignoring unsupported parameters when creating device '%s'\n",
4138 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
4140 &dlb2_args->socket_id);
4142 DLB2_LOG_ERR("%s: Error parsing numa node parameter\n",
4144 rte_kvargs_free(kvlist);
4148 ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
4150 &dlb2_args->max_num_events);
4152 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter\n",
4154 rte_kvargs_free(kvlist);
4158 if (version == DLB2_HW_V2) {
4159 ret = rte_kvargs_process(kvlist,
4160 DLB2_NUM_DIR_CREDITS,
4161 set_num_dir_credits,
4162 &dlb2_args->num_dir_credits_override);
4164 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter\n",
4166 rte_kvargs_free(kvlist);
4170 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
4172 &dlb2_args->dev_id);
4174 DLB2_LOG_ERR("%s: Error parsing dev_id parameter\n",
4176 rte_kvargs_free(kvlist);
4180 if (version == DLB2_HW_V2) {
4181 ret = rte_kvargs_process(
4183 DLB2_QID_DEPTH_THRESH_ARG,
4184 set_qid_depth_thresh,
4185 &dlb2_args->qid_depth_thresholds);
4187 ret = rte_kvargs_process(
4189 DLB2_QID_DEPTH_THRESH_ARG,
4190 set_qid_depth_thresh_v2_5,
4191 &dlb2_args->qid_depth_thresholds);
4194 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter\n",
4196 rte_kvargs_free(kvlist);
4200 ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
4202 &dlb2_args->cos_id);
4204 DLB2_LOG_ERR("%s: Error parsing cos parameter\n",
4206 rte_kvargs_free(kvlist);
4210 rte_kvargs_free(kvlist);
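/* Devargs usage sketch (device address and values hypothetical; key
 * strings are the ones bound to the macros in args[] above):
 *
 *   -a <bdf>,max_num_events=4096,num_dir_credits=1024,cos=1
 *
 * num_dir_credits applies only to v2.0 hardware, which keeps separate
 * LDB and DIR credit pools; v2.5 exposes a single combined pool.
 */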
4215 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);