1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
13 #include <sys/fcntl.h>
17 #include <rte_common.h>
18 #include <rte_config.h>
19 #include <rte_cycles.h>
20 #include <rte_debug.h>
22 #include <rte_errno.h>
24 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
28 #include <rte_power_intrinsics.h>
29 #include <rte_prefetch.h>
31 #include <rte_string_fns.h>
33 #include <rte_eventdev.h>
34 #include <rte_eventdev_pmd.h>
37 #include "dlb_iface.h"
38 #include "dlb_inline_fns.h"
41 * Resources exposed to eventdev.
43 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
44 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
46 static struct rte_event_dev_info evdev_dlb_default_info = {
47 .driver_name = "", /* probe will set */
48 .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
49 .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
50 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
51 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
53 .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
55 .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
56 .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
57 .max_event_priority_levels = DLB_QID_PRIORITIES,
58 .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
59 .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
60 .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
61 .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
62 .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
63 .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
64 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
65 RTE_EVENT_DEV_CAP_EVENT_QOS |
66 RTE_EVENT_DEV_CAP_BURST_MODE |
67 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
68 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
69 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
72 struct process_local_port_data
73 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
76 dlb_get_queue_depth(struct dlb_eventdev *dlb,
77 struct dlb_eventdev_queue *queue)
79 /* Dummy for now, so the "xstats" patch compiles. */
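/* Query the device for its resource counts (scheduling domains, LDB/DIR
 * queues, ports, and credits) and fold the results into both the default
 * eventdev info struct and the handle's hw_rsrc_max limits, which are later
 * used when creating the scheduling domain.
 */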
87 dlb_hw_query_resources(struct dlb_eventdev *dlb)
89 struct dlb_hw_dev *handle = &dlb->qm_instance;
90 struct dlb_hw_resource_info *dlb_info = &handle->info;
93 ret = dlb_iface_get_num_resources(handle,
94 &dlb->hw_rsrc_query_results);
96 DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
100 /* Complete filling in device resource info returned to evdev app,
101 * overriding any default values.
102 * The capabilities (CAPs) were set at compile time.
105 evdev_dlb_default_info.max_event_queues =
106 dlb->hw_rsrc_query_results.num_ldb_queues;
108 evdev_dlb_default_info.max_event_ports =
109 dlb->hw_rsrc_query_results.num_ldb_ports;
111 evdev_dlb_default_info.max_num_events =
112 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
114 /* Save off values used when creating the scheduling domain. */
116 handle->info.num_sched_domains =
117 dlb->hw_rsrc_query_results.num_sched_domains;
119 handle->info.hw_rsrc_max.nb_events_limit =
120 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
122 handle->info.hw_rsrc_max.num_queues =
123 dlb->hw_rsrc_query_results.num_ldb_queues +
124 dlb->hw_rsrc_query_results.num_dir_ports;
126 handle->info.hw_rsrc_max.num_ldb_queues =
127 dlb->hw_rsrc_query_results.num_ldb_queues;
129 handle->info.hw_rsrc_max.num_ldb_ports =
130 dlb->hw_rsrc_query_results.num_ldb_ports;
132 handle->info.hw_rsrc_max.num_dir_ports =
133 dlb->hw_rsrc_query_results.num_dir_ports;
135 handle->info.hw_rsrc_max.reorder_window_size =
136 dlb->hw_rsrc_query_results.num_hist_list_entries;
138 rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
144 dlb_free_qe_mem(struct dlb_port *qm_port)
149 rte_free(qm_port->qe4);
152 rte_free(qm_port->consume_qe);
153 qm_port->consume_qe = NULL;
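/* Allocate a cache line's worth of CQ pop QEs for this port. These are used
 * to batch token returns to the CQ; fields that vary at run time (e.g. the
 * token count) are filled in on the enqueue path.
 */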
157 dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
159 struct dlb_cq_pop_qe *qe;
161 qe = rte_zmalloc(mz_name,
162 DLB_NUM_QES_PER_CACHE_LINE *
163 sizeof(struct dlb_cq_pop_qe),
164 RTE_CACHE_LINE_SIZE);
167 DLB_LOG_ERR("dlb: no memory for consume_qe\n");
171 qm_port->consume_qe = qe;
177 /* The tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2, etc. */
180 qe->tokens = 0; /* set at run time */
183 /* Completion IDs are disabled */
190 dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
194 sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
196 qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
198 if (qm_port->qe4 == NULL) {
199 DLB_LOG_ERR("dlb: no qe4 memory\n");
204 ret = dlb_init_consume_qe(qm_port, mz_name);
206 DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
214 dlb_free_qe_mem(qm_port);
219 /* Wrapper for string to int conversion. Substituted for atoi(...), which is unsafe. */
222 #define DLB_BASE_10 10
225 dlb_string_to_int(int *result, const char *str)
230 if (str == NULL || result == NULL)
234 ret = strtol(str, &endstr, DLB_BASE_10);
238 /* long int and int may be different width for some architectures */
239 if (ret < INT_MIN || ret > INT_MAX || endstr == str)
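/* The following kvargs callbacks parse integer devargs for the device (NUMA
 * node, maximum in-flight events, directed credits, ...). Each converts the
 * value with dlb_string_to_int() and range-checks the result before storing
 * it in the location passed via the opaque pointer.
 */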
247 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
249 int *socket_id = opaque;
252 ret = dlb_string_to_int(socket_id, value);
256 if (*socket_id >= RTE_MAX_NUMA_NODES)
263 set_max_num_events(const char *key __rte_unused,
267 int *max_num_events = opaque;
270 if (value == NULL || opaque == NULL) {
271 DLB_LOG_ERR("NULL pointer\n");
275 ret = dlb_string_to_int(max_num_events, value);
279 if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
280 DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
281 DLB_MAX_NUM_LDB_CREDITS);
289 set_num_dir_credits(const char *key __rte_unused,
293 int *num_dir_credits = opaque;
296 if (value == NULL || opaque == NULL) {
297 DLB_LOG_ERR("NULL pointer\n");
301 ret = dlb_string_to_int(num_dir_credits, value);
305 if (*num_dir_credits < 0 ||
306 *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
307 DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
308 DLB_MAX_NUM_DIR_CREDITS);
315 * This function first unmaps all memory mappings and closes the
316 * domain's file descriptor, which causes the driver to reset the
317 * scheduling domain. Once that completes (when close() returns), we
318 * can safely free the dynamically allocated memory used by the
322 * We will maintain a use count and use that to determine when
323 * a reset is required. In PF mode, we never mmap or munmap
324 * device memory, and we own the entire physical PCI device.
328 dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
330 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
331 enum dlb_configuration_state config_state;
334 /* Close and reset the domain */
335 dlb_iface_domain_close(dlb);
337 /* Free all dynamically allocated port memory */
338 for (i = 0; i < dlb->num_ports; i++)
339 dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
341 /* If reconfiguring, mark the device's queues and ports as "previously
342 * configured." If the user does not reconfigure them, the PMD will
343 * reapply their previous configuration when the device is started.
345 config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
347 for (i = 0; i < dlb->num_ports; i++) {
348 dlb->ev_ports[i].qm_port.config_state = config_state;
349 /* Reset setup_done so ports can be reconfigured */
350 dlb->ev_ports[i].setup_done = false;
351 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
352 dlb->ev_ports[i].link[j].mapped = false;
355 for (i = 0; i < dlb->num_queues; i++)
356 dlb->ev_queues[i].qm_queue.config_state = config_state;
358 for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
359 dlb->ev_queues[i].setup_done = false;
362 dlb->num_ldb_ports = 0;
363 dlb->num_dir_ports = 0;
365 dlb->num_ldb_queues = 0;
366 dlb->num_dir_queues = 0;
367 dlb->configured = false;
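/* Create the load-balanced credit pool for this scheduling domain. If the
 * domain was configured with zero LDB credits, the pool is skipped and the
 * cached pool ID and credit count are simply zeroed.
 */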
371 dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
373 struct dlb_create_ldb_pool_args cfg;
374 struct dlb_cmd_response response;
380 if (!handle->cfg.resources.num_ldb_credits) {
381 handle->cfg.ldb_credit_pool_id = 0;
382 handle->cfg.num_ldb_credits = 0;
386 cfg.response = (uintptr_t)&response;
387 cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
389 ret = dlb_iface_ldb_credit_pool_create(handle,
392 DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
393 ret, dlb_error_strings[response.status]);
396 handle->cfg.ldb_credit_pool_id = response.id;
397 handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
403 dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
405 struct dlb_create_dir_pool_args cfg;
406 struct dlb_cmd_response response;
412 if (!handle->cfg.resources.num_dir_credits) {
413 handle->cfg.dir_credit_pool_id = 0;
414 handle->cfg.num_dir_credits = 0;
418 cfg.response = (uintptr_t)&response;
419 cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
421 ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
423 DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
424 ret, dlb_error_strings[response.status]);
426 handle->cfg.dir_credit_pool_id = response.id;
427 handle->cfg.num_dir_credits = cfg.num_dir_credits;
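/* Carve out a hardware scheduling domain sized to the requested resources:
 * LDB/DIR ports and queues, credits, atomic inflights, and history list
 * entries. On success the domain ID is cached in the handle and one credit
 * pool per queue type is created.
 */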
433 dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
434 struct dlb_eventdev *dlb,
435 const struct dlb_hw_rsrcs *resources_asked)
438 struct dlb_create_sched_domain_args *config_params;
439 struct dlb_cmd_response response;
441 if (resources_asked == NULL) {
442 DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
447 /* Map generic qm resources to dlb resources */
448 config_params = &handle->cfg.resources;
450 config_params->response = (uintptr_t)&response;
452 /* DIR ports and queues */
454 config_params->num_dir_ports =
455 resources_asked->num_dir_ports;
457 config_params->num_dir_credits =
458 resources_asked->num_dir_credits;
460 /* LDB ports and queues */
462 config_params->num_ldb_queues =
463 resources_asked->num_ldb_queues;
465 config_params->num_ldb_ports =
466 resources_asked->num_ldb_ports;
468 config_params->num_ldb_credits =
469 resources_asked->num_ldb_credits;
471 config_params->num_atomic_inflights =
472 dlb->num_atm_inflights_per_queue *
473 config_params->num_ldb_queues;
475 config_params->num_hist_list_entries = config_params->num_ldb_ports *
476 DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
478 /* The DLB is limited to one credit pool per queue type. */
479 config_params->num_ldb_credit_pools = 1;
480 config_params->num_dir_credit_pools = 1;
482 DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
483 config_params->num_ldb_queues,
484 config_params->num_ldb_ports,
485 config_params->num_dir_ports,
486 config_params->num_atomic_inflights,
487 config_params->num_hist_list_entries,
488 config_params->num_ldb_credits,
489 config_params->num_dir_credits,
490 config_params->num_ldb_credit_pools,
491 config_params->num_dir_credit_pools);
493 /* Configure the QM */
495 ret = dlb_iface_sched_domain_create(handle, config_params);
497 DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
500 dlb_error_strings[response.status]);
504 handle->domain_id = response.id;
505 handle->domain_id_valid = 1;
507 config_params->response = 0;
509 ret = dlb_ldb_credit_pool_create(handle);
511 DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
515 ret = dlb_dir_credit_pool_create(handle);
517 DLB_LOG_ERR("dlb: create dir credit pool failed\n");
521 handle->cfg.configured = true;
526 dlb_iface_domain_close(dlb);
532 /* End HW specific */
534 dlb_eventdev_info_get(struct rte_eventdev *dev,
535 struct rte_event_dev_info *dev_info)
537 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
540 ret = dlb_hw_query_resources(dlb);
542 const struct rte_eventdev_data *data = dev->data;
544 DLB_LOG_ERR("get resources err=%d, devid=%d\n",
546 /* fn is void, so fall through and return the values set up in probe. */
551 /* Add num resources currently owned by this domain.
552 * These would become available if the scheduling domain were reset due
553 * to the application recalling eventdev_configure to *reconfigure* the event device. */
556 evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
557 evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
558 evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
560 /* In DLB A-stepping hardware, applications are limited to 128
561 * configured ports (load-balanced or directed). The reported number of
562 * available ports must reflect this.
564 if (dlb->revision < DLB_REV_B0) {
567 used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
568 dlb->hw_rsrc_query_results.num_ldb_ports -
569 dlb->hw_rsrc_query_results.num_dir_ports;
571 evdev_dlb_default_info.max_event_ports =
572 RTE_MIN(evdev_dlb_default_info.max_event_ports,
576 evdev_dlb_default_info.max_event_queues =
577 RTE_MIN(evdev_dlb_default_info.max_event_queues,
578 RTE_EVENT_MAX_QUEUES_PER_DEV);
580 evdev_dlb_default_info.max_num_events =
581 RTE_MIN(evdev_dlb_default_info.max_num_events,
582 dlb->max_num_events_override);
584 *dev_info = evdev_dlb_default_info;
587 /* Note: 1 QM instance per QM device, QM instance/device == event device */
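/* Configure (or reconfigure) the event device. A previously configured
 * scheduling domain is reset first, the requested queue/port/event counts are
 * validated against the queried hardware limits, and the LDB/DIR split of
 * ports, queues, and credits is derived from nb_single_link_event_port_queues
 * before the scheduling domain is created.
 */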
589 dlb_eventdev_configure(const struct rte_eventdev *dev)
591 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
592 struct dlb_hw_dev *handle = &dlb->qm_instance;
593 struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
594 const struct rte_eventdev_data *data = dev->data;
595 const struct rte_event_dev_config *config = &data->dev_conf;
598 /* If this eventdev is already configured, we must release the current
599 * scheduling domain before attempting to configure a new one.
601 if (dlb->configured) {
602 dlb_hw_reset_sched_domain(dev, true);
604 ret = dlb_hw_query_resources(dlb);
606 DLB_LOG_ERR("get resources err=%d, devid=%d\n",
612 if (config->nb_event_queues > rsrcs->num_queues) {
613 DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
614 config->nb_event_queues,
618 if (config->nb_event_ports > (rsrcs->num_ldb_ports
619 + rsrcs->num_dir_ports)) {
620 DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
621 config->nb_event_ports,
622 (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
625 if (config->nb_events_limit > rsrcs->nb_events_limit) {
626 DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
627 config->nb_events_limit,
628 rsrcs->nb_events_limit);
632 if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
633 dlb->global_dequeue_wait = false;
637 dlb->global_dequeue_wait = true;
639 timeout32 = config->dequeue_timeout_ns;
641 dlb->global_dequeue_wait_ticks =
642 timeout32 * (rte_get_timer_hz() / 1E9);
645 /* Does this platform support umonitor/umwait? */
646 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
647 if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
648 RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
649 DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
650 RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
653 dlb->umwait_allowed = true;
656 rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
657 rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
658 /* 1 dir queue per dir port */
659 rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
661 /* Scale down nb_events_limit by 4 for directed credits, since there
662 * are 4x as many load-balanced credits.
664 rsrcs->num_ldb_credits = 0;
665 rsrcs->num_dir_credits = 0;
667 if (rsrcs->num_ldb_queues)
668 rsrcs->num_ldb_credits = config->nb_events_limit;
669 if (rsrcs->num_dir_ports)
670 rsrcs->num_dir_credits = config->nb_events_limit / 4;
671 if (dlb->num_dir_credits_override != -1)
672 rsrcs->num_dir_credits = dlb->num_dir_credits_override;
674 if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
675 DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
679 dlb->new_event_limit = config->nb_events_limit;
680 __atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);
682 /* Save number of ports/queues for this event dev */
683 dlb->num_ports = config->nb_event_ports;
684 dlb->num_queues = config->nb_event_queues;
685 dlb->num_dir_ports = rsrcs->num_dir_ports;
686 dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
687 dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
688 dlb->num_dir_queues = dlb->num_dir_ports;
689 dlb->num_ldb_credits = rsrcs->num_ldb_credits;
690 dlb->num_dir_credits = rsrcs->num_dir_credits;
692 dlb->configured = true;
698 dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
702 struct dlb_unmap_qid_args cfg;
703 struct dlb_cmd_response response;
709 cfg.response = (uintptr_t)&response;
710 cfg.port_id = qm_port_id;
713 ret = dlb_iface_unmap_qid(handle, &cfg);
715 DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
716 ret, dlb_error_strings[response.status]);
722 dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
723 struct dlb_eventdev_port *ev_port,
724 struct dlb_eventdev_queue *ev_queue)
728 /* Don't unlink until start time. */
729 if (dlb->run_state == DLB_RUN_STATE_STOPPED)
732 for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
733 if (ev_port->link[i].valid &&
734 ev_port->link[i].queue_id == ev_queue->id)
738 /* This is expected with the eventdev API, which blindly attempts to
739 * unmap all queues.
741 if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
742 DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
743 ev_queue->qm_queue.id,
744 ev_port->qm_port.id);
748 ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
750 ev_queue->qm_queue.id);
752 ev_port->link[i].mapped = false;
758 dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
759 uint8_t queues[], uint16_t nb_unlinks)
761 struct dlb_eventdev_port *ev_port = event_port;
762 struct dlb_eventdev *dlb;
767 if (!ev_port->setup_done) {
768 DLB_LOG_ERR("dlb: evport %d is not configured\n",
774 if (queues == NULL || nb_unlinks == 0) {
775 DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
776 return 0; /* Ignore and return success */
779 if (ev_port->qm_port.is_directed) {
780 DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
783 return nb_unlinks; /* as if success */
788 for (i = 0; i < nb_unlinks; i++) {
789 struct dlb_eventdev_queue *ev_queue;
792 if (queues[i] >= dlb->num_queues) {
793 DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
795 return i; /* return index of offending queue */
798 ev_queue = &dlb->ev_queues[queues[i]];
800 /* Does a link exist? */
801 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
802 if (ev_port->link[j].queue_id == queues[i] &&
803 ev_port->link[j].valid)
806 if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
809 ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
811 DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
812 ret, ev_port->id, queues[i]);
814 return i; /* return index of offending queue */
817 ev_port->link[j].valid = false;
818 ev_port->num_links--;
819 ev_queue->num_links--;
826 dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
829 struct dlb_eventdev_port *ev_port = event_port;
830 struct dlb_eventdev *dlb;
831 struct dlb_hw_dev *handle;
832 struct dlb_pending_port_unmaps_args cfg;
833 struct dlb_cmd_response response;
838 if (!ev_port->setup_done) {
839 DLB_LOG_ERR("dlb: evport %d is not configured\n",
845 cfg.port_id = ev_port->qm_port.id;
846 cfg.response = (uintptr_t)&response;
848 handle = &dlb->qm_instance;
849 ret = dlb_iface_pending_port_unmaps(handle, &cfg);
852 DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
853 ret, dlb_error_strings[response.status]);
861 dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
863 struct rte_event_port_conf *port_conf)
865 RTE_SET_USED(port_id);
866 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
868 port_conf->new_event_threshold = dlb->new_event_limit;
869 port_conf->dequeue_depth = 32;
870 port_conf->enqueue_depth = DLB_MAX_ENQUEUE_DEPTH;
871 port_conf->event_port_cfg = 0;
875 dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
877 struct rte_event_queue_conf *queue_conf)
880 RTE_SET_USED(queue_id);
881 queue_conf->nb_atomic_flows = 1024;
882 queue_conf->nb_atomic_order_sequences = 32;
883 queue_conf->event_queue_cfg = 0;
884 queue_conf->priority = 0;
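/* Create a load-balanced hardware port for this event port: validate the CQ
 * and enqueue depths, size the credit high/low watermarks and quanta, create
 * the port through the iface layer, and initialize the process-local
 * dlb_port bookkeeping (QE memory, CQ depth/mask, gen bit, cached credits).
 */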
888 dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
889 struct dlb_eventdev_port *ev_port,
890 uint32_t dequeue_depth,
892 uint32_t enqueue_depth,
893 uint16_t rsvd_tokens,
894 bool use_rsvd_token_scheme)
896 struct dlb_hw_dev *handle = &dlb->qm_instance;
897 struct dlb_create_ldb_port_args cfg = {0};
898 struct dlb_cmd_response response = {0};
900 struct dlb_port *qm_port = NULL;
901 char mz_name[RTE_MEMZONE_NAMESIZE];
907 if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
908 DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
909 DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
913 if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
914 DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
915 DLB_MIN_ENQUEUE_DEPTH);
919 rte_spinlock_lock(&handle->resource_lock);
921 cfg.response = (uintptr_t)&response;
923 /* We round up to the next power of 2 if necessary */
924 cfg.cq_depth = rte_align32pow2(cq_depth);
925 cfg.cq_depth_threshold = rsvd_tokens;
927 cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
929 /* User controls the LDB high watermark via enqueue depth. The DIR high
930 * watermark is equal, unless the directed credit pool is too small.
932 cfg.ldb_credit_high_watermark = enqueue_depth;
934 /* If there are no directed ports, the kernel driver will ignore this
935 * port's directed credit settings. Don't use enqueue_depth if it would
936 * require more directed credits than are available.
938 cfg.dir_credit_high_watermark =
939 RTE_MIN(enqueue_depth,
940 handle->cfg.num_dir_credits / dlb->num_ports);
942 cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
943 cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
945 cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
946 cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
950 cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
951 cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
953 ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
955 DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
956 ret, dlb_error_strings[response.status]);
960 qm_port_id = response.id;
962 DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
963 ev_port->id, qm_port_id);
965 qm_port = &ev_port->qm_port;
966 qm_port->ev_port = ev_port; /* back ptr */
967 qm_port->dlb = dlb; /* back ptr */
970 * Allocate and init local qe struct(s).
971 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
974 snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
977 ret = dlb_init_qe_mem(qm_port, mz_name);
979 DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
983 qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
984 qm_port->id = qm_port_id;
986 /* The credit window is one high water mark of QEs */
987 qm_port->ldb_pushcount_at_credit_expiry = 0;
988 qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
989 /* The credit window is one high water mark of QEs */
990 qm_port->dir_pushcount_at_credit_expiry = 0;
991 qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
992 qm_port->cq_depth = cfg.cq_depth;
993 /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
994 * the effective depth is smaller.
996 qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
998 qm_port->cq_idx_unmasked = 0;
999 if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
1000 qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1002 qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1004 qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1005 /* starting value of gen bit - it toggles at wrap time */
1006 qm_port->gen_bit = 1;
1008 qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
1009 qm_port->cq_rsvd_token_deficit = rsvd_tokens;
1010 qm_port->int_armed = false;
1012 /* Save off for later use in info and lookup APIs. */
1013 qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
1015 qm_port->dequeue_depth = dequeue_depth;
1017 qm_port->owed_tokens = 0;
1018 qm_port->issued_releases = 0;
1021 qm_port->state = PORT_STARTED; /* enabled at create time */
1022 qm_port->config_state = DLB_CONFIGURED;
1024 qm_port->dir_credits = cfg.dir_credit_high_watermark;
1025 qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
1027 DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1030 qm_port->ldb_credits,
1031 qm_port->dir_credits);
1033 rte_spinlock_unlock(&handle->resource_lock);
1039 dlb_free_qe_mem(qm_port);
1040 qm_port->pp_mmio_base = 0;
1043 rte_spinlock_unlock(&handle->resource_lock);
1045 DLB_LOG_ERR("dlb: create ldb port failed!\n");
1051 dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
1052 struct dlb_eventdev_port *ev_port,
1053 uint32_t dequeue_depth,
1055 uint32_t enqueue_depth,
1056 uint16_t rsvd_tokens,
1057 bool use_rsvd_token_scheme)
1059 struct dlb_hw_dev *handle = &dlb->qm_instance;
1060 struct dlb_create_dir_port_args cfg = {0};
1061 struct dlb_cmd_response response = {0};
1063 struct dlb_port *qm_port = NULL;
1064 char mz_name[RTE_MEMZONE_NAMESIZE];
1065 uint32_t qm_port_id;
1067 if (dlb == NULL || handle == NULL)
1070 if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
1071 DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
1072 DLB_MIN_DIR_CQ_DEPTH);
1076 if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
1077 DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
1078 DLB_MIN_ENQUEUE_DEPTH);
1082 rte_spinlock_lock(&handle->resource_lock);
1084 /* Directed queues are configured at link time. */
1087 cfg.response = (uintptr_t)&response;
1089 /* We round up to the next power of 2 if necessary */
1090 cfg.cq_depth = rte_align32pow2(cq_depth);
1091 cfg.cq_depth_threshold = rsvd_tokens;
1093 /* User controls the LDB high watermark via enqueue depth. The DIR high
1094 * watermark is equal, unless the directed credit pool is too small.
1096 cfg.ldb_credit_high_watermark = enqueue_depth;
1098 /* Don't use enqueue_depth if it would require more directed credits
1099 * than are available.
1101 cfg.dir_credit_high_watermark =
1102 RTE_MIN(enqueue_depth,
1103 handle->cfg.num_dir_credits / dlb->num_ports);
1105 cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
1106 cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
1108 cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
1109 cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
1113 cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
1114 cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
1116 ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
1118 DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
1119 ret, dlb_error_strings[response.status]);
1123 qm_port_id = response.id;
1125 DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
1126 ev_port->id, qm_port_id);
1128 qm_port = &ev_port->qm_port;
1129 qm_port->ev_port = ev_port; /* back ptr */
1130 qm_port->dlb = dlb; /* back ptr */
1133 * Init local qe struct(s).
1134 * Note: MOVDIR64 requires the enqueue QE to be aligned
1137 snprintf(mz_name, sizeof(mz_name), "dir_port%d",
1140 ret = dlb_init_qe_mem(qm_port, mz_name);
1143 DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
1147 qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
1148 qm_port->id = qm_port_id;
1150 /* The credit window is one high water mark of QEs */
1151 qm_port->ldb_pushcount_at_credit_expiry = 0;
1152 qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
1153 /* The credit window is one high water mark of QEs */
1154 qm_port->dir_pushcount_at_credit_expiry = 0;
1155 qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
1156 qm_port->cq_depth = cfg.cq_depth;
1157 qm_port->cq_idx = 0;
1158 qm_port->cq_idx_unmasked = 0;
1159 if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
1160 qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1162 qm_port->cq_depth_mask = cfg.cq_depth - 1;
1164 qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1165 /* starting value of gen bit - it toggles at wrap time */
1166 qm_port->gen_bit = 1;
1168 qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
1169 qm_port->cq_rsvd_token_deficit = rsvd_tokens;
1170 qm_port->int_armed = false;
1172 /* Save off for later use in info and lookup APIs. */
1173 qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
1175 qm_port->dequeue_depth = dequeue_depth;
1177 qm_port->owed_tokens = 0;
1178 qm_port->issued_releases = 0;
1181 qm_port->state = PORT_STARTED; /* enabled at create time */
1182 qm_port->config_state = DLB_CONFIGURED;
1184 qm_port->dir_credits = cfg.dir_credit_high_watermark;
1185 qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
1187 DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
1190 cfg.dir_credit_high_watermark,
1191 cfg.ldb_credit_high_watermark);
1193 rte_spinlock_unlock(&handle->resource_lock);
1199 qm_port->pp_mmio_base = 0;
1200 dlb_free_qe_mem(qm_port);
1203 rte_spinlock_unlock(&handle->resource_lock);
1205 DLB_LOG_ERR("dlb: create dir port failed!\n");
1211 dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
1212 struct dlb_queue *queue,
1213 const struct rte_event_queue_conf *evq_conf)
1215 struct dlb_hw_dev *handle = &dlb->qm_instance;
1216 struct dlb_create_ldb_queue_args cfg;
1217 struct dlb_cmd_response response;
1220 int sched_type = -1;
1222 if (evq_conf == NULL)
1225 if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
1226 if (evq_conf->nb_atomic_order_sequences != 0)
1227 sched_type = RTE_SCHED_TYPE_ORDERED;
1229 sched_type = RTE_SCHED_TYPE_PARALLEL;
1231 sched_type = evq_conf->schedule_type;
1233 cfg.response = (uintptr_t)&response;
1234 cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
1235 cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
1236 cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
1238 if (sched_type != RTE_SCHED_TYPE_ORDERED) {
1239 cfg.num_sequence_numbers = 0;
1240 cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
1243 ret = dlb_iface_ldb_queue_create(handle, &cfg);
1245 DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
1246 ret, dlb_error_strings[response.status]);
1250 qm_qid = response.id;
1252 /* Save off queue config for debug, resource lookups, and reconfig */
1253 queue->num_qid_inflights = cfg.num_qid_inflights;
1254 queue->num_atm_inflights = cfg.num_atomic_inflights;
1256 queue->sched_type = sched_type;
1257 queue->config_state = DLB_CONFIGURED;
1259 DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
1261 cfg.num_atomic_inflights,
1262 cfg.num_sequence_numbers,
1263 cfg.num_qid_inflights);
1269 dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
1271 struct dlb_hw_dev *handle = &dlb->qm_instance;
1272 struct dlb_get_sn_allocation_args cfg;
1273 struct dlb_cmd_response response;
1277 cfg.response = (uintptr_t)&response;
1279 ret = dlb_iface_get_sn_allocation(handle, &cfg);
1281 DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
1282 ret, dlb_error_strings[response.status]);
1290 dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
1292 struct dlb_hw_dev *handle = &dlb->qm_instance;
1293 struct dlb_set_sn_allocation_args cfg;
1294 struct dlb_cmd_response response;
1299 cfg.response = (uintptr_t)&response;
1301 ret = dlb_iface_set_sn_allocation(handle, &cfg);
1303 DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
1304 ret, dlb_error_strings[response.status]);
1312 dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
1314 struct dlb_hw_dev *handle = &dlb->qm_instance;
1315 struct dlb_get_sn_occupancy_args cfg;
1316 struct dlb_cmd_response response;
1320 cfg.response = (uintptr_t)&response;
1322 ret = dlb_iface_get_sn_occupancy(handle, &cfg);
1324 DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
1325 ret, dlb_error_strings[response.status]);
1332 /* Query the current sequence number allocations and, if they conflict with the
1333 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
1334 * This is best-effort; if it fails, the PMD still attempts to configure the
1335 * load-balanced queue, and that configuration may then fail with an error.
1338 dlb_program_sn_allocation(struct dlb_eventdev *dlb,
1339 const struct rte_event_queue_conf *queue_conf)
1341 int grp_occupancy[DLB_NUM_SN_GROUPS];
1342 int grp_alloc[DLB_NUM_SN_GROUPS];
1343 int i, sequence_numbers;
1345 sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
1347 for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
1350 grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
1351 if (grp_alloc[i] < 0)
1354 total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
1356 grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
1357 if (grp_occupancy[i] < 0)
1360 /* DLB has at least one available slot for the requested
1361 * sequence numbers, so no further configuration required.
1363 if (grp_alloc[i] == sequence_numbers &&
1364 grp_occupancy[i] < total_slots)
1368 /* None of the sequence number groups are configured for the requested
1369 * sequence numbers, so we have to reconfigure one of them. This is
1370 * only possible if a group is not in use.
1372 for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
1373 if (grp_occupancy[i] == 0)
1377 if (i == DLB_NUM_SN_GROUPS) {
1378 DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
1379 __func__, sequence_numbers);
1383 /* Attempt to configure slot i with the requested number of sequence
1384 * numbers. Ignore the return value -- if this fails, the error will be
1385 * caught during subsequent queue configuration.
1387 dlb_set_sn_allocation(dlb, i, sequence_numbers);
1391 dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
1392 struct dlb_eventdev_queue *ev_queue,
1393 const struct rte_event_queue_conf *queue_conf)
1395 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1398 if (queue_conf->nb_atomic_order_sequences)
1399 dlb_program_sn_allocation(dlb, queue_conf);
1401 qm_qid = dlb_hw_create_ldb_queue(dlb,
1402 &ev_queue->qm_queue,
1405 DLB_LOG_ERR("Failed to create the load-balanced queue\n");
1410 dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
1412 ev_queue->qm_queue.id = qm_qid;
1417 static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
1421 for (i = 0; i < dlb->num_queues; i++) {
1422 if (dlb->ev_queues[i].setup_done &&
1423 dlb->ev_queues[i].qm_queue.is_directed)
1431 dlb_queue_link_teardown(struct dlb_eventdev *dlb,
1432 struct dlb_eventdev_queue *ev_queue)
1434 struct dlb_eventdev_port *ev_port;
1437 for (i = 0; i < dlb->num_ports; i++) {
1438 ev_port = &dlb->ev_ports[i];
1440 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1441 if (!ev_port->link[j].valid ||
1442 ev_port->link[j].queue_id != ev_queue->id)
1445 ev_port->link[j].valid = false;
1446 ev_port->num_links--;
1450 ev_queue->num_links = 0;
1454 dlb_eventdev_queue_setup(struct rte_eventdev *dev,
1456 const struct rte_event_queue_conf *queue_conf)
1458 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1459 struct dlb_eventdev_queue *ev_queue;
1462 if (queue_conf == NULL)
1465 if (ev_qid >= dlb->num_queues)
1468 ev_queue = &dlb->ev_queues[ev_qid];
1470 ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
1471 RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
1472 ev_queue->id = ev_qid;
1473 ev_queue->conf = *queue_conf;
1475 if (!ev_queue->qm_queue.is_directed) {
1476 ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
1478 /* The directed queue isn't set up until link time, at which
1479 * point we know its directed port ID. Directed queue setup
1480 * will only fail if this queue is already setup or there are
1481 * no directed queues left to configure.
1485 ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
1487 if (ev_queue->setup_done ||
1488 dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
1492 /* Tear down pre-existing port->queue links */
1493 if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
1494 dlb_queue_link_teardown(dlb, ev_queue);
1497 ev_queue->setup_done = true;
1503 dlb_port_link_teardown(struct dlb_eventdev *dlb,
1504 struct dlb_eventdev_port *ev_port)
1506 struct dlb_eventdev_queue *ev_queue;
1509 for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1510 if (!ev_port->link[i].valid)
1513 ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
1515 ev_port->link[i].valid = false;
1516 ev_port->num_links--;
1517 ev_queue->num_links--;
1522 dlb_eventdev_port_setup(struct rte_eventdev *dev,
1524 const struct rte_event_port_conf *port_conf)
1526 struct dlb_eventdev *dlb;
1527 struct dlb_eventdev_port *ev_port;
1528 bool use_rsvd_token_scheme;
1529 uint32_t adj_cq_depth;
1530 uint16_t rsvd_tokens;
1533 if (dev == NULL || port_conf == NULL) {
1534 DLB_LOG_ERR("Null parameter\n");
1538 dlb = dlb_pmd_priv(dev);
1540 if (ev_port_id >= DLB_MAX_NUM_PORTS)
1543 if (port_conf->dequeue_depth >
1544 evdev_dlb_default_info.max_event_port_dequeue_depth ||
1545 port_conf->enqueue_depth >
1546 evdev_dlb_default_info.max_event_port_enqueue_depth)
1549 ev_port = &dlb->ev_ports[ev_port_id];
1551 if (ev_port->setup_done) {
1552 DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
1556 /* The reserved token interrupt arming scheme requires that one or more
1557 * CQ tokens be reserved by the PMD. This limits the amount of CQ space
1558 * usable by the DLB, so in order to give an *effective* CQ depth equal
1559 * to the user-requested value, we double CQ depth and reserve half of
1560 * its tokens. If the user requests the max CQ depth (256) then we
1561 * cannot double it, so we reserve one token and give an effective
1562 * depth of 255 entries.
1564 use_rsvd_token_scheme = true;
1566 adj_cq_depth = port_conf->dequeue_depth;
1568 if (use_rsvd_token_scheme && adj_cq_depth < 256) {
1569 rsvd_tokens = adj_cq_depth;
1573 ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1574 RTE_EVENT_PORT_CFG_SINGLE_LINK;
1576 if (!ev_port->qm_port.is_directed) {
1577 ret = dlb_hw_create_ldb_port(dlb,
1579 port_conf->dequeue_depth,
1581 port_conf->enqueue_depth,
1583 use_rsvd_token_scheme);
1585 DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n",
1590 ret = dlb_hw_create_dir_port(dlb,
1592 port_conf->dequeue_depth,
1594 port_conf->enqueue_depth,
1596 use_rsvd_token_scheme);
1598 DLB_LOG_ERR("Failed to create the DIR port\n");
1603 /* Save off port config for reconfig */
1604 dlb->ev_ports[ev_port_id].conf = *port_conf;
1606 dlb->ev_ports[ev_port_id].id = ev_port_id;
1607 dlb->ev_ports[ev_port_id].enq_configured = true;
1608 dlb->ev_ports[ev_port_id].setup_done = true;
1609 dlb->ev_ports[ev_port_id].inflight_max =
1610 port_conf->new_event_threshold;
1611 dlb->ev_ports[ev_port_id].implicit_release =
1612 !(port_conf->event_port_cfg &
1613 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1614 dlb->ev_ports[ev_port_id].outstanding_releases = 0;
1615 dlb->ev_ports[ev_port_id].inflight_credits = 0;
1616 dlb->ev_ports[ev_port_id].credit_update_quanta =
1617 RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
1618 dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
1620 /* Tear down pre-existing port->queue links */
1621 if (dlb->run_state == DLB_RUN_STATE_STOPPED)
1622 dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
1624 dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
1630 dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
1632 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1635 /* If an event queue or port was previously configured, but hasn't been
1636 * reconfigured, reapply its original configuration.
1638 for (i = 0; i < dlb->num_queues; i++) {
1639 struct dlb_eventdev_queue *ev_queue;
1641 ev_queue = &dlb->ev_queues[i];
1643 if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
1646 ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
1648 DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
1653 for (i = 0; i < dlb->num_ports; i++) {
1654 struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
1656 if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
1659 ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
1661 DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
1671 set_dev_id(const char *key __rte_unused,
1675 int *dev_id = opaque;
1678 if (value == NULL || opaque == NULL) {
1679 DLB_LOG_ERR("NULL pointer\n");
1683 ret = dlb_string_to_int(dev_id, value);
1691 set_defer_sched(const char *key __rte_unused,
1695 int *defer_sched = opaque;
1697 if (value == NULL || opaque == NULL) {
1698 DLB_LOG_ERR("NULL pointer\n");
1702 if (strncmp(value, "on", 2) != 0) {
1703 DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
1714 set_num_atm_inflights(const char *key __rte_unused,
1718 int *num_atm_inflights = opaque;
1721 if (value == NULL || opaque == NULL) {
1722 DLB_LOG_ERR("NULL pointer\n");
1726 ret = dlb_string_to_int(num_atm_inflights, value);
1730 if (*num_atm_inflights < 0 ||
1731 *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
1732 DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
1733 DLB_MAX_NUM_ATM_INFLIGHTS);
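/* Validate a requested port->queue link: the queue ID must be in range and
 * set up (or previously configured), the port and queue must both be
 * load-balanced or both directed, there must be a free link slot, and a
 * directed port or queue may carry at most one link.
 */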
1741 dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
1746 struct dlb_eventdev *dlb = ev_port->dlb;
1747 struct dlb_eventdev_queue *ev_queue;
1748 bool port_is_dir, queue_is_dir;
1750 if (queue_id >= dlb->num_queues) {
1751 DLB_LOG_ERR("queue_id %d >= num queues %d\n",
1752 queue_id, dlb->num_queues);
1753 rte_errno = -EINVAL;
1757 ev_queue = &dlb->ev_queues[queue_id];
1759 if (!ev_queue->setup_done &&
1760 ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
1761 DLB_LOG_ERR("setup not done and not previously configured\n");
1762 rte_errno = -EINVAL;
1766 port_is_dir = ev_port->qm_port.is_directed;
1767 queue_is_dir = ev_queue->qm_queue.is_directed;
1769 if (port_is_dir != queue_is_dir) {
1770 DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
1771 queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1772 port_is_dir ? "DIR" : "LDB", ev_port->id);
1774 rte_errno = -EINVAL;
1778 /* Check if there is space for the requested link */
1779 if (!link_exists && index == -1) {
1780 DLB_LOG_ERR("no space for new link\n");
1781 rte_errno = -ENOSPC;
1785 /* Check if the directed port is already linked */
1786 if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1788 DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1790 rte_errno = -EINVAL;
1794 /* Check if the directed queue is already linked */
1795 if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1797 DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1799 rte_errno = -EINVAL;
1807 dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
1809 struct dlb_hw_dev *handle = &dlb->qm_instance;
1810 struct dlb_create_dir_queue_args cfg;
1811 struct dlb_cmd_response response;
1814 cfg.response = (uintptr_t)&response;
1816 /* The directed port is always configured before its queue */
1817 cfg.port_id = qm_port_id;
1819 ret = dlb_iface_dir_queue_create(handle, &cfg);
1821 DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
1822 ret, dlb_error_strings[response.status]);
1830 dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
1831 struct dlb_eventdev_queue *ev_queue,
1832 struct dlb_eventdev_port *ev_port)
1836 qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
1839 DLB_LOG_ERR("Failed to create the DIR queue\n");
1843 dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1845 ev_queue->qm_queue.id = qm_qid;
1851 dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
1852 uint32_t qm_port_id,
1856 struct dlb_map_qid_args cfg;
1857 struct dlb_cmd_response response;
1864 cfg.response = (uintptr_t)&response;
1865 cfg.port_id = qm_port_id;
1867 cfg.priority = EV_TO_DLB_PRIO(priority);
1869 ret = dlb_iface_map_qid(handle, &cfg);
1871 DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
1872 ret, dlb_error_strings[response.status]);
1873 DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1875 handle->domain_id, cfg.port_id,
1879 DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
1880 qm_qid, qm_port_id);
1887 dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
1888 struct dlb_eventdev_port *ev_port,
1889 struct dlb_eventdev_queue *ev_queue,
1892 int first_avail = -1;
1895 for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1896 if (ev_port->link[i].valid) {
1897 if (ev_port->link[i].queue_id == ev_queue->id &&
1898 ev_port->link[i].priority == priority) {
1899 if (ev_port->link[i].mapped)
1900 return 0; /* already mapped */
1904 if (first_avail == -1)
1908 if (first_avail == -1) {
1909 DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
1910 ev_port->qm_port.id);
1914 ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
1915 ev_port->qm_port.id,
1916 ev_queue->qm_queue.id,
1920 ev_port->link[first_avail].mapped = true;
1926 dlb_do_port_link(struct rte_eventdev *dev,
1927 struct dlb_eventdev_queue *ev_queue,
1928 struct dlb_eventdev_port *ev_port,
1931 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1934 /* Don't link until start time. */
1935 if (dlb->run_state == DLB_RUN_STATE_STOPPED)
1938 if (ev_queue->qm_queue.is_directed)
1939 err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
1941 err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
1944 DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1945 ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1946 ev_queue->id, ev_port->id);
1956 dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
1958 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
1961 /* Perform requested port->queue links */
1962 for (i = 0; i < dlb->num_ports; i++) {
1963 struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
1966 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1967 struct dlb_eventdev_queue *ev_queue;
1968 uint8_t prio, queue_id;
1970 if (!ev_port->link[j].valid)
1973 prio = ev_port->link[j].priority;
1974 queue_id = ev_port->link[j].queue_id;
1976 if (dlb_validate_port_link(ev_port, queue_id, true, j))
1979 ev_queue = &dlb->ev_queues[queue_id];
1981 if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
1990 dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1991 const uint8_t queues[], const uint8_t priorities[],
1995 struct dlb_eventdev_port *ev_port = event_port;
1996 struct dlb_eventdev *dlb;
2001 if (ev_port == NULL) {
2002 DLB_LOG_ERR("dlb: evport not setup\n");
2003 rte_errno = -EINVAL;
2007 if (!ev_port->setup_done &&
2008 ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
2009 DLB_LOG_ERR("dlb: evport not setup\n");
2010 rte_errno = -EINVAL;
2014 /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
2017 if (nb_links == 0) {
2018 DLB_LOG_DBG("dlb: nb_links is 0\n");
2019 return 0; /* Ignore and return success */
2024 DLB_LOG_DBG("Linking %u queues to %s port %d\n",
2026 ev_port->qm_port.is_directed ? "DIR" : "LDB",
2029 for (i = 0; i < nb_links; i++) {
2030 struct dlb_eventdev_queue *ev_queue;
2031 uint8_t queue_id, prio;
2035 queue_id = queues[i];
2036 prio = priorities[i];
2038 /* Check if the link already exists. */
2039 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
2040 if (ev_port->link[j].valid) {
2041 if (ev_port->link[j].queue_id == queue_id) {
2051 /* could not link */
2055 /* Check if already linked at the requested priority */
2056 if (found && ev_port->link[j].priority == prio)
2059 if (dlb_validate_port_link(ev_port, queue_id, found, index))
2060 break; /* return index of offending queue */
2062 ev_queue = &dlb->ev_queues[queue_id];
2064 if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
2065 break; /* return index of offending queue */
2067 ev_queue->num_links++;
2069 ev_port->link[index].queue_id = queue_id;
2070 ev_port->link[index].priority = prio;
2071 ev_port->link[index].valid = true;
2072 /* Entry already exists? If so, then must be prio change */
2074 ev_port->num_links++;
2080 dlb_eventdev_start(struct rte_eventdev *dev)
2082 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
2083 struct dlb_hw_dev *handle = &dlb->qm_instance;
2084 struct dlb_start_domain_args cfg;
2085 struct dlb_cmd_response response;
2088 rte_spinlock_lock(&dlb->qm_instance.resource_lock);
2089 if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
2090 DLB_LOG_ERR("bad state %d for dev_start\n",
2091 (int)dlb->run_state);
2092 rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
2095 dlb->run_state = DLB_RUN_STATE_STARTING;
2096 rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
2098 /* If the device was configured more than once, some event ports and/or
2099 * queues may need to be reconfigured.
2101 ret = dlb_eventdev_reapply_configuration(dev);
2105 /* The DLB PMD delays port links until the device is started. */
2106 ret = dlb_eventdev_apply_port_links(dev);
2110 cfg.response = (uintptr_t)&response;
2112 for (i = 0; i < dlb->num_ports; i++) {
2113 if (!dlb->ev_ports[i].setup_done) {
2114 DLB_LOG_ERR("dlb: port %d not setup", i);
2119 for (i = 0; i < dlb->num_queues; i++) {
2120 if (dlb->ev_queues[i].num_links == 0) {
2121 DLB_LOG_ERR("dlb: queue %d is not linked", i);
2126 ret = dlb_iface_sched_domain_start(handle, &cfg);
2128 DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
2129 ret, dlb_error_strings[response.status]);
2133 dlb->run_state = DLB_RUN_STATE_STARTED;
2134 DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
2140 dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
2141 struct dlb_eventdev_port *ev_port)
2143 uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
2147 if (unlikely(ev_port->inflight_max < sw_inflights)) {
2148 DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2149 rte_errno = -ENOSPC;
2153 if (ev_port->inflight_credits < num) {
2154 /* check if event enqueue brings ev_port over max threshold */
2155 uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2157 if (sw_inflights + credit_update_quanta >
2158 dlb->new_event_limit) {
2160 ev_port->stats.traffic.tx_nospc_new_event_limit,
2162 rte_errno = -ENOSPC;
2166 __atomic_fetch_add(&dlb->inflights, credit_update_quanta,
2168 ev_port->inflight_credits += (credit_update_quanta);
2170 if (ev_port->inflight_credits < num) {
2172 ev_port->stats.traffic.tx_nospc_inflight_credits,
2174 rte_errno = -ENOSPC;
2183 dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
2184 struct dlb_eventdev_port *ev_port)
2186 uint16_t quanta = ev_port->credit_update_quanta;
2188 if (ev_port->inflight_credits >= quanta * 2) {
2189 /* Replenish credits, saving one quanta for enqueues */
2190 uint16_t val = ev_port->inflight_credits - quanta;
2192 __atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
2193 ev_port->inflight_credits -= val;
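/* Read the port's LDB or DIR credit pop count from the process-local mapped
 * counter page. Hardware credits are replenished lazily: when the cached
 * count reaches zero, the new cache is the delta between the current pop
 * count and the count recorded the last time credits were exhausted.
 */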
2197 static __rte_always_inline uint16_t
2198 dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
2200 volatile uint16_t *popcount;
2203 popcount = port_data->ldb_popcount;
2205 popcount = port_data->dir_popcount;
2211 dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
2212 struct process_local_port_data *port_data)
2214 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2217 pc = dlb_read_pc(port_data, true);
2219 qm_port->cached_ldb_credits = pc -
2220 qm_port->ldb_pushcount_at_credit_expiry;
2221 if (unlikely(qm_port->cached_ldb_credits == 0)) {
2223 qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2226 DLB_LOG_DBG("ldb credits exhausted\n");
2229 qm_port->ldb_pushcount_at_credit_expiry +=
2230 qm_port->cached_ldb_credits;
2237 dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
2238 struct process_local_port_data *port_data)
2240 if (unlikely(qm_port->cached_dir_credits == 0)) {
2243 pc = dlb_read_pc(port_data, false);
2245 qm_port->cached_dir_credits = pc -
2246 qm_port->dir_pushcount_at_credit_expiry;
2248 if (unlikely(qm_port->cached_dir_credits == 0)) {
2250 qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2253 DLB_LOG_DBG("dir credits exhausted\n");
2256 qm_port->dir_pushcount_at_credit_expiry +=
2257 qm_port->cached_dir_credits;
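/* Per-event enqueue preparation: resolve the destination queue, verify that
 * hardware (LDB or DIR) and software credits are available, translate the
 * eventdev sched type into the DLB sched type, and update the credit and
 * release accounting for NEW/FORWARD/RELEASE operations.
 */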
2264 dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
2265 struct dlb_port *qm_port,
2266 const struct rte_event ev[],
2267 struct process_local_port_data *port_data,
2268 uint8_t *sched_type,
2271 struct dlb_eventdev *dlb = ev_port->dlb;
2272 struct dlb_eventdev_queue *ev_queue;
2273 uint16_t *cached_credits = NULL;
2274 struct dlb_queue *qm_queue;
2276 ev_queue = &dlb->ev_queues[ev->queue_id];
2277 qm_queue = &ev_queue->qm_queue;
2278 *queue_id = qm_queue->id;
2280 /* Ignore sched_type and hardware credits on release events */
2281 if (ev->op == RTE_EVENT_OP_RELEASE)
2284 if (!qm_queue->is_directed) {
2285 /* Load balanced destination queue */
2287 if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
2288 rte_errno = -ENOSPC;
2291 cached_credits = &qm_port->cached_ldb_credits;
2293 switch (ev->sched_type) {
2294 case RTE_SCHED_TYPE_ORDERED:
2295 DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2296 if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2297 DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
2299 rte_errno = -EINVAL;
2302 *sched_type = DLB_SCHED_ORDERED;
2304 case RTE_SCHED_TYPE_ATOMIC:
2305 DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2306 *sched_type = DLB_SCHED_ATOMIC;
2308 case RTE_SCHED_TYPE_PARALLEL:
2309 DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2310 if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2311 *sched_type = DLB_SCHED_ORDERED;
2313 *sched_type = DLB_SCHED_UNORDERED;
2316 DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2317 DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
2318 rte_errno = -EINVAL;
2322 /* Directed destination queue */
2324 if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
2325 rte_errno = -ENOSPC;
2328 cached_credits = &qm_port->cached_dir_credits;
2330 DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2332 *sched_type = DLB_SCHED_DIRECTED;
2337 case RTE_EVENT_OP_NEW:
2338 /* Check that a sw credit is available */
2339 if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
2340 rte_errno = -ENOSPC;
2343 ev_port->inflight_credits--;
2344 (*cached_credits)--;
2346 case RTE_EVENT_OP_FORWARD:
2347 /* Check for outstanding_releases underflow. If this occurs,
2348 * the application is not using the EVENT_OPs correctly; for
2349 * example, forwarding or releasing events that were not
2352 RTE_ASSERT(ev_port->outstanding_releases > 0);
2353 ev_port->outstanding_releases--;
2354 qm_port->issued_releases++;
2355 (*cached_credits)--;
2357 case RTE_EVENT_OP_RELEASE:
2358 ev_port->inflight_credits++;
2359 /* Check for outstanding_releases underflow. If this occurs,
2360 * the application is not using the EVENT_OPs correctly; for
2361 * example, forwarding or releasing events that were not
2364 RTE_ASSERT(ev_port->outstanding_releases > 0);
2365 ev_port->outstanding_releases--;
2366 qm_port->issued_releases++;
2367 /* Replenish s/w credits if enough are cached */
2368 dlb_replenish_sw_credits(dlb, ev_port);
2372 DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2373 DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2375 #ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
2376 if (ev->op != RTE_EVENT_OP_RELEASE) {
2377 DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
2378 DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
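/* Map each eventdev operation to the HCW command byte, per port type. A
 * directed port has no forward or release semantics, so forwards are encoded
 * as new enqueues and releases become no-ops.
 */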
2385 static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
2387 /* Load-balanced cmd bytes */
2388 [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
2389 [RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
2390 [RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
2393 /* Directed cmd bytes */
2394 [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
2395 [RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
2396 [RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
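/* Build up to four 16B hardware control words (enqueue QEs) from an array of
 * rte_events. The metadata halves of two QEs are assembled in each 128b SSE
 * register and then stored to the port's cache-line-aligned enqueue QE
 * buffer (qe4).
 */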
2401 dlb_event_build_hcws(struct dlb_port *qm_port,
2402 const struct rte_event ev[],
2404 uint8_t *sched_type,
2407 struct dlb_enqueue_qe *qe;
2408 uint16_t sched_word[4];
2414 sse_qe[0] = _mm_setzero_si128();
2415 sse_qe[1] = _mm_setzero_si128();
2419 /* Construct the metadata portion of two HCWs in one 128b SSE
2420 * register. HCW metadata is constructed in the SSE registers
2422 * sse_qe[0][63:0]: qe[0]'s metadata
2423 * sse_qe[0][127:64]: qe[1]'s metadata
2424 * sse_qe[1][63:0]: qe[2]'s metadata
2425 * sse_qe[1][127:64]: qe[3]'s metadata
2428 /* Convert the event operation into a command byte and store it
2430 * sse_qe[0][63:56] = cmd_byte_map[is_directed][ev[0].op]
2431 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2432 * sse_qe[1][63:56] = cmd_byte_map[is_directed][ev[2].op]
2433 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2435 #define DLB_QE_CMD_BYTE 7
2436 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2437 cmd_byte_map[qm_port->is_directed][ev[0].op],
2439 sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2440 cmd_byte_map[qm_port->is_directed][ev[1].op],
2441 DLB_QE_CMD_BYTE + 8);
2442 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2443 cmd_byte_map[qm_port->is_directed][ev[2].op],
2445 sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2446 cmd_byte_map[qm_port->is_directed][ev[3].op],
2447 DLB_QE_CMD_BYTE + 8);
2449 /* Store priority, scheduling type, and queue ID in the sched
2450 * word array because these values are re-used when the
2451 * destination is a directed queue.
2453 sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
2454 sched_type[0] << 8 |
2456 sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
2457 sched_type[1] << 8 |
2459 sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
2460 sched_type[2] << 8 |
2462 sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
2463 sched_type[3] << 8 |
2466 /* Store the event priority, scheduling type, and queue ID in
2468 * sse_qe[0][31:16] = sched_word[0]
2469 * sse_qe[0][95:80] = sched_word[1]
2470 * sse_qe[1][31:16] = sched_word[2]
2471 * sse_qe[1][95:80] = sched_word[3]
2473 #define DLB_QE_QID_SCHED_WORD 1
2474 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2476 DLB_QE_QID_SCHED_WORD);
2477 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2479 DLB_QE_QID_SCHED_WORD + 4);
2480 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2482 DLB_QE_QID_SCHED_WORD);
2483 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2485 DLB_QE_QID_SCHED_WORD + 4);
2487 /* If the destination is a load-balanced queue, store the lock
2488 * ID. If it is a directed queue, DLB places this field in
2489 * bytes 10-11 of the received QE, so we format it accordingly:
2490 * sse_qe[0][47:32] = dir queue ? sched_word[0] : flow_id[0]
2491 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2492 * sse_qe[1][47:32] = dir queue ? sched_word[2] : flow_id[2]
2493 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2495 #define DLB_QE_LOCK_ID_WORD 2
2496 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2497 (sched_type[0] == DLB_SCHED_DIRECTED) ?
2498 sched_word[0] : ev[0].flow_id,
2499 DLB_QE_LOCK_ID_WORD);
2500 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2501 (sched_type[1] == DLB_SCHED_DIRECTED) ?
2502 sched_word[1] : ev[1].flow_id,
2503 DLB_QE_LOCK_ID_WORD + 4);
2504 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2505 (sched_type[2] == DLB_SCHED_DIRECTED) ?
2506 sched_word[2] : ev[2].flow_id,
2507 DLB_QE_LOCK_ID_WORD);
2508 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2509 (sched_type[3] == DLB_SCHED_DIRECTED) ?
2510 sched_word[3] : ev[3].flow_id,
2511 DLB_QE_LOCK_ID_WORD + 4);
2513 /* Store the event type and sub event type in the metadata:
2514 * sse_qe[0][15:0] = flow_id[0]
2515 * sse_qe[0][79:64] = flow_id[1]
2516 * sse_qe[1][15:0] = flow_id[2]
2517 * sse_qe[1][79:64] = flow_id[3]
2519 #define DLB_QE_EV_TYPE_WORD 0
2520 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2521 ev[0].sub_event_type << 8 |
2523 DLB_QE_EV_TYPE_WORD);
2524 sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2525 ev[1].sub_event_type << 8 |
2527 DLB_QE_EV_TYPE_WORD + 4);
2528 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2529 ev[2].sub_event_type << 8 |
2531 DLB_QE_EV_TYPE_WORD);
2532 sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2533 ev[3].sub_event_type << 8 |
2535 DLB_QE_EV_TYPE_WORD + 4);
2537 /* Store the metadata to memory (use the double-precision
2538 * _mm_storeh_pd because there is no integer function for
2539 * storing the upper 64b):
2540 * qe[0] metadata = sse_qe[0][63:0]
2541 * qe[1] metadata = sse_qe[0][127:64]
2542 * qe[2] metadata = sse_qe[1][63:0]
2543 * qe[3] metadata = sse_qe[1][127:64]
2545 _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2546 _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2547 (__m128d) sse_qe[0]);
2548 _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2549 _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2550 (__m128d) sse_qe[1]);
2552 qe[0].data = ev[0].u64;
2553 qe[1].data = ev[1].u64;
2554 qe[2].data = ev[2].u64;
2555 qe[3].data = ev[3].u64;
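/* Scalar path: build each remaining QE field by field, covering bursts
 * that do not fill a complete cache line of four QEs.
 */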
2561 for (i = 0; i < num; i++) {
2563 cmd_byte_map[qm_port->is_directed][ev[i].op];
2564 qe[i].sched_type = sched_type[i];
2565 qe[i].data = ev[i].u64;
2566 qe[i].qid = queue_id[i];
2567 qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);
2568 qe[i].lock_id = ev[i].flow_id;
2569 if (sched_type[i] == DLB_SCHED_DIRECTED) {
2570 struct dlb_msg_info *info =
2571 (struct dlb_msg_info *)&qe[i].lock_id;
2573 info->qid = queue_id[i];
2574 info->sched_type = DLB_SCHED_DIRECTED;
2575 info->priority = qe[i].priority;
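/* For directed destinations the lock_id bytes are repurposed to carry
 * the queue ID, sched type, and priority, mirroring the sched_word
 * layout used by the vector path above.
 */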
2577 qe[i].u.event_type.major = ev[i].event_type;
2578 qe[i].u.event_type.sub = ev[i].sub_event_type;
2586 static __rte_always_inline void
2587 dlb_pp_write(struct dlb_enqueue_qe *qe4,
2588 struct process_local_port_data *port_data)
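/* dlb_movdir64b() issues a MOVDIR64B: the four 16B QEs are written to
 * the producer port window as a single 64-byte direct store.
 */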
2590 dlb_movdir64b(port_data->pp_addr, qe4);
2594 dlb_hw_do_enqueue(struct dlb_port *qm_port,
2596 struct process_local_port_data *port_data)
2598 DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");
2600 /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2601 * application writes complete before enqueueing the release HCW.
2606 dlb_pp_write(qm_port->qe4, port_data);
2610 dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)
2612 struct process_local_port_data *port_data;
2613 struct dlb_cq_pop_qe *qe;
2615 RTE_ASSERT(qm_port->config_state == DLB_CONFIGURED);
2617 if (qm_port->use_rsvd_token_scheme) {
2618 /* Check if there's a deficit of reserved tokens, and return
2619 * early if there are no (unreserved) tokens to consume.
2621 if (num <= qm_port->cq_rsvd_token_deficit) {
2622 qm_port->cq_rsvd_token_deficit -= num;
2623 qm_port->owed_tokens = 0;
2626 num -= qm_port->cq_rsvd_token_deficit;
2627 qm_port->cq_rsvd_token_deficit = 0;
2630 qe = qm_port->consume_qe;
2632 qe->tokens = num - 1;
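/* The QE's token field is encoded as the pop count minus one. */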
2635 /* No store fence needed since no pointer is being sent, and CQ token
2636 * pops can be safely reordered with other HCWs.
2638 port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
2640 dlb_movntdq_single(port_data->pp_addr, qe);
2642 DLB_LOG_DBG("dlb: consume immediate - %d QEs\n", num);
2644 qm_port->owed_tokens = 0;
2649 static inline uint16_t
2650 __dlb_event_enqueue_burst(void *event_port,
2651 const struct rte_event events[],
2654 struct dlb_eventdev_port *ev_port = event_port;
2655 struct dlb_port *qm_port = &ev_port->qm_port;
2656 struct process_local_port_data *port_data;
2659 RTE_ASSERT(ev_port->enq_configured);
2660 RTE_ASSERT(events != NULL);
2665 port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
2668 uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
2669 uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
2673 memset(qm_port->qe4,
2675 DLB_NUM_QES_PER_CACHE_LINE *
2676 sizeof(struct dlb_enqueue_qe));
2678 for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2679 const struct rte_event *ev = &events[i + j];
2681 if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
2682 port_data, &sched_types[j],
2690 dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
2691 sched_types, queue_ids);
2693 dlb_hw_do_enqueue(qm_port, i == 0, port_data);
2695 /* Don't include the token pop QE in the enqueue count */
2698 /* Don't interpret j < DLB_NUM_... as out-of-credits if pop_offs != 0. */
2701 if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2705 RTE_ASSERT(!((i == 0 && rte_errno != -ENOSPC)));
2710 static inline uint16_t
2711 dlb_event_enqueue_burst(void *event_port,
2712 const struct rte_event events[],
2715 return __dlb_event_enqueue_burst(event_port, events, num);
2718 static inline uint16_t
2719 dlb_event_enqueue(void *event_port,
2720 const struct rte_event events[])
2722 return __dlb_event_enqueue_burst(event_port, events, 1);
2726 dlb_event_enqueue_new_burst(void *event_port,
2727 const struct rte_event events[],
2730 return __dlb_event_enqueue_burst(event_port, events, num);
2734 dlb_event_enqueue_forward_burst(void *event_port,
2735 const struct rte_event events[],
2738 return __dlb_event_enqueue_burst(event_port, events, num);
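/* The new- and forward-specific burst entry points share the generic
 * enqueue path above; per-op handling (credit checks, command bytes) is
 * done in dlb_event_enqueue_prep() based on ev->op.
 */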
2741 static __rte_always_inline int
2742 dlb_recv_qe(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe,
2745 uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
2746 {0x00, 0x01, 0x03, 0x07} };
2747 uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
2748 volatile struct dlb_dequeue_qe *cq_addr;
2749 __m128i *qes = (__m128i *)qe;
2750 uint64_t *cache_line_base;
2753 cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
2754 cq_addr = &cq_addr[qm_port->cq_idx];
2756 cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
2757 *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
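/* cache_line_base is the 64B-aligned line holding the current CQ entry;
 * *offset is that entry's 16B slot (0-3) within the line.
 */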
2759 /* Load the next CQ cache line from memory. Pack these reads as tightly
2760 * as possible to reduce the chance that DLB invalidates the line while
2761 * the CPU is reading it. Read the cache line backwards to ensure that
2762 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
2764 * (Valid QEs start at &qe[offset])
2766 qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
2767 qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
2768 qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
2769 qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
2771 /* Evict the cache line ASAP */
2772 rte_cldemote(cache_line_base);
2774 /* Extract and combine the gen bits */
2775 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
2776 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
2777 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
2778 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
2780 /* XOR the combined bits such that a 1 represents a valid QE */
2781 gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
2783 /* Mask off gen bits we don't care about */
2784 gen_bits &= and_mask[*offset];
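/* The popcount of the surviving bits is the number of valid QEs
 * available starting at *offset.
 */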
2786 return __builtin_popcount(gen_bits);
2790 dlb_inc_cq_idx(struct dlb_port *qm_port, int cnt)
2792 uint16_t idx = qm_port->cq_idx_unmasked + cnt;
2794 qm_port->cq_idx_unmasked = idx;
2795 qm_port->cq_idx = idx & qm_port->cq_depth_mask;
2796 qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
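/* The gen bit toggles each time the CQ index wraps; dlb_recv_qe*() uses
 * it to distinguish newly written QEs from stale ones.
 */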
2800 dlb_process_dequeue_qes(struct dlb_eventdev_port *ev_port,
2801 struct dlb_port *qm_port,
2802 struct rte_event *events,
2803 struct dlb_dequeue_qe *qes,
2806 uint8_t *qid_mappings = qm_port->qid_mappings;
2809 RTE_SET_USED(ev_port); /* avoids unused variable error */
2811 for (i = 0, num = 0; i < cnt; i++) {
2812 struct dlb_dequeue_qe *qe = &qes[i];
2813 int sched_type_map[4] = {
2814 [DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2815 [DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2816 [DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2817 [DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2820 DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
2821 (long long)qe->data, qe->qid,
2822 qe->u.event_type.major,
2823 qe->u.event_type.sub,
2824 qe->pp_id, qe->sched_type, qe->qid, qe->error);
2826 /* Fill in event information.
2827 * Note that flow_id must be embedded in the data by
2828 * the app, such as the mbuf RSS hash field if the data is an mbuf.
2831 if (unlikely(qe->error)) {
2832 DLB_LOG_ERR("QE error bit ON\n");
2833 DLB_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
2834 dlb_consume_qe_immediate(qm_port, 1);
2835 continue; /* Ignore */
2838 events[num].u64 = qe->data;
2839 events[num].queue_id = qid_mappings[qe->qid];
2840 events[num].priority = DLB_TO_EV_PRIO((uint8_t)qe->priority);
2841 events[num].event_type = qe->u.event_type.major;
2842 events[num].sub_event_type = qe->u.event_type.sub;
2843 events[num].sched_type = sched_type_map[qe->sched_type];
2844 DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
2847 DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num);
2853 dlb_process_dequeue_four_qes(struct dlb_eventdev_port *ev_port,
2854 struct dlb_port *qm_port,
2855 struct rte_event *events,
2856 struct dlb_dequeue_qe *qes)
2858 int sched_type_map[] = {
2859 [DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2860 [DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2861 [DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2862 [DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2864 const int num_events = DLB_NUM_QES_PER_CACHE_LINE;
2865 uint8_t *qid_mappings = qm_port->qid_mappings;
2869 /* In the unlikely case that any of the QE error bits are set, process
2870 * them one at a time.
2872 if (unlikely(qes[0].error || qes[1].error ||
2873 qes[2].error || qes[3].error))
2874 return dlb_process_dequeue_qes(ev_port, qm_port, events,
2877 for (i = 0; i < DLB_NUM_QES_PER_CACHE_LINE; i++) {
2878 DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
2879 (long long)qes[i].data, qes[i].qid,
2880 qes[i].u.event_type.major,
2881 qes[i].u.event_type.sub,
2882 qes[i].pp_id, qes[i].sched_type, qes[i].qid,
2886 events[0].u64 = qes[0].data;
2887 events[1].u64 = qes[1].data;
2888 events[2].u64 = qes[2].data;
2889 events[3].u64 = qes[3].data;
2891 /* Construct the metadata portion of two struct rte_events
2892 * in one 128b SSE register. Event metadata is constructed in the SSE
2893 * registers like so:
2894 * sse_evt[0][63:0]: event[0]'s metadata
2895 * sse_evt[0][127:64]: event[1]'s metadata
2896 * sse_evt[1][63:0]: event[2]'s metadata
2897 * sse_evt[1][127:64]: event[3]'s metadata
2899 sse_evt[0] = _mm_setzero_si128();
2900 sse_evt[1] = _mm_setzero_si128();
2902 /* Convert the hardware queue ID to an event queue ID and store it in
2904 * sse_evt[0][47:40] = qid_mappings[qes[0].qid]
2905 * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
2906 * sse_evt[1][47:40] = qid_mappings[qes[2].qid]
2907 * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
2909 #define DLB_EVENT_QUEUE_ID_BYTE 5
2910 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2911 qid_mappings[qes[0].qid],
2912 DLB_EVENT_QUEUE_ID_BYTE);
2913 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2914 qid_mappings[qes[1].qid],
2915 DLB_EVENT_QUEUE_ID_BYTE + 8);
2916 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2917 qid_mappings[qes[2].qid],
2918 DLB_EVENT_QUEUE_ID_BYTE);
2919 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2920 qid_mappings[qes[3].qid],
2921 DLB_EVENT_QUEUE_ID_BYTE + 8);
2923 /* Convert the hardware priority to an event priority and store it in
2925 * sse_evt[0][55:48] = DLB_TO_EV_PRIO(qes[0].priority)
2926 * sse_evt[0][119:112] = DLB_TO_EV_PRIO(qes[1].priority)
2927 * sse_evt[1][55:48] = DLB_TO_EV_PRIO(qes[2].priority)
2928 * sse_evt[1][119:112] = DLB_TO_EV_PRIO(qes[3].priority)
2930 #define DLB_EVENT_PRIO_BYTE 6
2931 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2932 DLB_TO_EV_PRIO((uint8_t)qes[0].priority),
2933 DLB_EVENT_PRIO_BYTE);
2934 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2935 DLB_TO_EV_PRIO((uint8_t)qes[1].priority),
2936 DLB_EVENT_PRIO_BYTE + 8);
2937 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2938 DLB_TO_EV_PRIO((uint8_t)qes[2].priority),
2939 DLB_EVENT_PRIO_BYTE);
2940 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2941 DLB_TO_EV_PRIO((uint8_t)qes[3].priority),
2942 DLB_EVENT_PRIO_BYTE + 8);
2944 /* Write the event type and sub event type to the event metadata. Leave
2945 * flow ID unspecified, since the hardware does not maintain it during scheduling:
2947 * sse_evt[0][31:0] = qes[0].u.event_type.major << 28 |
2948 * qes[0].u.event_type.sub << 20;
2949 * sse_evt[0][95:64] = qes[1].u.event_type.major << 28 |
2950 * qes[1].u.event_type.sub << 20;
2951 * sse_evt[1][31:0] = qes[2].u.event_type.major << 28 |
2952 * qes[2].u.event_type.sub << 20;
2953 * sse_evt[1][95:64] = qes[3].u.event_type.major << 28 |
2954 * qes[3].u.event_type.sub << 20;
2956 #define DLB_EVENT_EV_TYPE_DW 0
2957 #define DLB_EVENT_EV_TYPE_SHIFT 28
2958 #define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
2959 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
2960 qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
2961 qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
2962 DLB_EVENT_EV_TYPE_DW);
2963 sse_evt[0] = _mm_insert_epi32(sse_evt[0],
2964 qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
2965 qes[1].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
2966 DLB_EVENT_EV_TYPE_DW + 2);
2967 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
2968 qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
2969 qes[2].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
2970 DLB_EVENT_EV_TYPE_DW);
2971 sse_evt[1] = _mm_insert_epi32(sse_evt[1],
2972 qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
2973 qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
2974 DLB_EVENT_EV_TYPE_DW + 2);
2976 /* Write the sched type to the event metadata. 'op' and 'rsvd' are not set:
2978 * sse_evt[0][39:32] = sched_type_map[qes[0].sched_type] << 6
2979 * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
2980 * sse_evt[1][39:32] = sched_type_map[qes[2].sched_type] << 6
2981 * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
2983 #define DLB_EVENT_SCHED_TYPE_BYTE 4
2984 #define DLB_EVENT_SCHED_TYPE_SHIFT 6
2985 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2986 sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
2987 DLB_EVENT_SCHED_TYPE_BYTE);
2988 sse_evt[0] = _mm_insert_epi8(sse_evt[0],
2989 sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
2990 DLB_EVENT_SCHED_TYPE_BYTE + 8);
2991 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2992 sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
2993 DLB_EVENT_SCHED_TYPE_BYTE);
2994 sse_evt[1] = _mm_insert_epi8(sse_evt[1],
2995 sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
2996 DLB_EVENT_SCHED_TYPE_BYTE + 8);
2998 /* Store the metadata to the event (use the double-precision
2999 * _mm_storeh_pd because there is no integer function for storing the upper 64b):
3001 * events[0].event = sse_evt[0][63:0]
3002 * events[1].event = sse_evt[0][127:64]
3003 * events[2].event = sse_evt[1][63:0]
3004 * events[3].event = sse_evt[1][127:64]
3006 _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3007 _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3008 _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3009 _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3011 DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3012 DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3013 DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3014 DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3016 DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3022 dlb_dequeue_wait(struct dlb_eventdev *dlb,
3023 struct dlb_eventdev_port *ev_port,
3024 struct dlb_port *qm_port,
3026 uint64_t start_ticks)
3028 struct process_local_port_data *port_data;
3029 uint64_t elapsed_ticks;
3031 port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
3033 elapsed_ticks = rte_get_timer_cycles() - start_ticks;
3035 /* Wait/poll time expired */
3036 if (elapsed_ticks >= timeout) {
3037 /* Interrupts not supported by PF PMD */
3039 } else if (dlb->umwait_allowed) {
3040 volatile struct dlb_dequeue_qe *cq_base;
3043 struct dlb_dequeue_qe qe;
3045 uint64_t expected_value;
3046 volatile uint64_t *monitor_addr;
3048 qe_mask.qe.cq_gen = 1; /* set mask */
3050 cq_base = port_data->cq_base;
3051 monitor_addr = (volatile uint64_t *)(volatile void *)
3052 &cq_base[qm_port->cq_idx];
3053 monitor_addr++; /* cq_gen bit is in second 64bit location */
3055 if (qm_port->gen_bit)
3056 expected_value = qe_mask.raw_qe[1];
3060 rte_power_monitor(monitor_addr, expected_value,
3061 qe_mask.raw_qe[1], timeout + start_ticks,
3064 DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
3066 uint64_t poll_interval = RTE_LIBRTE_PMD_DLB_POLL_INTERVAL;
3067 uint64_t curr_ticks = rte_get_timer_cycles();
3068 uint64_t init_ticks = curr_ticks;
3070 while ((curr_ticks - start_ticks < timeout) &&
3071 (curr_ticks - init_ticks < poll_interval))
3072 curr_ticks = rte_get_timer_cycles();
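/* Bounded busy-wait: spin until either the overall dequeue timeout or
 * the configured poll interval elapses, then fall through so the caller
 * re-polls the CQ.
 */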
3078 static inline int16_t
3079 dlb_hw_dequeue(struct dlb_eventdev *dlb,
3080 struct dlb_eventdev_port *ev_port,
3081 struct rte_event *events,
3083 uint64_t dequeue_timeout_ticks)
3086 uint64_t start_ticks = 0ULL;
3087 struct dlb_port *qm_port;
3090 qm_port = &ev_port->qm_port;
3092 /* If configured for per-dequeue wait, use the wait value provided
3093 * to this API. Otherwise use the global value set at eventdev
3094 * configuration time.
3096 if (!dlb->global_dequeue_wait)
3097 timeout = dequeue_timeout_ticks;
3099 timeout = dlb->global_dequeue_wait_ticks;
3102 start_ticks = rte_get_timer_cycles();
3104 while (num < max_num) {
3105 struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
3109 /* Copy up to 4 QEs from the current cache line into qes */
3110 num_avail = dlb_recv_qe(qm_port, qes, &offset);
3112 /* But don't process more than the user requested */
3113 num_avail = RTE_MIN(num_avail, max_num - num);
3115 dlb_inc_cq_idx(qm_port, num_avail);
3117 if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
3118 num += dlb_process_dequeue_four_qes(ev_port,
3123 num += dlb_process_dequeue_qes(ev_port,
3128 else if ((timeout == 0) || (num > 0))
3129 /* Not waiting in any form, or 1+ events received? */
3131 else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
3132 timeout, start_ticks))
3136 qm_port->owed_tokens += num;
3138 dlb_consume_qe_immediate(qm_port, num);
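/* CQ tokens for the newly dequeued QEs are returned to hardware
 * immediately, freeing CQ space for further scheduling to this port.
 */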
3140 ev_port->outstanding_releases += num;
3145 static __rte_always_inline int
3146 dlb_recv_qe_sparse(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe)
3148 volatile struct dlb_dequeue_qe *cq_addr;
3149 uint8_t xor_mask[2] = {0x0F, 0x00};
3150 const uint8_t and_mask = 0x0F;
3151 __m128i *qes = (__m128i *)qe;
3152 uint8_t gen_bits, gen_bit;
3156 cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3158 idx = qm_port->cq_idx;
3160 /* Load the next 4 QEs */
3161 addr[0] = (uintptr_t)&cq_addr[idx];
3162 addr[1] = (uintptr_t)&cq_addr[(idx + 4) & qm_port->cq_depth_mask];
3163 addr[2] = (uintptr_t)&cq_addr[(idx + 8) & qm_port->cq_depth_mask];
3164 addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3166 /* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
3167 rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3168 rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3169 rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3170 rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3172 /* Correct the xor_mask for wrap-around QEs */
3173 gen_bit = qm_port->gen_bit;
3174 xor_mask[gen_bit] ^= !!((idx + 4) > qm_port->cq_depth_mask) << 1;
3175 xor_mask[gen_bit] ^= !!((idx + 8) > qm_port->cq_depth_mask) << 2;
3176 xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
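/* QE slots that lie past the CQ wrap point carry the opposite gen bit,
 * so invert the expected value for those positions.
 */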
3178 /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3179 * valid, then QEs[0:N-1] are too.
3181 qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3182 rte_compiler_barrier();
3183 qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3184 rte_compiler_barrier();
3185 qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3186 rte_compiler_barrier();
3187 qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3189 /* Extract and combine the gen bits */
3190 gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3191 ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3192 ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3193 ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3195 /* XOR the combined bits such that a 1 represents a valid QE */
3196 gen_bits ^= xor_mask[gen_bit];
3198 /* Mask off gen bits we don't care about */
3199 gen_bits &= and_mask;
3201 return __builtin_popcount(gen_bits);
3204 static inline int16_t
3205 dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,
3206 struct dlb_eventdev_port *ev_port,
3207 struct rte_event *events,
3209 uint64_t dequeue_timeout_ticks)
3212 uint64_t start_ticks = 0ULL;
3213 struct dlb_port *qm_port;
3216 qm_port = &ev_port->qm_port;
3218 /* If configured for per-dequeue wait, use the wait value provided
3219 * to this API. Otherwise use the global value set at eventdev
3220 * configuration time.
3222 if (!dlb->global_dequeue_wait)
3223 timeout = dequeue_timeout_ticks;
3225 timeout = dlb->global_dequeue_wait_ticks;
3228 start_ticks = rte_get_timer_cycles();
3230 while (num < max_num) {
3231 struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
3234 /* Copy up to 4 QEs from the current cache line into qes */
3235 num_avail = dlb_recv_qe_sparse(qm_port, qes);
3237 /* But don't process more than the user requested */
3238 num_avail = RTE_MIN(num_avail, max_num - num);
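/* In sparse CQ mode each QE occupies a full cache line (four QE-sized
 * slots), so the CQ index advances by four per QE received.
 */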
3240 dlb_inc_cq_idx(qm_port, num_avail << 2);
3242 if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
3243 num += dlb_process_dequeue_four_qes(ev_port,
3248 num += dlb_process_dequeue_qes(ev_port,
3253 else if ((timeout == 0) || (num > 0))
3254 /* Not waiting in any form, or 1+ events received? */
3256 else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
3257 timeout, start_ticks))
3261 qm_port->owed_tokens += num;
3263 dlb_consume_qe_immediate(qm_port, num);
3265 ev_port->outstanding_releases += num;
3271 dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)
3273 struct process_local_port_data *port_data;
3274 struct dlb_eventdev_port *ev_port;
3275 struct dlb_port *qm_port;
3278 if (port_id > dlb->num_ports) {
3279 DLB_LOG_ERR("Invalid port id %d in dlb-event_release\n",
3281 rte_errno = -EINVAL;
3285 ev_port = &dlb->ev_ports[port_id];
3286 qm_port = &ev_port->qm_port;
3287 port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
3291 if (qm_port->is_directed) {
3293 goto sw_credit_update;
3301 qm_port->qe4[0].cmd_byte = 0;
3302 qm_port->qe4[1].cmd_byte = 0;
3303 qm_port->qe4[2].cmd_byte = 0;
3304 qm_port->qe4[3].cmd_byte = 0;
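/* Clear all four command bytes so slots not overwritten with
 * DLB_COMP_CMD_BYTE below are treated as no-ops.
 */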
3306 for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
3308 qm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;
3309 qm_port->issued_releases++;
3312 dlb_hw_do_enqueue(qm_port, i == 0, port_data);
3314 /* Don't include the token pop QE in the release count */
3319 /* each release returns one credit */
3320 if (!ev_port->outstanding_releases) {
3321 DLB_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
3322 rte_errno = -ENOTRECOVERABLE;
3326 ev_port->outstanding_releases -= i;
3327 ev_port->inflight_credits += i;
3329 /* Replenish s/w credits if enough releases are performed */
3330 dlb_replenish_sw_credits(dlb, ev_port);
3335 dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3338 struct dlb_eventdev_port *ev_port = event_port;
3339 struct dlb_eventdev *dlb = ev_port->dlb;
3345 RTE_ASSERT(ev_port->setup_done);
3346 RTE_ASSERT(ev != NULL);
3348 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3349 uint16_t out_rels = ev_port->outstanding_releases;
3351 ret = dlb_event_release(dlb, ev_port->id, out_rels);
3355 DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3358 cnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);
3360 DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3361 DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3366 dlb_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3368 return dlb_event_dequeue_burst(event_port, ev, 1, wait);
3372 dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3373 uint16_t num, uint64_t wait)
3375 struct dlb_eventdev_port *ev_port = event_port;
3376 struct dlb_eventdev *dlb = ev_port->dlb;
3382 RTE_ASSERT(ev_port->setup_done);
3383 RTE_ASSERT(ev != NULL);
3385 if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3386 uint16_t out_rels = ev_port->outstanding_releases;
3388 ret = dlb_event_release(dlb, ev_port->id, out_rels);
3392 DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3395 cnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);
3397 DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3398 DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3403 dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)
3405 return dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3409 dlb_entry_points_init(struct rte_eventdev *dev)
3411 struct dlb_eventdev *dlb;
3413 static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
3414 .dev_infos_get = dlb_eventdev_info_get,
3415 .dev_configure = dlb_eventdev_configure,
3416 .dev_start = dlb_eventdev_start,
3417 .queue_def_conf = dlb_eventdev_queue_default_conf_get,
3418 .port_def_conf = dlb_eventdev_port_default_conf_get,
3419 .queue_setup = dlb_eventdev_queue_setup,
3420 .port_setup = dlb_eventdev_port_setup,
3421 .port_link = dlb_eventdev_port_link,
3422 .port_unlink = dlb_eventdev_port_unlink,
3423 .port_unlinks_in_progress =
3424 dlb_eventdev_port_unlinks_in_progress,
3425 .dump = dlb_eventdev_dump,
3426 .xstats_get = dlb_eventdev_xstats_get,
3427 .xstats_get_names = dlb_eventdev_xstats_get_names,
3428 .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
3429 .xstats_reset = dlb_eventdev_xstats_reset,
3432 /* Expose PMD's eventdev interface */
3433 dev->dev_ops = &dlb_eventdev_entry_ops;
3435 dev->enqueue = dlb_event_enqueue;
3436 dev->enqueue_burst = dlb_event_enqueue_burst;
3437 dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
3438 dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
3439 dev->dequeue = dlb_event_dequeue;
3440 dev->dequeue_burst = dlb_event_dequeue_burst;
3442 dlb = dev->data->dev_private;
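/* If the device reports sparse CQ mode (one QE per cache line), install
 * the sparse dequeue handlers instead of the dense ones.
 */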
3444 if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) {
3445 dev->dequeue = dlb_event_dequeue_sparse;
3446 dev->dequeue_burst = dlb_event_dequeue_burst_sparse;
3451 dlb_primary_eventdev_probe(struct rte_eventdev *dev,
3453 struct dlb_devargs *dlb_args)
3455 struct dlb_eventdev *dlb;
3458 dlb = dev->data->dev_private;
3460 dlb->event_dev = dev; /* backlink */
3462 evdev_dlb_default_info.driver_name = name;
3464 dlb->max_num_events_override = dlb_args->max_num_events;
3465 dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
3466 dlb->defer_sched = dlb_args->defer_sched;
3467 dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
3469 /* Open the interface.
3470 * For vdev mode, this means open the dlb kernel module.
3472 err = dlb_iface_open(&dlb->qm_instance, name);
3474 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
3479 err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
3481 DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
3486 err = dlb_hw_query_resources(dlb);
3488 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
3492 err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
3494 DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
3498 /* Complete xstats runtime initialization */
3499 err = dlb_xstats_init(dlb);
3501 DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
3505 rte_spinlock_init(&dlb->qm_instance.resource_lock);
3507 dlb_iface_low_level_io_init(dlb);
3509 dlb_entry_points_init(dev);
3515 dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
3518 struct dlb_eventdev *dlb;
3521 dlb = dev->data->dev_private;
3523 evdev_dlb_default_info.driver_name = name;
3525 err = dlb_iface_open(&dlb->qm_instance, name);
3527 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
3532 err = dlb_hw_query_resources(dlb);
3534 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
3538 dlb_iface_low_level_io_init(dlb);
3540 dlb_entry_points_init(dev);
3546 dlb_parse_params(const char *params,
3548 struct dlb_devargs *dlb_args)
3551 static const char * const args[] = { NUMA_NODE_ARG,
3553 DLB_NUM_DIR_CREDITS,
3555 DLB_DEFER_SCHED_ARG,
3556 DLB_NUM_ATM_INFLIGHTS_ARG,
3559 if (params && params[0] != '\0') {
3560 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
3562 if (kvlist == NULL) {
3563 DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
3566 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
3568 &dlb_args->socket_id);
3570 DLB_LOG_ERR("%s: Error parsing numa node parameter",
3572 rte_kvargs_free(kvlist);
3576 ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
3578 &dlb_args->max_num_events);
3580 DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
3582 rte_kvargs_free(kvlist);
3586 ret = rte_kvargs_process(kvlist,
3587 DLB_NUM_DIR_CREDITS,
3588 set_num_dir_credits,
3589 &dlb_args->num_dir_credits_override);
3591 DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
3593 rte_kvargs_free(kvlist);
3597 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
3601 DLB_LOG_ERR("%s: Error parsing dev_id parameter",
3603 rte_kvargs_free(kvlist);
3607 ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
3609 &dlb_args->defer_sched);
3611 DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
3613 rte_kvargs_free(kvlist);
3617 ret = rte_kvargs_process(kvlist,
3618 DLB_NUM_ATM_INFLIGHTS_ARG,
3619 set_num_atm_inflights,
3620 &dlb_args->num_atm_inflights);
3622 DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
3624 rte_kvargs_free(kvlist);
3628 rte_kvargs_free(kvlist);
3633 RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);