/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <sys/fcntl.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>

#include "dlb_iface.h"
#include "dlb_inline_fns.h"
/*
 * Resources exposed to eventdev.
 */
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
#endif
static struct rte_event_dev_info evdev_dlb_default_info = {
        .driver_name = "", /* probe will set */
        .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
        .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
        .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
#else
        .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
#endif
        .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
        .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
        .max_event_priority_levels = DLB_QID_PRIORITIES,
        .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
        .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
        .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
        .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
        .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
        .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
        .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
                          RTE_EVENT_DEV_CAP_EVENT_QOS |
                          RTE_EVENT_DEV_CAP_BURST_MODE |
                          RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                          RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
                          RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
};
struct process_local_port_data
dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
uint32_t
dlb_get_queue_depth(struct dlb_eventdev *dlb,
                    struct dlb_eventdev_queue *queue)
{
        /* Dummy for now so the "xstats" patch compiles */
        RTE_SET_USED(dlb);
        RTE_SET_USED(queue);

        return 0;
}
static int
dlb_hw_query_resources(struct dlb_eventdev *dlb)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_hw_resource_info *dlb_info = &handle->info;
        int ret;

        ret = dlb_iface_get_num_resources(handle,
                                          &dlb->hw_rsrc_query_results);
        if (ret) {
                DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
                return ret;
        }

        /* Complete filling in device resource info returned to evdev app,
         * overriding any default values.
         * The capabilities (CAPs) were set at compile time.
         */

        evdev_dlb_default_info.max_event_queues =
                dlb->hw_rsrc_query_results.num_ldb_queues;

        evdev_dlb_default_info.max_event_ports =
                dlb->hw_rsrc_query_results.num_ldb_ports;

        evdev_dlb_default_info.max_num_events =
                dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;

        /* Save off values used when creating the scheduling domain. */

        handle->info.num_sched_domains =
                dlb->hw_rsrc_query_results.num_sched_domains;

        handle->info.hw_rsrc_max.nb_events_limit =
                dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;

        handle->info.hw_rsrc_max.num_queues =
                dlb->hw_rsrc_query_results.num_ldb_queues +
                dlb->hw_rsrc_query_results.num_dir_ports;

        handle->info.hw_rsrc_max.num_ldb_queues =
                dlb->hw_rsrc_query_results.num_ldb_queues;

        handle->info.hw_rsrc_max.num_ldb_ports =
                dlb->hw_rsrc_query_results.num_ldb_ports;

        handle->info.hw_rsrc_max.num_dir_ports =
                dlb->hw_rsrc_query_results.num_dir_ports;

        handle->info.hw_rsrc_max.reorder_window_size =
                dlb->hw_rsrc_query_results.num_hist_list_entries;

        rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));

        return 0;
}
static void
dlb_free_qe_mem(struct dlb_port *qm_port)
{
        if (qm_port == NULL)
                return;

        rte_free(qm_port->qe4);
        qm_port->qe4 = NULL;

        rte_free(qm_port->consume_qe);
        qm_port->consume_qe = NULL;
}
static int
dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
{
        struct dlb_cq_pop_qe *qe;

        qe = rte_zmalloc(mz_name,
                         DLB_NUM_QES_PER_CACHE_LINE *
                                sizeof(struct dlb_cq_pop_qe),
                         RTE_CACHE_LINE_SIZE);

        if (qe == NULL) {
                DLB_LOG_ERR("dlb: no memory for consume_qe\n");
                return -ENOMEM;
        }

        qm_port->consume_qe = qe;

        /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
         * and so on.
         */
        qe->tokens = 0; /* set at run time */
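        /* For example (illustrative): a dequeue that consumed 4 QEs would
         * program qe->tokens = 3 at run time, since the field is 0-based.
         */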
        /* Completion IDs are disabled */

        return 0;
}
static int
dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
{
        int ret, sz;

        sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);

        qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);

        if (qm_port->qe4 == NULL) {
                DLB_LOG_ERR("dlb: no qe4 memory\n");
                ret = -ENOMEM;
                goto error_exit;
        }

        ret = dlb_init_consume_qe(qm_port, mz_name);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
                goto error_exit;
        }

        return 0;

error_exit:

        dlb_free_qe_mem(qm_port);

        return ret;
}
/* Wrapper for string to int conversion. Substituted for atoi(...), which is
 * unsafe.
 */
#define DLB_BASE_10 10

static int
dlb_string_to_int(int *result, const char *str)
{
        long ret;
        char *endstr;

        if (str == NULL || result == NULL)
                return -EINVAL;

        ret = strtol(str, &endstr, DLB_BASE_10);

        /* long int and int may be different width for some architectures */
        if (ret < INT_MIN || ret > INT_MAX || endstr == str)
                return -EINVAL;

        *result = ret;

        return 0;
}
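/* Illustrative usage (not part of the original file): given "int n;",
 * dlb_string_to_int(&n, "2048") stores 2048 and returns 0, while a
 * non-numeric string such as "foo" fails the endstr == str check and
 * returns -EINVAL.
 */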
static int
set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
        int *socket_id = opaque;
        int ret;

        ret = dlb_string_to_int(socket_id, value);
        if (ret < 0)
                return ret;

        if (*socket_id > RTE_MAX_NUMA_NODES)
                return -EINVAL;

        return 0;
}
static int
set_max_num_events(const char *key __rte_unused,
                   const char *value,
                   void *opaque)
{
        int *max_num_events = opaque;
        int ret;

        if (value == NULL || opaque == NULL) {
                DLB_LOG_ERR("NULL pointer\n");
                return -EINVAL;
        }

        ret = dlb_string_to_int(max_num_events, value);
        if (ret < 0)
                return ret;

        if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
                DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
                            DLB_MAX_NUM_LDB_CREDITS);
                return -EINVAL;
        }

        return 0;
}
static int
set_num_dir_credits(const char *key __rte_unused,
                    const char *value,
                    void *opaque)
{
        int *num_dir_credits = opaque;
        int ret;

        if (value == NULL || opaque == NULL) {
                DLB_LOG_ERR("NULL pointer\n");
                return -EINVAL;
        }

        ret = dlb_string_to_int(num_dir_credits, value);
        if (ret < 0)
                return ret;

        if (*num_dir_credits < 0 ||
            *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
                DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
                            DLB_MAX_NUM_DIR_CREDITS);
                return -EINVAL;
        }

        return 0;
}
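/* Illustrative sketch (an assumption, not upstream code): callbacks such as
 * set_numa_node() and set_max_num_events() are intended to be driven by
 * rte_kvargs_process() during probe, roughly as follows. The devarg key
 * names used here are assumptions for the example.
 */
static int
dlb_parse_params_sketch(const char *params, int *socket_id,
                        int *max_num_events)
{
        static const char * const valid_keys[] = {
                "numa_node", "max_num_events", NULL
        };
        struct rte_kvargs *kvlist;
        int ret;

        kvlist = rte_kvargs_parse(params, valid_keys);
        if (kvlist == NULL)
                return -EINVAL;

        /* Each matching "key=value" pair invokes the handler once */
        ret = rte_kvargs_process(kvlist, "numa_node",
                                 set_numa_node, socket_id);
        if (ret == 0)
                ret = rte_kvargs_process(kvlist, "max_num_events",
                                         set_max_num_events, max_num_events);

        rte_kvargs_free(kvlist);
        return ret;
}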
/* This function first unmaps all memory mappings and closes the
 * domain's file descriptor, which causes the driver to reset the
 * scheduling domain. Once that completes (when close() returns), we
 * can safely free the dynamically allocated memory used by the
 * queues and ports.
 *
 * We will maintain a use count and use that to determine when
 * a reset is required. In PF mode, we never mmap or munmap
 * device memory, and we own the entire physical PCI device.
 */
static void
dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        enum dlb_configuration_state config_state;
        int i, j;

        /* Close and reset the domain */
        dlb_iface_domain_close(dlb);

        /* Free all dynamically allocated port memory */
        for (i = 0; i < dlb->num_ports; i++)
                dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);

        /* If reconfiguring, mark the device's queues and ports as "previously
         * configured." If the user does not reconfigure them, the PMD will
         * reapply their previous configuration when the device is started.
         */
        config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;

        for (i = 0; i < dlb->num_ports; i++) {
                dlb->ev_ports[i].qm_port.config_state = config_state;
                /* Reset setup_done so ports can be reconfigured */
                dlb->ev_ports[i].setup_done = false;
                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
                        dlb->ev_ports[i].link[j].mapped = false;
        }

        for (i = 0; i < dlb->num_queues; i++)
                dlb->ev_queues[i].qm_queue.config_state = config_state;

        for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
                dlb->ev_queues[i].setup_done = false;

        dlb->num_ports = 0;
        dlb->num_ldb_ports = 0;
        dlb->num_dir_ports = 0;
        dlb->num_queues = 0;
        dlb->num_ldb_queues = 0;
        dlb->num_dir_queues = 0;
        dlb->configured = false;
}
static int
dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
{
        struct dlb_create_ldb_pool_args cfg;
        struct dlb_cmd_response response;
        int ret;

        if (handle == NULL)
                return -EINVAL;

        if (!handle->cfg.resources.num_ldb_credits) {
                handle->cfg.ldb_credit_pool_id = 0;
                handle->cfg.num_ldb_credits = 0;
                return 0;
        }

        cfg.response = (uintptr_t)&response;
        cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;

        ret = dlb_iface_ldb_credit_pool_create(handle,
                                               &cfg);
        if (ret < 0)
                DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);

        handle->cfg.ldb_credit_pool_id = response.id;
        handle->cfg.num_ldb_credits = cfg.num_ldb_credits;

        return ret;
}
static int
dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
{
        struct dlb_create_dir_pool_args cfg;
        struct dlb_cmd_response response;
        int ret;

        if (handle == NULL)
                return -EINVAL;

        if (!handle->cfg.resources.num_dir_credits) {
                handle->cfg.dir_credit_pool_id = 0;
                handle->cfg.num_dir_credits = 0;
                return 0;
        }

        cfg.response = (uintptr_t)&response;
        cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;

        ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
        if (ret < 0)
                DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);

        handle->cfg.dir_credit_pool_id = response.id;
        handle->cfg.num_dir_credits = cfg.num_dir_credits;

        return ret;
}
static int
dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
                           struct dlb_eventdev *dlb,
                           const struct dlb_hw_rsrcs *resources_asked)
{
        int ret = 0;
        struct dlb_create_sched_domain_args *config_params;
        struct dlb_cmd_response response;

        if (resources_asked == NULL) {
                DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
                ret = EINVAL;
                goto error_exit;
        }

        /* Map generic qm resources to dlb resources */
        config_params = &handle->cfg.resources;

        config_params->response = (uintptr_t)&response;

        /* DIR ports and queues */

        config_params->num_dir_ports =
                resources_asked->num_dir_ports;

        config_params->num_dir_credits =
                resources_asked->num_dir_credits;

        /* LDB ports and queues */

        config_params->num_ldb_queues =
                resources_asked->num_ldb_queues;

        config_params->num_ldb_ports =
                resources_asked->num_ldb_ports;

        config_params->num_ldb_credits =
                resources_asked->num_ldb_credits;

        config_params->num_atomic_inflights =
                dlb->num_atm_inflights_per_queue *
                config_params->num_ldb_queues;

        config_params->num_hist_list_entries = config_params->num_ldb_ports *
                DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

        /* dlb limited to 1 credit pool per queue type */
        config_params->num_ldb_credit_pools = 1;
        config_params->num_dir_credit_pools = 1;

        DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_credit_pools=%d, dir_credit_pools=%d\n",
                    config_params->num_ldb_queues,
                    config_params->num_ldb_ports,
                    config_params->num_dir_ports,
                    config_params->num_atomic_inflights,
                    config_params->num_hist_list_entries,
                    config_params->num_ldb_credits,
                    config_params->num_dir_credits,
                    config_params->num_ldb_credit_pools,
                    config_params->num_dir_credit_pools);

        /* Configure the QM */

        ret = dlb_iface_sched_domain_create(handle, config_params);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
                            handle->device_id,
                            ret,
                            dlb_error_strings[response.status]);
                goto error_exit;
        }

        handle->domain_id = response.id;
        handle->domain_id_valid = 1;

        config_params->response = 0;

        ret = dlb_ldb_credit_pool_create(handle);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
                goto error_exit2;
        }

        ret = dlb_dir_credit_pool_create(handle);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create dir credit pool failed\n");
                goto error_exit2;
        }

        handle->cfg.configured = true;

        return 0;

error_exit2:
        dlb_iface_domain_close(dlb);

error_exit:
        return ret;
}
/* End HW specific */
static void
dlb_eventdev_info_get(struct rte_eventdev *dev,
                      struct rte_event_dev_info *dev_info)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int ret;

        ret = dlb_hw_query_resources(dlb);
        if (ret) {
                const struct rte_eventdev_data *data = dev->data;

                DLB_LOG_ERR("get resources err=%d, devid=%d\n",
                            ret, data->dev_id);
                /* fn is void, so fall through and return values set up in
                 * probe
                 */
        }

        /* Add num resources currently owned by this domain.
         * These would become available if the scheduling domain were reset due
         * to the application recalling eventdev_configure to *reconfigure* the
         * device.
         */
        evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
        evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
        evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;

        /* In DLB A-stepping hardware, applications are limited to 128
         * configured ports (load-balanced or directed). The reported number of
         * available ports must reflect this.
         */
        if (dlb->revision < DLB_REV_B0) {
                int used_ports;

                used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
                        dlb->hw_rsrc_query_results.num_ldb_ports -
                        dlb->hw_rsrc_query_results.num_dir_ports;

                evdev_dlb_default_info.max_event_ports =
                        RTE_MIN(evdev_dlb_default_info.max_event_ports,
                                128 - used_ports);
        }

        evdev_dlb_default_info.max_event_queues =
                RTE_MIN(evdev_dlb_default_info.max_event_queues,
                        RTE_EVENT_MAX_QUEUES_PER_DEV);

        evdev_dlb_default_info.max_num_events =
                RTE_MIN(evdev_dlb_default_info.max_num_events,
                        dlb->max_num_events_override);

        *dev_info = evdev_dlb_default_info;
}
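/* Illustrative application-side sketch (an assumption, not upstream code):
 * the defaults assembled above are what a caller observes through
 * rte_event_dev_info_get().
 */
static int
dlb_query_info_sketch(uint8_t dev_id, uint32_t *max_ports)
{
        struct rte_event_dev_info info;
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret == 0)
                *max_ports = info.max_event_ports;

        return ret;
}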
/* Note: 1 QM instance per QM device, QM instance/device == event device */
static int
dlb_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
        const struct rte_eventdev_data *data = dev->data;
        const struct rte_event_dev_config *config = &data->dev_conf;
        int ret;

        /* If this eventdev is already configured, we must release the current
         * scheduling domain before attempting to configure a new one.
         */
        if (dlb->configured) {
                dlb_hw_reset_sched_domain(dev, true);

                ret = dlb_hw_query_resources(dlb);
                if (ret) {
                        DLB_LOG_ERR("get resources err=%d, devid=%d\n",
                                    ret, data->dev_id);
                        return ret;
                }
        }

        if (config->nb_event_queues > rsrcs->num_queues) {
                DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_event_queues,
                            rsrcs->num_queues);
                return -EINVAL;
        }
        if (config->nb_event_ports > (rsrcs->num_ldb_ports
                        + rsrcs->num_dir_ports)) {
                DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_event_ports,
                            (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
                return -EINVAL;
        }
        if (config->nb_events_limit > rsrcs->nb_events_limit) {
                DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_events_limit,
                            rsrcs->nb_events_limit);
                return -EINVAL;
        }

        if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dlb->global_dequeue_wait = false;
        else {
                uint32_t timeout32;

                dlb->global_dequeue_wait = true;

                timeout32 = config->dequeue_timeout_ns;

                dlb->global_dequeue_wait_ticks =
                        timeout32 * (rte_get_timer_hz() / 1E9);
        }
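        /* For example (illustrative): with a 2 GHz timer
         * (rte_get_timer_hz() == 2E9), dequeue_timeout_ns = 1000 yields
         * global_dequeue_wait_ticks = 1000 * 2 = 2000.
         */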
        /* Does this platform support umonitor/umwait? */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
                if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
                    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
                        DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE, must be 0 or 1.\n",
                                    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
                        return -EINVAL;
                }
                dlb->umwait_allowed = true;
        }

        rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
        rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
        /* 1 dir queue per dir port */
        rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;

        /* Scale down nb_events_limit by 4 for directed credits, since there
         * are 4x as many load-balanced credits.
         */
        rsrcs->num_ldb_credits = 0;
        rsrcs->num_dir_credits = 0;

        if (rsrcs->num_ldb_queues)
                rsrcs->num_ldb_credits = config->nb_events_limit;
        if (rsrcs->num_dir_ports)
                rsrcs->num_dir_credits = config->nb_events_limit / 4;
        if (dlb->num_dir_credits_override != -1)
                rsrcs->num_dir_credits = dlb->num_dir_credits_override;
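        /* For example (illustrative): nb_events_limit = 2048 yields 2048
         * load-balanced credits and 2048 / 4 = 512 directed credits, unless
         * the directed credit override devarg supplies a different value.
         */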
        if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
                DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
                return -ENODEV;
        }

        dlb->new_event_limit = config->nb_events_limit;
        __atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);

        /* Save number of ports/queues for this event dev */
        dlb->num_ports = config->nb_event_ports;
        dlb->num_queues = config->nb_event_queues;
        dlb->num_dir_ports = rsrcs->num_dir_ports;
        dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
        dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
        dlb->num_dir_queues = dlb->num_dir_ports;
        dlb->num_ldb_credits = rsrcs->num_ldb_credits;
        dlb->num_dir_credits = rsrcs->num_dir_credits;

        dlb->configured = true;

        return 0;
}
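/* Illustrative application-side sketch (an assumption, not upstream code):
 * a minimal rte_event_dev_configure() call that satisfies the capability
 * checks performed above.
 */
static int
dlb_configure_sketch(uint8_t dev_id)
{
        struct rte_event_dev_config cfg = {0};
        struct rte_event_dev_info info;
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret)
                return ret;

        cfg.nb_event_queues = 1;
        cfg.nb_event_ports = 1;
        cfg.nb_events_limit = info.max_num_events;
        cfg.nb_event_queue_flows = info.max_event_queue_flows;
        cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
        cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
        cfg.dequeue_timeout_ns = 0;

        return rte_event_dev_configure(dev_id, &cfg);
}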
static int
dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
                               uint32_t qm_port_id,
                               uint16_t qm_qid)
{
        struct dlb_unmap_qid_args cfg;
        struct dlb_cmd_response response;
        int32_t ret;

        if (handle == NULL)
                return -EINVAL;

        cfg.response = (uintptr_t)&response;
        cfg.port_id = qm_port_id;
        cfg.qid = qm_qid;

        ret = dlb_iface_unmap_qid(handle, &cfg);
        if (ret < 0)
                DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);

        return ret;
}
static int
dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
                           struct dlb_eventdev_port *ev_port,
                           struct dlb_eventdev_queue *ev_queue)
{
        int ret, i;

        /* Don't unlink until start time. */
        if (dlb->run_state == DLB_RUN_STATE_STOPPED)
                return 0;

        for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
                if (ev_port->link[i].valid &&
                    ev_port->link[i].queue_id == ev_queue->id)
                        break; /* found */
        }

        /* This is expected with the eventdev API, which blindly attempts to
         * unmap all queues.
         */
        if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
                DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
                            ev_queue->qm_queue.id,
                            ev_port->qm_port.id);
                return 0;
        }

        ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
                                             ev_port->qm_port.id,
                                             ev_queue->qm_queue.id);
        if (!ret)
                ev_port->link[i].mapped = false;

        return ret;
}
static int
dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
                         uint8_t queues[], uint16_t nb_unlinks)
{
        struct dlb_eventdev_port *ev_port = event_port;
        struct dlb_eventdev *dlb;
        int i;

        RTE_SET_USED(dev);

        if (!ev_port->setup_done) {
                DLB_LOG_ERR("dlb: evport %d is not configured\n",
                            ev_port->id);
                rte_errno = -EINVAL;
                return 0;
        }

        if (queues == NULL || nb_unlinks == 0) {
                DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
                return 0; /* Ignore and return success */
        }

        if (ev_port->qm_port.is_directed) {
                DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
                            ev_port->id);
                rte_errno = 0;
                return nb_unlinks; /* as if success */
        }

        dlb = ev_port->dlb;

        for (i = 0; i < nb_unlinks; i++) {
                struct dlb_eventdev_queue *ev_queue;
                int ret, j;

                if (queues[i] >= dlb->num_queues) {
                        DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
                        rte_errno = -EINVAL;
                        return i; /* return index of offending queue */
                }

                ev_queue = &dlb->ev_queues[queues[i]];

                /* Does a link exist? */
                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
                        if (ev_port->link[j].queue_id == queues[i] &&
                            ev_port->link[j].valid)
                                break;

                if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
                        continue;

                ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
                if (ret) {
                        DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
                                    ret, ev_port->id, queues[i]);
                        return i; /* return index of offending queue */
                }

                ev_port->link[j].valid = false;
                ev_port->num_links--;
                ev_queue->num_links--;
        }

        return nb_unlinks;
}
static int
dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
                                      void *event_port)
{
        struct dlb_eventdev_port *ev_port = event_port;
        struct dlb_eventdev *dlb;
        struct dlb_hw_dev *handle;
        struct dlb_pending_port_unmaps_args cfg;
        struct dlb_cmd_response response;
        int ret;

        RTE_SET_USED(dev);

        if (!ev_port->setup_done) {
                DLB_LOG_ERR("dlb: evport %d is not configured\n",
                            ev_port->id);
                rte_errno = -EINVAL;
                return 0;
        }

        cfg.port_id = ev_port->qm_port.id;
        cfg.response = (uintptr_t)&response;

        dlb = ev_port->dlb;
        handle = &dlb->qm_instance;
        ret = dlb_iface_pending_port_unmaps(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return ret;
        }

        return response.id;
}
static void
dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
                                   uint8_t port_id,
                                   struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_id);
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);

        port_conf->new_event_threshold = dlb->new_event_limit;
        port_conf->dequeue_depth = 32;
        port_conf->enqueue_depth = DLB_MAX_ENQUEUE_DEPTH;
        port_conf->event_port_cfg = 0;
}
static void
dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
                                    uint8_t queue_id,
                                    struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
        queue_conf->nb_atomic_flows = 1024;
        queue_conf->nb_atomic_order_sequences = 32;
        queue_conf->event_queue_cfg = 0;
        queue_conf->priority = 0;
}
static int
dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
                       struct dlb_eventdev_port *ev_port,
                       uint32_t dequeue_depth,
                       uint32_t cq_depth,
                       uint32_t enqueue_depth,
                       uint16_t rsvd_tokens,
                       bool use_rsvd_token_scheme)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_create_ldb_port_args cfg = {0};
        struct dlb_cmd_response response = {0};
        int ret;
        struct dlb_port *qm_port = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        uint32_t qm_port_id;

        if (handle == NULL)
                return -EINVAL;

        if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
                DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
                            DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
                return -EINVAL;
        }

        if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
                DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
                            DLB_MIN_ENQUEUE_DEPTH);
                return -EINVAL;
        }

        rte_spinlock_lock(&handle->resource_lock);

        cfg.response = (uintptr_t)&response;

        /* We round up to the next power of 2 if necessary */
        cfg.cq_depth = rte_align32pow2(cq_depth);
        cfg.cq_depth_threshold = rsvd_tokens;

        cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

        /* User controls the LDB high watermark via enqueue depth. The DIR high
         * watermark is equal, unless the directed credit pool is too small.
         */
        cfg.ldb_credit_high_watermark = enqueue_depth;

        /* If there are no directed ports, the kernel driver will ignore this
         * port's directed credit settings. Don't use enqueue_depth if it would
         * require more directed credits than are available.
         */
        cfg.dir_credit_high_watermark =
                RTE_MIN(enqueue_depth,
                        handle->cfg.num_dir_credits / dlb->num_ports);

        cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
        cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);

        cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
        cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
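        /* For example (illustrative): enqueue_depth = 64 gives an LDB high
         * watermark of 64, a quantum of 64 / 2 = 32, and a low watermark of
         * RTE_MIN(16, 32) = 16.
         */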
        cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
        cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;

        ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                goto error_exit;
        }

        qm_port_id = response.id;

        DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
                    ev_port->id, qm_port_id);

        qm_port = &ev_port->qm_port;
        qm_port->ev_port = ev_port; /* back ptr */
        qm_port->dlb = dlb; /* back ptr */

        /*
         * Allocate and init local qe struct(s).
         * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
         */

        snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
                 ev_port->id);

        ret = dlb_init_qe_mem(qm_port, mz_name);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
                goto error_exit;
        }

        qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
        qm_port->id = qm_port_id;

        /* The credit window is one high water mark of QEs */
        qm_port->ldb_pushcount_at_credit_expiry = 0;
        qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
        /* The credit window is one high water mark of QEs */
        qm_port->dir_pushcount_at_credit_expiry = 0;
        qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;

        /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
         * the effective depth is smaller.
         */
        qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
        qm_port->cq_idx = 0;
        qm_port->cq_idx_unmasked = 0;
        if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
                qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
        else
                qm_port->cq_depth_mask = qm_port->cq_depth - 1;

        qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
        /* starting value of gen bit - it toggles at wrap time */
        qm_port->gen_bit = 1;
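        /* For example (illustrative): an 8-entry CQ has cq_depth_mask = 7
         * (31 in sparse mode), so gen_bit_shift = 3 (5) and the expected gen
         * bit flips once every cq_depth_mask + 1 increments of
         * cq_idx_unmasked.
         */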
        qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
        qm_port->cq_rsvd_token_deficit = rsvd_tokens;
        qm_port->int_armed = false;

        /* Save off for later use in info and lookup APIs. */
        qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];

        qm_port->dequeue_depth = dequeue_depth;

        qm_port->owed_tokens = 0;
        qm_port->issued_releases = 0;

        qm_port->state = PORT_STARTED; /* enabled at create time */
        qm_port->config_state = DLB_CONFIGURED;

        qm_port->dir_credits = cfg.dir_credit_high_watermark;
        qm_port->ldb_credits = cfg.ldb_credit_high_watermark;

        DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
                    qm_port_id,
                    cq_depth,
                    qm_port->ldb_credits,
                    qm_port->dir_credits);

        rte_spinlock_unlock(&handle->resource_lock);

        return 0;

error_exit:
        if (qm_port) {
                dlb_free_qe_mem(qm_port);
                qm_port->pp_mmio_base = 0;
        }

        rte_spinlock_unlock(&handle->resource_lock);

        DLB_LOG_ERR("dlb: create ldb port failed!\n");

        return ret;
}
static int
dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
                       struct dlb_eventdev_port *ev_port,
                       uint32_t dequeue_depth,
                       uint32_t cq_depth,
                       uint32_t enqueue_depth,
                       uint16_t rsvd_tokens,
                       bool use_rsvd_token_scheme)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_create_dir_port_args cfg = {0};
        struct dlb_cmd_response response = {0};
        int ret;
        struct dlb_port *qm_port = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        uint32_t qm_port_id;

        if (dlb == NULL || handle == NULL)
                return -EINVAL;

        if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
                DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
                            DLB_MIN_DIR_CQ_DEPTH);
                return -EINVAL;
        }

        if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
                DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
                            DLB_MIN_ENQUEUE_DEPTH);
                return -EINVAL;
        }

        rte_spinlock_lock(&handle->resource_lock);

        /* Directed queues are configured at link time. */

        cfg.response = (uintptr_t)&response;

        /* We round up to the next power of 2 if necessary */
        cfg.cq_depth = rte_align32pow2(cq_depth);
        cfg.cq_depth_threshold = rsvd_tokens;

        /* User controls the LDB high watermark via enqueue depth. The DIR high
         * watermark is equal, unless the directed credit pool is too small.
         */
        cfg.ldb_credit_high_watermark = enqueue_depth;

        /* Don't use enqueue_depth if it would require more directed credits
         * than are available.
         */
        cfg.dir_credit_high_watermark =
                RTE_MIN(enqueue_depth,
                        handle->cfg.num_dir_credits / dlb->num_ports);

        cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
        cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);

        cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
        cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);

        cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
        cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;

        ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                goto error_exit;
        }

        qm_port_id = response.id;

        DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
                    ev_port->id, qm_port_id);

        qm_port = &ev_port->qm_port;
        qm_port->ev_port = ev_port; /* back ptr */
        qm_port->dlb = dlb; /* back ptr */

        /*
         * Init local qe struct(s).
         * Note: MOVDIR64 requires the enqueue QE to be aligned
         */

        snprintf(mz_name, sizeof(mz_name), "dir_port%d",
                 ev_port->id);

        ret = dlb_init_qe_mem(qm_port, mz_name);

        if (ret < 0) {
                DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
                goto error_exit;
        }

        qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
        qm_port->id = qm_port_id;

        /* The credit window is one high water mark of QEs */
        qm_port->ldb_pushcount_at_credit_expiry = 0;
        qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
        /* The credit window is one high water mark of QEs */
        qm_port->dir_pushcount_at_credit_expiry = 0;
        qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
        qm_port->cq_depth = cfg.cq_depth;
        qm_port->cq_idx = 0;
        qm_port->cq_idx_unmasked = 0;
        if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
                qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
        else
                qm_port->cq_depth_mask = cfg.cq_depth - 1;

        qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
        /* starting value of gen bit - it toggles at wrap time */
        qm_port->gen_bit = 1;

        qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
        qm_port->cq_rsvd_token_deficit = rsvd_tokens;
        qm_port->int_armed = false;

        /* Save off for later use in info and lookup APIs. */
        qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];

        qm_port->dequeue_depth = dequeue_depth;

        qm_port->owed_tokens = 0;
        qm_port->issued_releases = 0;

        qm_port->state = PORT_STARTED; /* enabled at create time */
        qm_port->config_state = DLB_CONFIGURED;

        qm_port->dir_credits = cfg.dir_credit_high_watermark;
        qm_port->ldb_credits = cfg.ldb_credit_high_watermark;

        DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
                    qm_port_id,
                    cq_depth,
                    cfg.dir_credit_high_watermark,
                    cfg.ldb_credit_high_watermark);

        rte_spinlock_unlock(&handle->resource_lock);

        return 0;

error_exit:
        if (qm_port) {
                qm_port->pp_mmio_base = 0;
                dlb_free_qe_mem(qm_port);
        }

        rte_spinlock_unlock(&handle->resource_lock);

        DLB_LOG_ERR("dlb: create dir port failed!\n");

        return ret;
}
static int32_t
dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
                        struct dlb_queue *queue,
                        const struct rte_event_queue_conf *evq_conf)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_create_ldb_queue_args cfg;
        struct dlb_cmd_response response;
        int32_t ret;
        uint32_t qm_qid;
        int sched_type = -1;

        if (evq_conf == NULL)
                return -EINVAL;

        if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
                if (evq_conf->nb_atomic_order_sequences != 0)
                        sched_type = RTE_SCHED_TYPE_ORDERED;
                else
                        sched_type = RTE_SCHED_TYPE_PARALLEL;
        } else
                sched_type = evq_conf->schedule_type;

        cfg.response = (uintptr_t)&response;
        cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
        cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
        cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;

        if (sched_type != RTE_SCHED_TYPE_ORDERED) {
                cfg.num_sequence_numbers = 0;
                cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
        }

        ret = dlb_iface_ldb_queue_create(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return -EINVAL;
        }

        qm_qid = response.id;

        /* Save off queue config for debug, resource lookups, and reconfig */
        queue->num_qid_inflights = cfg.num_qid_inflights;
        queue->num_atm_inflights = cfg.num_atomic_inflights;

        queue->sched_type = sched_type;
        queue->config_state = DLB_CONFIGURED;

        DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
                    qm_qid,
                    cfg.num_atomic_inflights,
                    cfg.num_sequence_numbers,
                    cfg.num_qid_inflights);

        return qm_qid;
}
static int
dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_get_sn_allocation_args cfg;
        struct dlb_cmd_response response;
        int ret;

        cfg.group = group;
        cfg.response = (uintptr_t)&response;

        ret = dlb_iface_get_sn_allocation(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return ret;
        }

        return response.id;
}
static int
dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_set_sn_allocation_args cfg;
        struct dlb_cmd_response response;
        int ret;

        cfg.num = num;
        cfg.group = group;
        cfg.response = (uintptr_t)&response;

        ret = dlb_iface_set_sn_allocation(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return ret;
        }

        return ret;
}
static int
dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_get_sn_occupancy_args cfg;
        struct dlb_cmd_response response;
        int ret;

        cfg.group = group;
        cfg.response = (uintptr_t)&response;

        ret = dlb_iface_get_sn_occupancy(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return ret;
        }

        return response.id;
}
/* Query the current sequence number allocations and, if they conflict with the
 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
 * This is best-effort; if it fails, the PMD will attempt to configure the
 * load-balanced queue and return an error.
 */
static void
dlb_program_sn_allocation(struct dlb_eventdev *dlb,
                          const struct rte_event_queue_conf *queue_conf)
{
        int grp_occupancy[DLB_NUM_SN_GROUPS];
        int grp_alloc[DLB_NUM_SN_GROUPS];
        int i, sequence_numbers;

        sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;

        for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
                int total_slots;

                grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
                if (grp_alloc[i] < 0)
                        return;

                total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];

                grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
                if (grp_occupancy[i] < 0)
                        return;

                /* DLB has at least one available slot for the requested
                 * sequence numbers, so no further configuration required.
                 */
                if (grp_alloc[i] == sequence_numbers &&
                    grp_occupancy[i] < total_slots)
                        return;
        }

        /* None of the sequence number groups are configured for the requested
         * sequence numbers, so we have to reconfigure one of them. This is
         * only possible if a group is not in use.
         */
        for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
                if (grp_occupancy[i] == 0)
                        break;
        }

        if (i == DLB_NUM_SN_GROUPS) {
                DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
                            __func__, sequence_numbers);
                return;
        }

        /* Attempt to configure slot i with the requested number of sequence
         * numbers. Ignore the return value -- if this fails, the error will be
         * caught during subsequent queue configuration.
         */
        dlb_set_sn_allocation(dlb, i, sequence_numbers);
}
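/* Worked example (illustrative; the real DLB_MAX_LDB_SN_ALLOC value may
 * differ): if DLB_MAX_LDB_SN_ALLOC were 1024, a group configured for 64
 * sequence numbers would expose 1024 / 64 = 16 queue slots, and the loop
 * above can reuse that group only while grp_occupancy[i] < 16.
 */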
static int32_t
dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
                             struct dlb_eventdev_queue *ev_queue,
                             const struct rte_event_queue_conf *queue_conf)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int32_t qm_qid;

        if (queue_conf->nb_atomic_order_sequences)
                dlb_program_sn_allocation(dlb, queue_conf);

        qm_qid = dlb_hw_create_ldb_queue(dlb,
                                         &ev_queue->qm_queue,
                                         queue_conf);
        if (qm_qid < 0) {
                DLB_LOG_ERR("Failed to create the load-balanced queue\n");
                return qm_qid;
        }

        dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;

        ev_queue->qm_queue.id = qm_qid;

        return 0;
}
static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
{
        int i, num = 0;

        for (i = 0; i < dlb->num_queues; i++) {
                if (dlb->ev_queues[i].setup_done &&
                    dlb->ev_queues[i].qm_queue.is_directed)
                        num++;
        }

        return num;
}
static void
dlb_queue_link_teardown(struct dlb_eventdev *dlb,
                        struct dlb_eventdev_queue *ev_queue)
{
        struct dlb_eventdev_port *ev_port;
        int i, j;

        for (i = 0; i < dlb->num_ports; i++) {
                ev_port = &dlb->ev_ports[i];

                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
                        if (!ev_port->link[j].valid ||
                            ev_port->link[j].queue_id != ev_queue->id)
                                continue;

                        ev_port->link[j].valid = false;
                        ev_port->num_links--;
                }
        }

        ev_queue->num_links = 0;
}
static int
dlb_eventdev_queue_setup(struct rte_eventdev *dev,
                         uint8_t ev_qid,
                         const struct rte_event_queue_conf *queue_conf)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        struct dlb_eventdev_queue *ev_queue;
        int ret;

        if (queue_conf == NULL)
                return -EINVAL;

        if (ev_qid >= dlb->num_queues)
                return -EINVAL;

        ev_queue = &dlb->ev_queues[ev_qid];

        ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
                RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
        ev_queue->id = ev_qid;
        ev_queue->conf = *queue_conf;

        if (!ev_queue->qm_queue.is_directed) {
                ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
        } else {
                /* The directed queue isn't setup until link time, at which
                 * point we know its directed port ID. Directed queue setup
                 * will only fail if this queue is already setup or there are
                 * no directed queues left to configure.
                 */
                ret = 0;

                ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;

                if (ev_queue->setup_done ||
                    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
                        ret = -EINVAL;
        }

        /* Tear down pre-existing port->queue links */
        if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
                dlb_queue_link_teardown(dlb, ev_queue);

        if (!ret)
                ev_queue->setup_done = true;

        return ret;
}
static void
dlb_port_link_teardown(struct dlb_eventdev *dlb,
                       struct dlb_eventdev_port *ev_port)
{
        struct dlb_eventdev_queue *ev_queue;
        int i;

        for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
                if (!ev_port->link[i].valid)
                        continue;

                ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];

                ev_port->link[i].valid = false;
                ev_port->num_links--;
                ev_queue->num_links--;
        }
}
static int
dlb_eventdev_port_setup(struct rte_eventdev *dev,
                        uint8_t ev_port_id,
                        const struct rte_event_port_conf *port_conf)
{
        struct dlb_eventdev *dlb;
        struct dlb_eventdev_port *ev_port;
        bool use_rsvd_token_scheme;
        uint32_t adj_cq_depth;
        uint16_t rsvd_tokens;
        int ret;

        if (dev == NULL || port_conf == NULL) {
                DLB_LOG_ERR("Null parameter\n");
                return -EINVAL;
        }

        dlb = dlb_pmd_priv(dev);

        if (ev_port_id >= DLB_MAX_NUM_PORTS)
                return -EINVAL;

        if (port_conf->dequeue_depth >
            evdev_dlb_default_info.max_event_port_dequeue_depth ||
            port_conf->enqueue_depth >
            evdev_dlb_default_info.max_event_port_enqueue_depth)
                return -EINVAL;

        ev_port = &dlb->ev_ports[ev_port_id];

        if (ev_port->setup_done) {
                DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
                return -EINVAL;
        }

        /* The reserved token interrupt arming scheme requires that one or more
         * CQ tokens be reserved by the PMD. This limits the amount of CQ space
         * usable by the DLB, so in order to give an *effective* CQ depth equal
         * to the user-requested value, we double CQ depth and reserve half of
         * its tokens. If the user requests the max CQ depth (256) then we
         * cannot double it, so we reserve one token and give an effective
         * depth of 255 entries.
         */
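        /* For example (illustrative): a requested dequeue_depth of 32 is
         * created as a 64-entry CQ with 32 tokens reserved, so the effective
         * depth seen by the application remains 32.
         */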
        use_rsvd_token_scheme = true;
        rsvd_tokens = 1;
        adj_cq_depth = port_conf->dequeue_depth;

        if (use_rsvd_token_scheme && adj_cq_depth < 256) {
                rsvd_tokens = adj_cq_depth;
                adj_cq_depth *= 2;
        }

        ev_port->qm_port.is_directed = port_conf->event_port_cfg &
                RTE_EVENT_PORT_CFG_SINGLE_LINK;

        if (!ev_port->qm_port.is_directed) {
                ret = dlb_hw_create_ldb_port(dlb,
                                             ev_port,
                                             port_conf->dequeue_depth,
                                             adj_cq_depth,
                                             port_conf->enqueue_depth,
                                             rsvd_tokens,
                                             use_rsvd_token_scheme);
                if (ret < 0) {
                        DLB_LOG_ERR("Failed to create the LB port, ev_port_id=%d\n",
                                    ev_port_id);
                        return ret;
                }
        } else {
                ret = dlb_hw_create_dir_port(dlb,
                                             ev_port,
                                             port_conf->dequeue_depth,
                                             adj_cq_depth,
                                             port_conf->enqueue_depth,
                                             rsvd_tokens,
                                             use_rsvd_token_scheme);
                if (ret < 0) {
                        DLB_LOG_ERR("Failed to create the DIR port\n");
                        return ret;
                }
        }

        /* Save off port config for reconfig */
        dlb->ev_ports[ev_port_id].conf = *port_conf;

        dlb->ev_ports[ev_port_id].id = ev_port_id;
        dlb->ev_ports[ev_port_id].enq_configured = true;
        dlb->ev_ports[ev_port_id].setup_done = true;
        dlb->ev_ports[ev_port_id].inflight_max =
                port_conf->new_event_threshold;
        dlb->ev_ports[ev_port_id].implicit_release =
                !(port_conf->event_port_cfg &
                  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
        dlb->ev_ports[ev_port_id].outstanding_releases = 0;
        dlb->ev_ports[ev_port_id].inflight_credits = 0;
        dlb->ev_ports[ev_port_id].credit_update_quanta =
                RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
        dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */

        /* Tear down pre-existing port->queue links */
        if (dlb->run_state == DLB_RUN_STATE_STOPPED)
                dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);

        dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];

        return 0;
}
static int
dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int ret, i;

        /* If an event queue or port was previously configured, but hasn't been
         * reconfigured, reapply its original configuration.
         */
        for (i = 0; i < dlb->num_queues; i++) {
                struct dlb_eventdev_queue *ev_queue;

                ev_queue = &dlb->ev_queues[i];

                if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
                        continue;

                ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
                if (ret < 0) {
                        DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
                        return ret;
                }
        }

        for (i = 0; i < dlb->num_ports; i++) {
                struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];

                if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
                        continue;

                ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
                if (ret < 0) {
                        DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
                                    i);
                        return ret;
                }
        }

        return 0;
}
static int
set_dev_id(const char *key __rte_unused,
           const char *value,
           void *opaque)
{
        int *dev_id = opaque;
        int ret;

        if (value == NULL || opaque == NULL) {
                DLB_LOG_ERR("NULL pointer\n");
                return -EINVAL;
        }

        ret = dlb_string_to_int(dev_id, value);
        if (ret < 0)
                return ret;

        return 0;
}
static int
set_defer_sched(const char *key __rte_unused,
                const char *value,
                void *opaque)
{
        int *defer_sched = opaque;

        if (value == NULL || opaque == NULL) {
                DLB_LOG_ERR("NULL pointer\n");
                return -EINVAL;
        }

        if (strncmp(value, "on", 2) != 0) {
                DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
                            value);
                return -EINVAL;
        }

        *defer_sched = 1;

        return 0;
}
static int
set_num_atm_inflights(const char *key __rte_unused,
                      const char *value,
                      void *opaque)
{
        int *num_atm_inflights = opaque;
        int ret;

        if (value == NULL || opaque == NULL) {
                DLB_LOG_ERR("NULL pointer\n");
                return -EINVAL;
        }

        ret = dlb_string_to_int(num_atm_inflights, value);
        if (ret < 0)
                return ret;

        if (*num_atm_inflights < 0 ||
            *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
                DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
                            DLB_MAX_NUM_ATM_INFLIGHTS);
                return -EINVAL;
        }

        return 0;
}
static int
dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
                       uint8_t queue_id,
                       bool link_exists,
                       int index)
{
        struct dlb_eventdev *dlb = ev_port->dlb;
        struct dlb_eventdev_queue *ev_queue;
        bool port_is_dir, queue_is_dir;

        if (queue_id > dlb->num_queues) {
                DLB_LOG_ERR("queue_id %d > num queues %d\n",
                            queue_id, dlb->num_queues);
                rte_errno = -EINVAL;
                return -1;
        }

        ev_queue = &dlb->ev_queues[queue_id];

        if (!ev_queue->setup_done &&
            ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
                DLB_LOG_ERR("setup not done and not previously configured\n");
                rte_errno = -EINVAL;
                return -1;
        }

        port_is_dir = ev_port->qm_port.is_directed;
        queue_is_dir = ev_queue->qm_queue.is_directed;

        if (port_is_dir != queue_is_dir) {
                DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
                            queue_is_dir ? "DIR" : "LDB", ev_queue->id,
                            port_is_dir ? "DIR" : "LDB", ev_port->id);

                rte_errno = -EINVAL;
                return -1;
        }

        /* Check if there is space for the requested link */
        if (!link_exists && index == -1) {
                DLB_LOG_ERR("no space for new link\n");
                rte_errno = -ENOSPC;
                return -1;
        }

        /* Check if the directed port is already linked */
        if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
            !link_exists) {
                DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
                            ev_port->id);
                rte_errno = -EINVAL;
                return -1;
        }

        /* Check if the directed queue is already linked */
        if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
            !link_exists) {
                DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
                            ev_queue->id);
                rte_errno = -EINVAL;
                return -1;
        }

        return 0;
}
static int32_t
dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
{
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_create_dir_queue_args cfg;
        struct dlb_cmd_response response;
        int32_t ret;

        cfg.response = (uintptr_t)&response;

        /* The directed port is always configured before its queue */
        cfg.port_id = qm_port_id;

        ret = dlb_iface_dir_queue_create(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return -EINVAL;
        }

        return response.id;
}
static int
dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
                             struct dlb_eventdev_queue *ev_queue,
                             struct dlb_eventdev_port *ev_port)
{
        int32_t qm_qid;

        qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);

        if (qm_qid < 0) {
                DLB_LOG_ERR("Failed to create the DIR queue\n");
                return qm_qid;
        }

        dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;

        ev_queue->qm_queue.id = qm_qid;

        return 0;
}
static int
dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
                           uint32_t qm_port_id,
                           uint16_t qm_qid,
                           uint8_t priority)
{
        struct dlb_map_qid_args cfg;
        struct dlb_cmd_response response;
        int32_t ret;

        if (handle == NULL)
                return -EINVAL;

        cfg.response = (uintptr_t)&response;
        cfg.port_id = qm_port_id;
        cfg.qid = qm_qid;
        cfg.priority = EV_TO_DLB_PRIO(priority);

        ret = dlb_iface_map_qid(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
                            handle->device_id,
                            handle->domain_id, cfg.port_id,
                            cfg.qid,
                            cfg.priority);
        } else {
                DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
                            qm_qid, qm_port_id);
        }

        return ret;
}
static int
dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
                         struct dlb_eventdev_port *ev_port,
                         struct dlb_eventdev_queue *ev_queue,
                         uint8_t priority)
{
        int first_avail = -1;
        int ret, i;

        for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
                if (ev_port->link[i].valid) {
                        if (ev_port->link[i].queue_id == ev_queue->id &&
                            ev_port->link[i].priority == priority) {
                                if (ev_port->link[i].mapped)
                                        return 0; /* already mapped */
                                first_avail = i;
                        }
                } else {
                        if (first_avail == -1)
                                first_avail = i;
                }
        }

        if (first_avail == -1) {
                DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
                            ev_port->qm_port.id);
                return -EINVAL;
        }

        ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
                                         ev_port->qm_port.id,
                                         ev_queue->qm_queue.id,
                                         priority);

        if (!ret)
                ev_port->link[first_avail].mapped = true;

        return ret;
}
static int32_t
dlb_do_port_link(struct rte_eventdev *dev,
                 struct dlb_eventdev_queue *ev_queue,
                 struct dlb_eventdev_port *ev_port,
                 uint8_t prio)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int err;

        /* Don't link until start time. */
        if (dlb->run_state == DLB_RUN_STATE_STOPPED)
                return 0;

        if (ev_queue->qm_queue.is_directed)
                err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
        else
                err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);

        if (err) {
                DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
                            ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
                            ev_queue->id, ev_port->id);
                rte_errno = -EINVAL;
                return -1;
        }

        return 0;
}
static int
dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int i;

        /* Perform requested port->queue links */
        for (i = 0; i < dlb->num_ports; i++) {
                struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
                int j;

                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
                        struct dlb_eventdev_queue *ev_queue;
                        uint8_t prio, queue_id;

                        if (!ev_port->link[j].valid)
                                continue;

                        prio = ev_port->link[j].priority;
                        queue_id = ev_port->link[j].queue_id;

                        if (dlb_validate_port_link(ev_port, queue_id, true, j))
                                return -EINVAL;

                        ev_queue = &dlb->ev_queues[queue_id];

                        if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
                                return -EINVAL;
                }
        }

        return 0;
}
static int
dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
                       const uint8_t queues[], const uint8_t priorities[],
                       uint16_t nb_links)
{
        struct dlb_eventdev_port *ev_port = event_port;
        struct dlb_eventdev *dlb;
        int i, j;

        RTE_SET_USED(dev);

        if (ev_port == NULL) {
                DLB_LOG_ERR("dlb: evport not setup\n");
                rte_errno = -EINVAL;
                return 0;
        }

        if (!ev_port->setup_done &&
            ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
                DLB_LOG_ERR("dlb: evport not setup\n");
                rte_errno = -EINVAL;
                return 0;
        }

        /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
         * queues pointer.
         */
        if (nb_links == 0) {
                DLB_LOG_DBG("dlb: nb_links is 0\n");
                return 0; /* Ignore and return success */
        }

        dlb = ev_port->dlb;

        DLB_LOG_DBG("Linking %u queues to %s port %d\n",
                    nb_links,
                    ev_port->qm_port.is_directed ? "DIR" : "LDB",
                    ev_port->id);

        for (i = 0; i < nb_links; i++) {
                struct dlb_eventdev_queue *ev_queue;
                uint8_t queue_id, prio;
                bool found = false;
                int index = -1;

                queue_id = queues[i];
                prio = priorities[i];

                /* Check if the link already exists. */
                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
                        if (ev_port->link[j].valid) {
                                if (ev_port->link[j].queue_id == queue_id) {
                                        found = true;
                                        index = j;
                                        break;
                                }
                        } else {
                                if (index == -1)
                                        index = j;
                        }

                /* could not link */
                if (index == -1)
                        break;

                /* Check if already linked at the requested priority */
                if (found && ev_port->link[j].priority == prio)
                        continue;

                if (dlb_validate_port_link(ev_port, queue_id, found, index))
                        break; /* return index of offending queue */

                ev_queue = &dlb->ev_queues[queue_id];

                if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
                        break; /* return index of offending queue */

                ev_queue->num_links++;

                ev_port->link[index].queue_id = queue_id;
                ev_port->link[index].priority = prio;
                ev_port->link[index].valid = true;
                /* Entry already exists? If so, then must be prio change */
                if (!found)
                        ev_port->num_links++;
        }

        return i;
}
static int
dlb_eventdev_start(struct rte_eventdev *dev)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_start_domain_args cfg;
        struct dlb_cmd_response response;
        int ret, i;

        rte_spinlock_lock(&dlb->qm_instance.resource_lock);
        if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
                DLB_LOG_ERR("bad state %d for dev_start\n",
                            (int)dlb->run_state);
                rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
                return -EINVAL;
        }
        dlb->run_state = DLB_RUN_STATE_STARTING;
        rte_spinlock_unlock(&dlb->qm_instance.resource_lock);

        /* If the device was configured more than once, some event ports and/or
         * queues may need to be reconfigured.
         */
        ret = dlb_eventdev_reapply_configuration(dev);
        if (ret)
                return ret;

        /* The DLB PMD delays port links until the device is started. */
        ret = dlb_eventdev_apply_port_links(dev);
        if (ret)
                return ret;

        cfg.response = (uintptr_t)&response;

        for (i = 0; i < dlb->num_ports; i++) {
                if (!dlb->ev_ports[i].setup_done) {
                        DLB_LOG_ERR("dlb: port %d not setup", i);
                        return -ESTALE;
                }
        }

        for (i = 0; i < dlb->num_queues; i++) {
                if (dlb->ev_queues[i].num_links == 0) {
                        DLB_LOG_ERR("dlb: queue %d is not linked", i);
                        return -ENOLINK;
                }
        }

        ret = dlb_iface_sched_domain_start(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
                return ret;
        }

        dlb->run_state = DLB_RUN_STATE_STARTED;
        DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");

        return 0;
}
static inline int
dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
                             struct dlb_eventdev_port *ev_port)
{
        uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
                                                __ATOMIC_SEQ_CST);
        const int num = 1;

        if (unlikely(ev_port->inflight_max < sw_inflights)) {
                DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
                rte_errno = -ENOSPC;
                return 1;
        }

        if (ev_port->inflight_credits < num) {
                /* check if event enqueue brings ev_port over max threshold */
                uint32_t credit_update_quanta = ev_port->credit_update_quanta;

                if (sw_inflights + credit_update_quanta >
                    dlb->new_event_limit) {
                        DLB_INC_STAT(
                        ev_port->stats.traffic.tx_nospc_new_event_limit,
                        1);
                        rte_errno = -ENOSPC;
                        return 1;
                }

                __atomic_fetch_add(&dlb->inflights, credit_update_quanta,
                                   __ATOMIC_SEQ_CST);
                ev_port->inflight_credits += (credit_update_quanta);

                if (ev_port->inflight_credits < num) {
                        DLB_INC_STAT(
                        ev_port->stats.traffic.tx_nospc_inflight_credits,
                        1);
                        rte_errno = -ENOSPC;
                        return 1;
                }
        }

        return 0;
}
static inline void
dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
                         struct dlb_eventdev_port *ev_port)
{
        uint16_t quanta = ev_port->credit_update_quanta;

        if (ev_port->inflight_credits >= quanta * 2) {
                /* Replenish credits, saving one quanta for enqueues */
                uint16_t val = ev_port->inflight_credits - quanta;

                __atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
                ev_port->inflight_credits -= val;
        }
}
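/* For example (illustrative): with quanta = 32 and 80 cached inflight
 * credits, 80 - 32 = 48 credits are returned to the device-wide pool and
 * the port keeps one quanta (32) for future enqueues.
 */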
static __rte_always_inline uint16_t
dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
{
        volatile uint16_t *popcount;

        if (ldb)
                popcount = port_data->ldb_popcount;
        else
                popcount = port_data->dir_popcount;

        return *popcount;
}
static inline int
dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
                                 struct process_local_port_data *port_data)
{
        if (unlikely(qm_port->cached_ldb_credits == 0)) {
                uint16_t pc;

                pc = dlb_read_pc(port_data, true);

                qm_port->cached_ldb_credits = pc -
                        qm_port->ldb_pushcount_at_credit_expiry;
                if (unlikely(qm_port->cached_ldb_credits == 0)) {
                        DLB_INC_STAT(
                        qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
                        1);
                        DLB_LOG_DBG("ldb credits exhausted\n");
                        return 1;
                }
                qm_port->ldb_pushcount_at_credit_expiry +=
                        qm_port->cached_ldb_credits;
        }

        return 0;
}
static inline int
dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
                                 struct process_local_port_data *port_data)
{
        if (unlikely(qm_port->cached_dir_credits == 0)) {
                uint16_t pc;

                pc = dlb_read_pc(port_data, false);

                qm_port->cached_dir_credits = pc -
                        qm_port->dir_pushcount_at_credit_expiry;

                if (unlikely(qm_port->cached_dir_credits == 0)) {
                        DLB_INC_STAT(
                        qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
                        1);
                        DLB_LOG_DBG("dir credits exhausted\n");
                        return 1;
                }
                qm_port->dir_pushcount_at_credit_expiry +=
                        qm_port->cached_dir_credits;
        }

        return 0;
}
static inline int
dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
                       struct dlb_port *qm_port,
                       const struct rte_event ev[],
                       struct process_local_port_data *port_data,
                       uint8_t *sched_type,
                       uint8_t *queue_id)
{
        struct dlb_eventdev *dlb = ev_port->dlb;
        struct dlb_eventdev_queue *ev_queue;
        uint16_t *cached_credits = NULL;
        struct dlb_queue *qm_queue;

        ev_queue = &dlb->ev_queues[ev->queue_id];
        qm_queue = &ev_queue->qm_queue;
        *queue_id = qm_queue->id;

        /* Ignore sched_type and hardware credits on release events */
        if (ev->op == RTE_EVENT_OP_RELEASE)
                goto op_check;

        if (!qm_queue->is_directed) {
                /* Load balanced destination queue */

                if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
                        rte_errno = -ENOSPC;
                        return 1;
                }
                cached_credits = &qm_port->cached_ldb_credits;

                switch (ev->sched_type) {
                case RTE_SCHED_TYPE_ORDERED:
                        DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
                        if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
                                DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
                                            *queue_id);
                                rte_errno = -EINVAL;
                                return 1;
                        }
                        *sched_type = DLB_SCHED_ORDERED;
                        break;
                case RTE_SCHED_TYPE_ATOMIC:
                        DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
                        *sched_type = DLB_SCHED_ATOMIC;
                        break;
                case RTE_SCHED_TYPE_PARALLEL:
                        DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
                        if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
                                *sched_type = DLB_SCHED_ORDERED;
                        else
                                *sched_type = DLB_SCHED_UNORDERED;
                        break;
                default:
                        DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
                        DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
                        rte_errno = -EINVAL;
                        return 1;
                }
        } else {
                /* Directed destination queue */

                if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
                        rte_errno = -ENOSPC;
                        return 1;
                }
                cached_credits = &qm_port->cached_dir_credits;

                DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");

                *sched_type = DLB_SCHED_DIRECTED;
        }

op_check:
        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
                /* Check that a sw credit is available */
                if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
                        rte_errno = -ENOSPC;
                        return 1;
                }
                ev_port->inflight_credits--;
                (*cached_credits)--;
                break;
        case RTE_EVENT_OP_FORWARD:
                /* Check for outstanding_releases underflow. If this occurs,
                 * the application is not using the EVENT_OPs correctly; for
                 * example, forwarding or releasing events that were not
                 * dequeued.
                 */
                RTE_ASSERT(ev_port->outstanding_releases > 0);
                ev_port->outstanding_releases--;
                qm_port->issued_releases++;
                (*cached_credits)--;
                break;
        case RTE_EVENT_OP_RELEASE:
                ev_port->inflight_credits++;
                /* Check for outstanding_releases underflow. If this occurs,
                 * the application is not using the EVENT_OPs correctly; for
                 * example, forwarding or releasing events that were not
                 * dequeued.
                 */
                RTE_ASSERT(ev_port->outstanding_releases > 0);
                ev_port->outstanding_releases--;
                qm_port->issued_releases++;
                /* Replenish s/w credits if enough are cached */
                dlb_replenish_sw_credits(dlb, ev_port);
                break;
        }

        DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
        DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);

#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
        if (ev->op != RTE_EVENT_OP_RELEASE) {
                DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
                DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
        }
#endif

        return 0;
}
static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
        {
                /* Load-balanced cmd bytes */
                [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
                [RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
                [RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
        },
        {
                /* Directed cmd bytes */
                [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
                [RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
                [RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
        },
};
2400 dlb_event_build_hcws(struct dlb_port *qm_port,
2401 const struct rte_event ev[],
2403 uint8_t *sched_type,
2406 struct dlb_enqueue_qe *qe;
2407 uint16_t sched_word[4];
2413 sse_qe[0] = _mm_setzero_si128();
2414 sse_qe[1] = _mm_setzero_si128();
		/* Construct the metadata portion of two HCWs in one 128b SSE
		 * register. HCW metadata is constructed in the SSE registers
		 * like this:
		 * sse_qe[0][63:0]:   qe[0]'s metadata
		 * sse_qe[0][127:64]: qe[1]'s metadata
		 * sse_qe[1][63:0]:   qe[2]'s metadata
		 * sse_qe[1][127:64]: qe[3]'s metadata
		 */

		/* Convert the event operation into a command byte and store it
		 * in the metadata:
		 * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
		 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
		 * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
		 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
		 */
#define DLB_QE_CMD_BYTE 7
		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
				cmd_byte_map[qm_port->is_directed][ev[0].op],
				DLB_QE_CMD_BYTE);
		sse_qe[0] = _mm_insert_epi8(sse_qe[0],
				cmd_byte_map[qm_port->is_directed][ev[1].op],
				DLB_QE_CMD_BYTE + 8);
		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
				cmd_byte_map[qm_port->is_directed][ev[2].op],
				DLB_QE_CMD_BYTE);
		sse_qe[1] = _mm_insert_epi8(sse_qe[1],
				cmd_byte_map[qm_port->is_directed][ev[3].op],
				DLB_QE_CMD_BYTE + 8);

		/* Store priority, scheduling type, and queue ID in the sched
		 * word array because these values are re-used when the
		 * destination is a directed queue.
		 */
		sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
				sched_type[0] << 8 |
				queue_id[0];
		sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
				sched_type[1] << 8 |
				queue_id[1];
		sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
				sched_type[2] << 8 |
				queue_id[2];
		sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
				sched_type[3] << 8 |
				queue_id[3];

		/* Store the event priority, scheduling type, and queue ID in
		 * the metadata:
		 * sse_qe[0][31:16] = sched_word[0]
		 * sse_qe[0][95:80] = sched_word[1]
		 * sse_qe[1][31:16] = sched_word[2]
		 * sse_qe[1][95:80] = sched_word[3]
		 */
#define DLB_QE_QID_SCHED_WORD 1
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
					     sched_word[0],
					     DLB_QE_QID_SCHED_WORD);
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
					     sched_word[1],
					     DLB_QE_QID_SCHED_WORD + 4);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
					     sched_word[2],
					     DLB_QE_QID_SCHED_WORD);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
					     sched_word[3],
					     DLB_QE_QID_SCHED_WORD + 4);

		/* If the destination is a load-balanced queue, store the lock
		 * ID. If it is a directed queue, DLB places this field in
		 * bytes 10-11 of the received QE, so we format it accordingly:
		 * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
		 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
		 * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
		 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
		 */
#define DLB_QE_LOCK_ID_WORD 2
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
				(sched_type[0] == DLB_SCHED_DIRECTED) ?
					sched_word[0] : ev[0].flow_id,
				DLB_QE_LOCK_ID_WORD);
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
				(sched_type[1] == DLB_SCHED_DIRECTED) ?
					sched_word[1] : ev[1].flow_id,
				DLB_QE_LOCK_ID_WORD + 4);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
				(sched_type[2] == DLB_SCHED_DIRECTED) ?
					sched_word[2] : ev[2].flow_id,
				DLB_QE_LOCK_ID_WORD);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
				(sched_type[3] == DLB_SCHED_DIRECTED) ?
					sched_word[3] : ev[3].flow_id,
				DLB_QE_LOCK_ID_WORD + 4);
		/* Store the event type and sub event type in the metadata:
		 * sse_qe[0][15:0]  = ev_type[0] (sub_event_type << 8 | event_type)
		 * sse_qe[0][79:64] = ev_type[1]
		 * sse_qe[1][15:0]  = ev_type[2]
		 * sse_qe[1][79:64] = ev_type[3]
		 */
#define DLB_QE_EV_TYPE_WORD 0
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
					     ev[0].sub_event_type << 8 |
						ev[0].event_type,
					     DLB_QE_EV_TYPE_WORD);
		sse_qe[0] = _mm_insert_epi16(sse_qe[0],
					     ev[1].sub_event_type << 8 |
						ev[1].event_type,
					     DLB_QE_EV_TYPE_WORD + 4);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
					     ev[2].sub_event_type << 8 |
						ev[2].event_type,
					     DLB_QE_EV_TYPE_WORD);
		sse_qe[1] = _mm_insert_epi16(sse_qe[1],
					     ev[3].sub_event_type << 8 |
						ev[3].event_type,
					     DLB_QE_EV_TYPE_WORD + 4);
		/* Store the metadata to memory (use the double-precision
		 * _mm_storeh_pd because there is no integer function for
		 * storing the upper 64b):
		 * qe[0] metadata = sse_qe[0][63:0]
		 * qe[1] metadata = sse_qe[0][127:64]
		 * qe[2] metadata = sse_qe[1][63:0]
		 * qe[3] metadata = sse_qe[1][127:64]
		 */
		_mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
		_mm_storeh_pd((double *)&qe[1].u.opaque_data,
			      (__m128d)sse_qe[0]);
		_mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
		_mm_storeh_pd((double *)&qe[3].u.opaque_data,
			      (__m128d)sse_qe[1]);

		qe[0].data = ev[0].u64;
		qe[1].data = ev[1].u64;
		qe[2].data = ev[2].u64;
		qe[3].data = ev[3].u64;
		break;
	case 3:
	case 2:
	case 1:
		/* Scalar fallback for partial bursts */
		for (i = 0; i < num; i++) {
			qe[i].cmd_byte =
				cmd_byte_map[qm_port->is_directed][ev[i].op];
			qe[i].sched_type = sched_type[i];
			qe[i].data = ev[i].u64;
			qe[i].qid = queue_id[i];
			qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);

			qe[i].lock_id = ev[i].flow_id;
			if (sched_type[i] == DLB_SCHED_DIRECTED) {
				struct dlb_msg_info *info =
					(struct dlb_msg_info *)&qe[i].lock_id;

				info->qid = queue_id[i];
				info->sched_type = DLB_SCHED_DIRECTED;
				info->priority = qe[i].priority;
			}

			qe[i].u.event_type.major = ev[i].event_type;
			qe[i].u.event_type.sub = ev[i].sub_event_type;
		}
		break;
	}
}
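/* Worked example of the sched_word layout built above (a sketch; assumes
 * EV_TO_DLB_PRIO() compresses the 8-bit eventdev priority to 3 bits):
 *
 *   15    13 12    10 9    8 7        0
 *   +-------+--------+------+----------+
 *   | unused| prio   | type | queue_id |
 *   +-------+--------+------+----------+
 *
 * e.g. EV_TO_DLB_PRIO(ev->priority) = 2, an atomic sched type, and
 * queue_id = 5 gives:
 *
 *   sched_word = (2 << 10) | (DLB_SCHED_ATOMIC << 8) | 5;
 */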
static __rte_always_inline void
dlb_pp_write(struct dlb_enqueue_qe *qe4,
	     struct process_local_port_data *port_data)
{
	dlb_movdir64b(port_data->pp_addr, qe4);
}
static inline void
dlb_hw_do_enqueue(struct dlb_port *qm_port,
		  bool do_sfence,
		  struct process_local_port_data *port_data)
{
	DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");

	/* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
	 * application writes complete before enqueueing the release HCW.
	 */
	if (do_sfence)
		rte_wmb();

	dlb_pp_write(qm_port->qe4, port_data);
}
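/* Ordering rationale (descriptive, no additional logic): MOVDIR64B is a
 * weakly-ordered 64-byte store. Without the preceding rte_wmb(), the device
 * could observe the HCW before the application's writes to the event payload
 * (e.g. an mbuf referenced via ev->u64) become globally visible. The fence is
 * only required once per burst, hence the do_sfence argument driven by
 * "i == 0" at the call site below.
 */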
static inline uint16_t
__dlb_event_enqueue_burst(void *event_port,
			  const struct rte_event events[],
			  uint16_t num)
{
	struct dlb_eventdev_port *ev_port = event_port;
	struct dlb_port *qm_port = &ev_port->qm_port;
	struct process_local_port_data *port_data;
	int i;

	RTE_ASSERT(ev_port->enq_configured);
	RTE_ASSERT(events != NULL);

	rte_errno = 0;
	i = 0;

	port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];

	while (i < num) {
		uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
		uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
		int pop_offs = 0;
		int j = 0;

		memset(qm_port->qe4,
		       0,
		       DLB_NUM_QES_PER_CACHE_LINE *
		       sizeof(struct dlb_enqueue_qe));

		for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
			const struct rte_event *ev = &events[i + j];

			if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
						   port_data, &sched_types[j],
						   &queue_ids[j]))
				break;
		}

		if (j == 0)
			break;

		dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
				     sched_types, queue_ids);

		dlb_hw_do_enqueue(qm_port, i == 0, port_data);

		/* Don't include the token pop QE in the enqueue count */
		i += j - pop_offs;

		/* Don't interpret j < DLB_NUM_... as out-of-credits if
		 * pop_offs != 0
		 */
		if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
			break;
	}

	RTE_ASSERT(!((i == 0 && rte_errno != -ENOSPC)));

	return i;
}
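/* Illustrative application-side use of this enqueue path (a sketch, not part
 * of the driver; dev_id, port_id, ev[] and nb_events are assumed to be set up
 * by the application):
 *
 *	uint16_t n = 0;
 *	while (n < nb_events) {
 *		uint16_t done = rte_event_enqueue_burst(dev_id, port_id,
 *							&ev[n],
 *							nb_events - n);
 *		if (done == 0 && rte_errno == -ENOSPC)
 *			continue;	(retry: temporarily out of credits)
 *		n += done;
 *	}
 *
 * Note this PMD reports out-of-credits by returning a short count with
 * rte_errno set to -ENOSPC.
 */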
static inline uint16_t
dlb_event_enqueue_burst(void *event_port,
			const struct rte_event events[],
			uint16_t num)
{
	return __dlb_event_enqueue_burst(event_port, events, num);
}

static inline uint16_t
dlb_event_enqueue(void *event_port,
		  const struct rte_event events[])
{
	return __dlb_event_enqueue_burst(event_port, events, 1);
}

static uint16_t
dlb_event_enqueue_new_burst(void *event_port,
			    const struct rte_event events[],
			    uint16_t num)
{
	return __dlb_event_enqueue_burst(event_port, events, num);
}

static uint16_t
dlb_event_enqueue_forward_burst(void *event_port,
				const struct rte_event events[],
				uint16_t num)
{
	return __dlb_event_enqueue_burst(event_port, events, num);
}
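/* All four public enqueue entry points funnel into
 * __dlb_event_enqueue_burst(). The new/forward variants exist to populate the
 * eventdev API's specialized fast-path slots; as written they do not exploit
 * the fact that the op type is known a priori.
 */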
void
dlb_entry_points_init(struct rte_eventdev *dev)
{
	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
		.dev_infos_get = dlb_eventdev_info_get,
		.dev_configure = dlb_eventdev_configure,
		.dev_start = dlb_eventdev_start,
		.queue_def_conf = dlb_eventdev_queue_default_conf_get,
		.port_def_conf = dlb_eventdev_port_default_conf_get,
		.queue_setup = dlb_eventdev_queue_setup,
		.port_setup = dlb_eventdev_port_setup,
		.port_link = dlb_eventdev_port_link,
		.port_unlink = dlb_eventdev_port_unlink,
		.port_unlinks_in_progress =
			dlb_eventdev_port_unlinks_in_progress,
		.dump = dlb_eventdev_dump,
		.xstats_get = dlb_eventdev_xstats_get,
		.xstats_get_names = dlb_eventdev_xstats_get_names,
		.xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
		.xstats_reset = dlb_eventdev_xstats_reset,
	};

	/* Expose PMD's eventdev interface */
	dev->dev_ops = &dlb_eventdev_entry_ops;

	dev->enqueue = dlb_event_enqueue;
	dev->enqueue_burst = dlb_event_enqueue_burst;
	dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
	dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
}
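/* With the ops and fast-path pointers installed above, a minimal application
 * bring-up against this PMD looks like the following sketch (error handling
 * omitted; device ID 0 is an assumption):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(0, &info);          (-> dlb_eventdev_info_get)
 *	(populate cfg within the limits reported in info)
 *	rte_event_dev_configure(0, &cfg);          (-> dlb_eventdev_configure)
 *	rte_event_queue_setup(0, 0, NULL);         (-> dlb_eventdev_queue_setup)
 *	rte_event_port_setup(0, 0, NULL);          (-> dlb_eventdev_port_setup)
 *	rte_event_port_link(0, 0, NULL, NULL, 0);  (-> dlb_eventdev_port_link)
 *	rte_event_dev_start(0);                    (-> dlb_eventdev_start)
 */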
int
dlb_primary_eventdev_probe(struct rte_eventdev *dev,
			   const char *name,
			   struct dlb_devargs *dlb_args)
{
	struct dlb_eventdev *dlb;
	int err;

	dlb = dev->data->dev_private;

	dlb->event_dev = dev; /* backlink */

	evdev_dlb_default_info.driver_name = name;

	dlb->max_num_events_override = dlb_args->max_num_events;
	dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
	dlb->defer_sched = dlb_args->defer_sched;
	dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;

	/* Open the interface.
	 * For vdev mode, this means open the dlb kernel module.
	 */
	err = dlb_iface_open(&dlb->qm_instance, name);
	if (err < 0) {
		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
			    err);
		return err;
	}

	err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
	if (err < 0) {
		DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
			    err);
		return err;
	}

	err = dlb_hw_query_resources(dlb);
	if (err) {
		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
		return err;
	}

	err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
	if (err < 0) {
		DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
		return err;
	}

	/* Complete xstats runtime initialization */
	err = dlb_xstats_init(dlb);
	if (err) {
		DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
		return err;
	}

	rte_spinlock_init(&dlb->qm_instance.resource_lock);

	dlb_iface_low_level_io_init(dlb);

	dlb_entry_points_init(dev);

	return 0;
}
int
dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
			     const char *name)
{
	struct dlb_eventdev *dlb;
	int err;

	dlb = dev->data->dev_private;

	evdev_dlb_default_info.driver_name = name;

	err = dlb_iface_open(&dlb->qm_instance, name);
	if (err < 0) {
		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
			    err);
		return err;
	}

	err = dlb_hw_query_resources(dlb);
	if (err) {
		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
		return err;
	}

	dlb_iface_low_level_io_init(dlb);

	dlb_entry_points_init(dev);

	return 0;
}
int
dlb_parse_params(const char *params,
		 const char *name,
		 struct dlb_devargs *dlb_args)
{
	int ret = 0;
	static const char * const args[] = { NUMA_NODE_ARG,
					     DLB_MAX_NUM_EVENTS,
					     DLB_NUM_DIR_CREDITS,
					     DEV_ID_ARG,
					     DLB_DEFER_SCHED_ARG,
					     DLB_NUM_ATM_INFLIGHTS_ARG,
					     NULL };

	if (params && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (kvlist == NULL) {
			DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
				     name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
						     set_numa_node,
						     &dlb_args->socket_id);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing numa node parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
						 set_max_num_events,
						 &dlb_args->max_num_events);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist,
					DLB_NUM_DIR_CREDITS,
					set_num_dir_credits,
					&dlb_args->num_dir_credits_override);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
						 set_dev_id,
						 &dlb_args->dev_id);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing dev_id parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
						 set_defer_sched,
						 &dlb_args->defer_sched);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist,
						 DLB_NUM_ATM_INFLIGHTS_ARG,
						 set_num_atm_inflights,
						 &dlb_args->num_atm_inflights);
			if (ret != 0) {
				DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}
	return ret;
}
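/* Example devargs string accepted by the parser above (illustrative; the
 * literal key spellings are defined by the macros in the args[] table, and
 * the vdev name is the one this PMD registers elsewhere, assumed here):
 *
 *   --vdev=dlb_event,numa_node=0,max_num_events=4096,num_dir_credits=64
 *
 * Each recognized key is handed to its set_* callback, which writes the
 * parsed value into the corresponding struct dlb_devargs field.
 */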
RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);