1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
16 #include <rte_debug.h>
19 #include <rte_devargs.h>
22 #include <rte_errno.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_cycles.h>
28 #include <rte_bus_pci.h>
29 #include <rte_eventdev.h>
30 #include <eventdev_pmd.h>
31 #include <eventdev_pmd_pci.h>
32 #include <rte_memory.h>
33 #include <rte_string_fns.h>
35 #include "../dlb2_priv.h"
36 #include "../dlb2_iface.h"
37 #include "../dlb2_inline_fns.h"
38 #include "dlb2_main.h"
39 #include "base/dlb2_hw_types.h"
40 #include "base/dlb2_osdep.h"
41 #include "base/dlb2_resource.h"
/* PMD name string for the DLB2 v2.0 PF eventdev device. */
43 static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
/* Reset the per-port MMIO address cache: clear the producer-port (pp_addr)
 * and CQ base pointers for every directed and load-balanced port slot.
 * Real addresses are filled in later, at port-create time.
 * NOTE(review): listing is elided here — return type, the declaration of
 * loop index 'i', and the closing braces are not shown.
 */
46 dlb2_pf_low_level_io_init(void)
49 /* Addresses will be initialized at port create */
50 for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
51 /* First directed ports */
52 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
53 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
54 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
56 /* Now load balanced ports */
57 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
58 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
59 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
/* PF implementation of the iface 'open' hook.
 * NOTE(review): body elided in this listing — presumably a no-op for the
 * PF PMD (no device file to open, unlike a VF path); confirm against the
 * full source.
 */
64 dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
/* Report the hardware revision cached on the PF device at probe time. */
73 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
76 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
78 *revision = dlb2_dev->revision;
/* One-time hardware init for the PF: enable sparse CQ write mode for both
 * load-balanced and directed CQs. This pairs with
 * dlb2_pf_get_cq_poll_mode() always reporting DLB2_CQ_POLL_MODE_SPARSE.
 */
84 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
86 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
88 dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
89 dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
/* Query available HW resources. The trailing (false, 0) arguments
 * presumably mean "not a vdev/VF request" with vdev id 0, i.e. the PF's
 * own view — TODO confirm against dlb2_hw_get_num_resources() prototype.
 */
93 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
94 struct dlb2_get_num_resources_args *rsrcs)
96 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
98 return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
/* The PF PMD always uses sparse CQ poll mode (see
 * dlb2_pf_hardware_init(), which enables sparse mode in hardware).
 */
102 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
103 enum dlb2_cq_poll_modes *mode)
105 RTE_SET_USED(handle);
107 *mode = DLB2_CQ_POLL_MODE_SPARSE;
/* Create a scheduling domain in hardware. If a previous domain reset
 * failed, refuse with DLB2_ST_DOMAIN_RESET_FAILED rather than handing out
 * a domain in unknown state. The HW status/id comes back via 'response'
 * and is copied into arg->response for the caller.
 * NOTE(review): listing elided — early-return on reset failure and the
 * final return are not shown.
 */
113 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
114 struct dlb2_create_sched_domain_args *arg)
116 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
117 struct dlb2_cmd_response response = {0};
120 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
122 if (dlb2_dev->domain_reset_failed) {
123 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
128 ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
134 arg->response = response;
136 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Reset this eventdev's scheduling domain in hardware; logs on failure.
 * NOTE(review): return statement elided in this listing.
 */
143 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
145 struct dlb2_dev *dlb2_dev;
148 dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
149 ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
151 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
/* Thin wrapper: create a load-balanced queue in hardware and copy the HW
 * response (status + queue id) back into cfg->response.
 */
155 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
156 struct dlb2_create_ldb_queue_args *cfg)
158 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
159 struct dlb2_cmd_response response = {0};
162 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
164 ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
169 cfg->response = response;
171 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Query how many sequence numbers are in use for a sequence-number group;
 * result is returned through args->response.
 */
178 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
179 struct dlb2_get_sn_occupancy_args *args)
181 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
182 struct dlb2_cmd_response response = {0};
185 ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
191 args->response = response;
/* Query the number of sequence numbers allocated to args->group; result
 * is returned through args->response.
 */
197 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
198 struct dlb2_get_sn_allocation_args *args)
200 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
201 struct dlb2_cmd_response response = {0};
204 ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
209 args->response = response;
/* Set the sequence-number allocation for args->group; HW status is
 * propagated back via args->response.
 */
215 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
216 struct dlb2_set_sn_allocation_args *args)
218 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
219 struct dlb2_cmd_response response = {0};
222 ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
227 args->response = response;
/* Allocate DMA-able (IOVA-contiguous) memory of 'size' bytes, aligned to
 * 'align', on the socket of the calling lcore (falling back to the main
 * lcore's socket when called from a non-EAL thread). On success *mz holds
 * the memzone (caller frees with rte_memzone_free()) and, presumably,
 * *phys receives the IOVA — that assignment is elided in this listing.
 *
 * NOTE(review): the memzone name is made unique via the TSC value;
 * "sizeof(mz_name) - 1" is over-conservative since snprintf already
 * NUL-terminates, and "%lx" with an (unsigned long) cast of the 64-bit
 * cycle count would truncate on ILP32 — harmless for uniqueness-in-name
 * purposes, but worth confirming.
 */
233 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
234 size_t size, int align)
236 char mz_name[RTE_MEMZONE_NAMESIZE];
237 uint32_t core_id = rte_lcore_id();
238 unsigned int socket_id;
240 snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
241 (unsigned long)rte_get_timer_cycles());
242 if (core_id == (unsigned int)LCORE_ID_ANY)
243 core_id = rte_get_main_lcore();
244 socket_id = rte_lcore_to_socket_id(core_id);
245 *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
246 RTE_MEMZONE_IOVA_CONTIG, align);
248 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
249 size, rte_strerror(rte_errno));
/* Create a load-balanced port:
 *  1. Size the CQ ring — one dequeue QE per slot in standard poll mode,
 *     one cache line per slot in sparse mode; depth is clamped up to
 *     DLB2_MIN_HARDWARE_CQ_DEPTH and the total rounded to a cache line.
 *  2. Allocate page-aligned, IOVA-contiguous CQ memory and lock it so the
 *     device can safely DMA to it.
 *  3. Create the port in hardware, then cache its producer-port MMIO
 *     address and CQ base in the global dlb2_port[][] table (PP address =
 *     func BAR kva + PP_BASE + one page per port id).
 * On any failure after allocation, the memzone is freed via the
 * create_port_err label.
 * NOTE(review): several lines are elided in this listing (declarations of
 * qe_sz/alloc_sz/port_base/cq_base/pp_base/is_dir, the else branch, error
 * checks, and the final return).
 */
258 dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
259 struct dlb2_create_ldb_port_args *cfg,
260 enum dlb2_cq_poll_modes poll_mode)
262 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
263 struct dlb2_cmd_response response = {0};
264 struct dlb2_port_memory port_memory;
265 int ret, cq_alloc_depth;
267 const struct rte_memzone *mz;
273 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
275 if (poll_mode == DLB2_CQ_POLL_MODE_STD)
276 qe_sz = sizeof(struct dlb2_dequeue_qe);
278 qe_sz = RTE_CACHE_LINE_SIZE;
280 /* Calculate the port memory required, and round up to the nearest
283 cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
284 alloc_sz = cq_alloc_depth * qe_sz;
285 alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
287 port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
288 rte_mem_page_size());
289 if (port_base == NULL)
292 /* Lock the page in memory */
293 ret = rte_mem_lock_page(port_base);
295 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
296 goto create_port_err;
299 memset(port_base, 0, alloc_sz);
301 ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
307 goto create_port_err;
309 pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
310 dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
311 (void *)(pp_base + (rte_mem_page_size() * response.id));
313 dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
314 memset(&port_memory, 0, sizeof(port_memory));
316 dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;
318 dlb2_list_init_head(&port_memory.list);
320 cfg->response = response;
326 rte_memzone_free(mz);
328 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Create a directed port. Mirrors dlb2_pf_ldb_port_create(): size and
 * allocate locked, page-aligned, IOVA-contiguous CQ memory, create the
 * port in hardware, then cache the PP MMIO address, CQ base and memzone
 * in dlb2_port[][DLB2_DIR_PORT]; the memzone is freed on the error path.
 *
 * NOTE(review): unlike the LDB variant, cfg->cq_depth is used directly
 * here without the RTE_MAX(..., DLB2_MIN_HARDWARE_CQ_DEPTH) clamp —
 * confirm this asymmetry is intentional for directed ports.
 * NOTE(review): several lines are elided in this listing (local variable
 * declarations, else branch, error checks, final return).
 */
334 dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
335 struct dlb2_create_dir_port_args *cfg,
336 enum dlb2_cq_poll_modes poll_mode)
338 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
339 struct dlb2_cmd_response response = {0};
340 struct dlb2_port_memory port_memory;
343 const struct rte_memzone *mz;
349 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
351 if (poll_mode == DLB2_CQ_POLL_MODE_STD)
352 qe_sz = sizeof(struct dlb2_dequeue_qe);
354 qe_sz = RTE_CACHE_LINE_SIZE;
356 /* Calculate the port memory required, and round up to the nearest
359 alloc_sz = cfg->cq_depth * qe_sz;
360 alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
362 port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
363 rte_mem_page_size());
364 if (port_base == NULL)
367 /* Lock the page in memory */
368 ret = rte_mem_lock_page(port_base);
370 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
371 goto create_port_err;
374 memset(port_base, 0, alloc_sz);
376 ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
382 goto create_port_err;
384 pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
385 dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
386 (void *)(pp_base + (rte_mem_page_size() * response.id));
388 dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
390 memset(&port_memory, 0, sizeof(port_memory));
392 dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;
394 dlb2_list_init_head(&port_memory.list);
396 cfg->response = response;
402 rte_memzone_free(mz);
404 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Thin wrapper: create a directed queue in hardware and copy the HW
 * response (status + queue id) back into cfg->response.
 */
411 dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
412 struct dlb2_create_dir_queue_args *cfg)
414 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
415 struct dlb2_cmd_response response = {0};
418 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
420 ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
425 cfg->response = response;
427 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Thin wrapper: link (map) a queue to a load-balanced port in hardware;
 * HW status is propagated back via cfg->response.
 */
434 dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
435 struct dlb2_map_qid_args *cfg)
437 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
438 struct dlb2_cmd_response response = {0};
441 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
443 ret = dlb2_hw_map_qid(&dlb2_dev->hw,
450 cfg->response = response;
452 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Thin wrapper: unlink (unmap) a queue from a load-balanced port in
 * hardware; HW status is propagated back via cfg->response.
 */
459 dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
460 struct dlb2_unmap_qid_args *cfg)
462 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
463 struct dlb2_cmd_response response = {0};
466 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
468 ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
475 cfg->response = response;
477 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Query the number of unmap operations still in progress for a port
 * (unmaps complete asynchronously in hardware); result is returned via
 * args->response.
 */
484 dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
485 struct dlb2_pending_port_unmaps_args *args)
487 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
488 struct dlb2_cmd_response response = {0};
491 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
493 ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
500 args->response = response;
502 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Thin wrapper: start a configured scheduling domain in hardware; HW
 * status is propagated back via cfg->response.
 */
509 dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
510 struct dlb2_start_domain_args *cfg)
512 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
513 struct dlb2_cmd_response response = {0};
516 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
518 ret = dlb2_pf_start_domain(&dlb2_dev->hw,
523 cfg->response = response;
525 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Query the current depth (enqueued events) of a load-balanced queue;
 * result is returned via args->response.
 */
532 dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
533 struct dlb2_get_ldb_queue_depth_args *args)
535 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
536 struct dlb2_cmd_response response = {0};
539 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
541 ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
548 args->response = response;
550 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Query the current depth (enqueued events) of a directed queue; result
 * is returned via args->response.
 */
557 dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
558 struct dlb2_get_dir_queue_depth_args *args)
560 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
561 struct dlb2_cmd_response response = {0};
564 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
566 ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
573 args->response = response;
575 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/* Wire the PF implementations into the dlb2_iface_* function-pointer
 * table shared with the common PMD layer (../dlb2_iface.h), so the
 * device-independent code dispatches to this PF backend.
 */
582 dlb2_pf_iface_fn_ptrs_init(void)
584 dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
585 dlb2_iface_open = dlb2_pf_open;
586 dlb2_iface_domain_reset = dlb2_pf_domain_reset;
587 dlb2_iface_get_device_version = dlb2_pf_get_device_version;
588 dlb2_iface_hardware_init = dlb2_pf_hardware_init;
589 dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
590 dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
591 dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
592 dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
593 dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
594 dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
595 dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
596 dlb2_iface_map_qid = dlb2_pf_map_qid;
597 dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
598 dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
599 dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
600 dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
601 dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
602 dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
603 dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
604 dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
/* Eventdev init callback invoked from the PCI probe path.
 *  - Seeds dlb2_args with PMD defaults, then wires in the PF backend via
 *    dlb2_pf_iface_fn_ptrs_init().
 *  - Primary process: probes the DLB2 PF layer (dlb2_probe), parses any
 *    devargs overrides, then runs the full primary eventdev probe.
 *  - Secondary process: only records the HW version and attaches via the
 *    secondary probe (no HW init).
 * Failures funnel to the dlb2_probe_failed label.
 * NOTE(review): elided in this listing — 'ret' declaration, the else for
 * the secondary path, success return, and the error-label return.
 */
609 dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
612 struct rte_pci_device *pci_dev;
613 struct dlb2_devargs dlb2_args = {
614 .socket_id = rte_socket_id(),
615 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
616 .num_dir_credits_override = -1,
617 .qid_depth_thresholds = { {0} },
618 .cos_id = DLB2_COS_DEFAULT,
619 .poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
620 .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
621 .default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT
623 struct dlb2_eventdev *dlb2;
625 DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
626 eventdev->data->dev_id, eventdev->data->socket_id);
628 dlb2_pf_iface_fn_ptrs_init();
630 pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
632 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
633 dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
634 dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
636 /* Probe the DLB2 PF layer */
637 dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);
639 if (dlb2->qm_instance.pf_dev == NULL) {
640 DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
643 goto dlb2_probe_failed;
646 /* Were we invoked with runtime parameters? */
647 if (pci_dev->device.devargs) {
648 ret = dlb2_parse_params(pci_dev->device.devargs->args,
649 pci_dev->device.devargs->name,
653 DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
655 goto dlb2_probe_failed;
659 ret = dlb2_primary_eventdev_probe(eventdev,
663 dlb2 = dlb2_pmd_priv(eventdev);
664 dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
665 ret = dlb2_secondary_eventdev_probe(eventdev,
669 goto dlb2_probe_failed;
671 DLB2_LOG_INFO("DLB2 PF Probe success\n");
677 DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);
/* PCI match tables: one per hardware generation (DLB 2.0 and DLB 2.5),
 * both under the Intel vendor ID. Each table's terminating all-zero
 * sentinel entry is elided in this listing.
 */
682 #define EVENTDEV_INTEL_VENDOR_ID 0x8086
684 static const struct rte_pci_id pci_id_dlb2_map[] = {
686 RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
687 PCI_DEVICE_ID_INTEL_DLB2_PF)
694 static const struct rte_pci_id pci_id_dlb2_5_map[] = {
696 RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
697 PCI_DEVICE_ID_INTEL_DLB2_5_PF)
/* PCI probe for DLB 2.0: allocate the eventdev with dlb2_eventdev private
 * data and run dlb2_eventdev_pci_init via the common eventdev-PCI helper.
 * NOTE(review): the device-name argument to the probe helper and the
 * final return are elided in this listing.
 */
705 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
706 struct rte_pci_device *pci_dev)
710 ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
711 sizeof(struct dlb2_eventdev),
712 dlb2_eventdev_pci_init,
715 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
/* PCI remove for DLB 2.0: tear down via the common eventdev-PCI helper
 * (no per-device close callback — NULL).
 */
723 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
727 ret = rte_event_pmd_pci_remove(pci_dev, NULL);
730 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
/* PCI probe for DLB 2.5 — identical flow to event_dlb2_pci_probe();
 * the HW generation is later derived from the PCI ID in
 * dlb2_eventdev_pci_init().
 */
739 event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
740 struct rte_pci_device *pci_dev)
744 ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
745 sizeof(struct dlb2_eventdev),
746 dlb2_eventdev_pci_init,
749 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
/* PCI remove for DLB 2.5 — identical to event_dlb2_pci_remove(). */
757 event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
761 ret = rte_event_pmd_pci_remove(pci_dev, NULL);
764 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
/* Driver definitions and PMD registration: one rte_pci_driver per HW
 * generation (2.0 and 2.5), each needing BAR mapping
 * (RTE_PCI_DRV_NEED_MAPPING), registered with the EAL along with its
 * PCI ID table.
 */
772 static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
773 .id_table = pci_id_dlb2_map,
774 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
775 .probe = event_dlb2_pci_probe,
776 .remove = event_dlb2_pci_remove,
779 static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
780 .id_table = pci_id_dlb2_5_map,
781 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
782 .probe = event_dlb2_5_pci_probe,
783 .remove = event_dlb2_5_pci_remove,
786 RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
787 RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
789 RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
790 RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);