/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <rte_debug.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
#include <eventdev_pmd_pci.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"
#include "dlb2_main.h"
#include "base/dlb2_hw_types.h"
#include "base/dlb2_osdep.h"
#include "base/dlb2_resource.h"

static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);

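/* Starvation-avoidance (SA) percentages used when programming the QE and
 * QID arbiter weights in dlb2_pf_hardware_init(). Defaults: 1% for QE
 * selection, 0% for QID selection.
 */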
static unsigned int dlb2_qe_sa_pct = 1;
static unsigned int dlb2_qid_sa_pct;

static void
dlb2_pf_low_level_io_init(void)
{
	int i;

	/* Addresses will be initialized at port create */
	for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
		/* First directed ports */
		dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_DIR_PORT].mmaped = true;

		/* Now load balanced ports */
		dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
	}
}

static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}

static void
dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
			   uint8_t *revision)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	*revision = dlb2_dev->revision;
}

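/* Build the hardware arbiter weight table: priority 7 (the highest) keeps
 * DLB2_MAX_WEIGHT, and each successively lower priority is reduced by a
 * step derived from the starvation-avoidance percentage. For example, at
 * the default QE SA of 1% the step clamps to 1 (0xff, 0xfe, ..., 0xf8);
 * at 100% the step is 32 (0xff, 0xdf, ..., 0x1f).
 */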
static void dlb2_pf_calc_arbiter_weights(u8 *weight,
					 unsigned int pct)
{
	int val, i;

	/* Largest possible weight (100% SA case): 32 */
	val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS;

	/* Scale val according to the starvation avoidance percentage */
	val = (val * pct) / 100;
	if (val == 0 && pct != 0)
		val = 1;

	/* Prio 7 always has weight 0xff */
	weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT;

	for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--)
		weight[i] = weight[i + 1] - val;
}

static void
dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
	dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);

	/* Configure arbitration weights for QE selection */
	if (dlb2_qe_sa_pct <= 100) {
		u8 weight[DLB2_NUM_ARB_WEIGHTS];

		dlb2_pf_calc_arbiter_weights(weight, dlb2_qe_sa_pct);
		dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight);
	}

	/* Configure arbitration weights for QID selection */
	if (dlb2_qid_sa_pct <= 100) {
		u8 weight[DLB2_NUM_ARB_WEIGHTS];

		dlb2_pf_calc_arbiter_weights(weight, dlb2_qid_sa_pct);
		dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight);
	}
}

static int
dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
			  struct dlb2_get_num_resources_args *rsrcs)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
}

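/* dlb2_pf_hardware_init() enables sparse CQ mode unconditionally, so the
 * PF backend always reports the sparse poll mode.
 */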
static int
dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
			 enum dlb2_cq_poll_modes *mode)
{
	RTE_SET_USED(handle);

	*mode = DLB2_CQ_POLL_MODE_SPARSE;

	return 0;
}

static int
dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
			    struct dlb2_create_sched_domain_args *arg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (dlb2_dev->domain_reset_failed) {
		response.status = DLB2_ST_DOMAIN_RESET_FAILED;
		ret = -EINVAL;
		goto done;
	}

	ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);

done:
	arg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static void
dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
{
	struct dlb2_dev *dlb2_dev;
	int ret;

	dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
	ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
	if (ret)
		DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
}

static int
dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_ldb_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

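/* The sequence-number group queries below return the queried value through
 * response.id; a positive low-level return value indicates success and is
 * mapped to 0.
 */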
static int
dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
			 struct dlb2_get_sn_occupancy_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
						       args->group);

	response.id = ret;
	ret = (ret > 0) ? 0 : ret;

	args->response = response;

	return ret;
}

static int
dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_get_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);

	response.id = ret;
	ret = (ret > 0) ? 0 : ret;

	args->response = response;

	return ret;
}

static int
dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_set_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
					      args->num);

	response.status = ret;

	args->response = response;

	return ret;
}

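/* Reserve an IOVA-contiguous, 'align'-aligned memzone on the caller's NUMA
 * node for use as CQ memory. Returns the virtual base address and stores
 * the IOVA through 'phys'; the timestamp-based name keeps repeated
 * allocations from colliding.
 */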
static void *
dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
			    size_t size, int align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					  RTE_MEMZONE_IOVA_CONTIG, align);
	if (*mz == NULL) {
		DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
			     size, rte_strerror(rte_errno));
		*phys = 0;
		return NULL;
	}
	*phys = (*mz)->iova;

	return (*mz)->addr;
}

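/* Create a load-balanced port: allocate and pin its CQ memory, create the
 * port in hardware, then record the producer-port (PP) MMIO address and CQ
 * base in the dlb2_port table for the fast path.
 */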
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	uintptr_t cq_base;
	uintptr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

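/* Create a directed port: the same allocation and mapping steps as the
 * load-balanced case, but recorded under DLB2_DIR_PORT and created via the
 * directed-port hardware call.
 */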
static int
dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_dir_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	uintptr_t cq_base;
	uintptr_t pp_base;
	int is_dir = true;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	alloc_sz = cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
		(void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_dir_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

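/* The remaining low-level calls take vdev_request/vdev_id parameters; the
 * PF backend always passes false/0 because requests originate from the
 * physical function itself.
 */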
static int
dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
		struct dlb2_map_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_map_qid(&dlb2_dev->hw,
			      handle->domain_id,
			      cfg,
			      &response,
			      false,
			      0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
		  struct dlb2_unmap_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
				handle->domain_id,
				cfg,
				&response,
				false,
				0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
			    struct dlb2_pending_port_unmaps_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
			   struct dlb2_start_domain_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_start_domain(&dlb2_dev->hw,
				   handle->domain_id,
				   cfg,
				   &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_ldb_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_dir_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle,
			 struct dlb2_enable_cq_weight_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw,
				       handle->domain_id,
				       args,
				       &response,
				       false,
				       0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle,
			  struct dlb2_set_cos_bw_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw,
					args->cos_id,
					args->bandwidth);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

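/* Wire the PF implementations into the PMD's generic dlb2_iface_* hooks;
 * the shared eventdev code calls through these pointers rather than
 * invoking the PF routines directly.
 */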
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
	dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight;
	dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth;
}

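/* Per-device init callback for the PCI probe path: install the iface
 * hooks, probe the PF hardware layer (primary process only), parse any
 * devargs, and complete the eventdev-level probe.
 */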
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
		.sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
		.hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ,
		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT,
		.max_cq_depth = DLB2_DEFAULT_CQ_DEPTH
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args,
						dlb2->version);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		dlb2 = dlb2_pmd_priv(eventdev);
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}

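/* Separate PCI ID tables and driver instances are registered for DLB 2.0
 * and DLB 2.5 devices; both share dlb2_eventdev_pci_init().
 */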
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};

static const struct rte_pci_id pci_id_dlb2_5_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
		     struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					    sizeof(struct dlb2_eventdev),
					    dlb2_eventdev_pci_init,
					    event_dlb2_pf_name);
	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
			      "ret=%d\n", ret);
	}

	return ret;
}

static int
event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_remove(pci_dev, NULL);

	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
			      "ret=%d\n", ret);
	}

	return ret;
}

static int
event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
		       struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					    sizeof(struct dlb2_eventdev),
					    dlb2_eventdev_pci_init,
					    event_dlb2_pf_name);
	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
			      "ret=%d\n", ret);
	}

	return ret;
}

static int
event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_remove(pci_dev, NULL);

	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
			      "ret=%d\n", ret);
	}

	return ret;
}

static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
	.id_table = pci_id_dlb2_5_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_5_pci_probe,
	.remove = event_dlb2_5_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);

RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);