1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
15 #include <rte_debug.h>
18 #include <rte_devargs.h>
21 #include <rte_errno.h>
22 #include <rte_kvargs.h>
23 #include <rte_malloc.h>
24 #include <rte_cycles.h>
27 #include <rte_bus_pci.h>
28 #include <rte_eventdev.h>
29 #include <rte_eventdev_pmd.h>
30 #include <rte_eventdev_pmd_pci.h>
31 #include <rte_memory.h>
32 #include <rte_string_fns.h>
34 #include "../dlb2_priv.h"
35 #include "../dlb2_iface.h"
36 #include "../dlb2_inline_fns.h"
37 #include "dlb2_main.h"
38 #include "base/dlb2_hw_types.h"
39 #include "base/dlb2_osdep.h"
40 #include "base/dlb2_resource.h"
/* Human-readable PMD name, stringified from EVDEV_DLB2_NAME_PMD. */
42 static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
/*
 * Reset the global per-port low-level I/O table: clear the producer-port and
 * CQ base addresses for both the directed and load-balanced entry of every
 * port slot, and mark each entry mmaped.  Real addresses are filled in later
 * at port-create time (see the ldb/dir port create functions below).
 * NOTE(review): this excerpt elides some lines (return type, declaration of
 * 'i', closing braces) — only comments were added here.
 */
45 dlb2_pf_low_level_io_init(void)
48 /* Addresses will be initialized at port create */
49 for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
50 /* First directed ports */
51 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
52 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
53 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
55 /* Now load balanced ports */
56 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
57 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
58 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
/*
 * "Open" hook of the PF iface (wired up in dlb2_pf_iface_fn_ptrs_init).
 * NOTE(review): the body is elided in this excerpt; presumably a no-op or
 * trivial success for the PF backend — confirm against the full file.
 */
63 dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
/*
 * Report the hardware revision of the device backing 'handle' by copying
 * the cached dlb2_dev->revision into the caller's out-parameter.
 * NOTE(review): parameter list and braces are partially elided here; the
 * 'revision' out-parameter is visible only by use.
 */
72 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
75 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
77 *revision = dlb2_dev->revision;
/*
 * One-time hardware setup for the PF backend: enable sparse CQ mode for both
 * load-balanced and directed CQs (matches the DLB2_CQ_POLL_MODE_SPARSE poll
 * mode this PMD reports in dlb2_pf_get_cq_poll_mode()).
 */
83 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
85 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
87 dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
88 dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
/*
 * Query available hardware resources.  The (false, 0) arguments select the
 * PF's own view (not a virtual function) — the VF backend would differ here.
 * Returns the base-layer status code directly.
 */
92 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
93 struct dlb2_get_num_resources_args *rsrcs)
95 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
97 return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
/*
 * Report the CQ poll mode.  The PF backend always uses sparse mode
 * (consistent with the sparse-CQ enables in dlb2_pf_hardware_init()),
 * so 'handle' is unused.
 */
101 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
102 enum dlb2_cq_poll_modes *mode)
104 RTE_SET_USED(handle);
106 *mode = DLB2_CQ_POLL_MODE_SPARSE;
/*
 * Create a scheduling domain via the PF base layer.
 * Refuses to create a new domain if a previous domain reset failed
 * (domain_reset_failed flag), reporting DLB2_ST_DOMAIN_RESET_FAILED to the
 * caller through arg->response.  On the normal path, the base-layer response
 * (status + assigned domain id) is copied back into arg->response.
 * NOTE(review): error-path lines (ret assignment, goto/labels, return) are
 * elided in this excerpt.
 */
112 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
113 struct dlb2_create_sched_domain_args *arg)
115 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
116 struct dlb2_cmd_response response = {0};
119 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
121 if (dlb2_dev->domain_reset_failed) {
122 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
127 ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
133 arg->response = response;
135 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Reset the scheduling domain owned by this eventdev instance.
 * Logs an error on failure; the base-layer status code propagates to the
 * caller (return statement elided in this excerpt).
 */
142 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
144 struct dlb2_dev *dlb2_dev;
147 dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
148 ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
150 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
/*
 * Create a load-balanced queue through the PF base layer and copy the
 * base-layer response (status + queue id) back into cfg->response.
 * NOTE(review): the extra arguments to dlb2_pf_create_ldb_queue() and the
 * return statement are elided in this excerpt.
 */
154 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
155 struct dlb2_create_ldb_queue_args *cfg)
157 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
158 struct dlb2_cmd_response response = {0};
161 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
163 ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
168 cfg->response = response;
170 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Query how many sequence numbers are currently in use for a sequence-number
 * group, reporting the result to the caller via args->response.
 * NOTE(review): the remaining call arguments and return are elided here.
 */
177 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
178 struct dlb2_get_sn_occupancy_args *args)
180 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
181 struct dlb2_cmd_response response = {0};
184 ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
190 args->response = response;
/*
 * Read the number of sequence numbers allocated to group args->group.
 * The base-layer result is reported back through args->response
 * (response.id assignment elided in this excerpt — presumably set from
 * 'ret'; confirm against the full file).
 */
196 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
197 struct dlb2_get_sn_allocation_args *args)
199 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
200 struct dlb2_cmd_response response = {0};
203 ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
208 args->response = response;
/*
 * Set the sequence-number allocation for group args->group (the new count
 * argument is elided in this excerpt).  Status is reported to the caller via
 * args->response.
 */
214 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
215 struct dlb2_set_sn_allocation_args *args)
217 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
218 struct dlb2_cmd_response response = {0};
221 ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
226 args->response = response;
/*
 * Allocate IOVA-contiguous, cache-aligned DMA memory for a port CQ.
 * The memzone is reserved on the socket of the current (or main) lcore so
 * the CQ lives near the consuming core.  Out-params: *mz gets the memzone
 * (caller frees on error paths), *phys presumably receives the IOVA
 * (assignment elided in this excerpt — confirm).  Returns the virtual base
 * address, or NULL on failure (return lines elided).
 *
 * NOTE(review): sizeof(mz_name) - 1 as the snprintf size is one byte smaller
 * than needed — snprintf's size argument already includes the terminating
 * NUL.  Harmless here, but sizeof(mz_name) would be the idiomatic form.
 * NOTE(review): the name is derived from rte_get_timer_cycles(); two calls in
 * the same cycle would collide — presumably acceptable at init time, verify.
 */
232 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
233 size_t size, int align)
235 char mz_name[RTE_MEMZONE_NAMESIZE];
236 uint32_t core_id = rte_lcore_id();
237 unsigned int socket_id;
239 snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
240 (unsigned long)rte_get_timer_cycles());
241 if (core_id == (unsigned int)LCORE_ID_ANY)
242 core_id = rte_get_main_lcore();
243 socket_id = rte_lcore_to_socket_id(core_id);
244 *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
245 RTE_MEMZONE_IOVA_CONTIG, align);
247 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
248 size, rte_strerror(rte_errno));
/*
 * Create a load-balanced port:
 *  1. Size the CQ memory: one full dequeue QE per slot in standard poll mode,
 *     or one cache line per slot in sparse mode; depth is clamped up to
 *     DLB2_MIN_HARDWARE_CQ_DEPTH and the total rounded to a cache line.
 *  2. Allocate IOVA-contiguous CQ memory, lock it, and zero it.
 *  3. Ask the base layer to create the port, then record the producer-port
 *     MMIO address (PF BAR base + PAGE_SIZE * port id) and CQ base in the
 *     global dlb2_port table, and stash the memzone for later freeing.
 * On any failure after allocation, the memzone is freed (create_port_err
 * path).  NOTE(review): several lines (remaining locals, call arguments,
 * labels, returns) are elided in this excerpt.
 */
257 dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
258 struct dlb2_create_ldb_port_args *cfg,
259 enum dlb2_cq_poll_modes poll_mode)
261 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
262 struct dlb2_cmd_response response = {0};
263 struct dlb2_port_memory port_memory;
264 int ret, cq_alloc_depth;
266 const struct rte_memzone *mz;
272 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
274 if (poll_mode == DLB2_CQ_POLL_MODE_STD)
275 qe_sz = sizeof(struct dlb2_dequeue_qe);
277 qe_sz = RTE_CACHE_LINE_SIZE;
279 /* Calculate the port memory required, and round up to the nearest
282 cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
283 alloc_sz = cq_alloc_depth * qe_sz;
284 alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
286 port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
288 if (port_base == NULL)
291 /* Lock the page in memory */
292 ret = rte_mem_lock_page(port_base);
294 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
295 goto create_port_err;
298 memset(port_base, 0, alloc_sz);
300 ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
306 goto create_port_err;
308 pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
309 dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
310 (void *)(pp_base + (PAGE_SIZE * response.id));
312 dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
313 memset(&port_memory, 0, sizeof(port_memory));
315 dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;
317 dlb2_list_init_head(&port_memory.list);
319 cfg->response = response;
325 rte_memzone_free(mz);
327 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Create a directed port — structurally parallel to dlb2_pf_ldb_port_create:
 * size the CQ (QE-sized slots in standard poll mode, cache-line slots in
 * sparse mode), allocate/lock/zero IOVA-contiguous memory, create the port
 * in the base layer, then record pp_addr / cq_base / mz in the DIR slot of
 * the global dlb2_port table.  Memzone is freed on the create_port_err path.
 * NOTE(review): unlike the LDB variant, no RTE_MAX clamp against
 * DLB2_MIN_HARDWARE_CQ_DEPTH is visible here — it may exist on an elided
 * line or be intentionally absent for directed ports; confirm against the
 * full file.  Several other lines (locals, call args, labels, returns) are
 * elided in this excerpt.
 */
333 dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
334 struct dlb2_create_dir_port_args *cfg,
335 enum dlb2_cq_poll_modes poll_mode)
337 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
338 struct dlb2_cmd_response response = {0};
339 struct dlb2_port_memory port_memory;
342 const struct rte_memzone *mz;
348 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
350 if (poll_mode == DLB2_CQ_POLL_MODE_STD)
351 qe_sz = sizeof(struct dlb2_dequeue_qe);
353 qe_sz = RTE_CACHE_LINE_SIZE;
355 /* Calculate the port memory required, and round up to the nearest
358 alloc_sz = cfg->cq_depth * qe_sz;
359 alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
361 port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
363 if (port_base == NULL)
366 /* Lock the page in memory */
367 ret = rte_mem_lock_page(port_base);
369 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
370 goto create_port_err;
373 memset(port_base, 0, alloc_sz);
375 ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
381 goto create_port_err;
383 pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
384 dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
385 (void *)(pp_base + (PAGE_SIZE * response.id));
387 dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
389 memset(&port_memory, 0, sizeof(port_memory));
391 dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;
393 dlb2_list_init_head(&port_memory.list);
395 cfg->response = response;
401 rte_memzone_free(mz);
403 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Create a directed queue through the PF base layer; the base-layer response
 * (status + queue id) is copied back into cfg->response.
 * NOTE(review): remaining call arguments and the return are elided here.
 */
410 dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
411 struct dlb2_create_dir_queue_args *cfg)
413 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
414 struct dlb2_cmd_response response = {0};
417 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
419 ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
424 cfg->response = response;
426 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Map a queue to a load-balanced port (link) via the base layer; status is
 * reported back through cfg->response.
 * NOTE(review): remaining call arguments and the return are elided here.
 */
433 dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
434 struct dlb2_map_qid_args *cfg)
436 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
437 struct dlb2_cmd_response response = {0};
440 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
442 ret = dlb2_hw_map_qid(&dlb2_dev->hw,
449 cfg->response = response;
451 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Unmap a queue from a load-balanced port (unlink) via the base layer;
 * status is reported back through cfg->response.  Mirrors dlb2_pf_map_qid.
 * NOTE(review): remaining call arguments and the return are elided here.
 */
458 dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
459 struct dlb2_unmap_qid_args *cfg)
461 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
462 struct dlb2_cmd_response response = {0};
465 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
467 ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
474 cfg->response = response;
476 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Query how many QID unmap operations are still in flight for a port;
 * the count/status comes back to the caller via args->response.
 * NOTE(review): remaining call arguments and the return are elided here.
 */
483 dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
484 struct dlb2_pending_port_unmaps_args *args)
486 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
487 struct dlb2_cmd_response response = {0};
490 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
492 ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
499 args->response = response;
501 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Start a configured scheduling domain (after which its resources begin
 * scheduling events); status is reported back through cfg->response.
 * NOTE(review): remaining call arguments and the return are elided here.
 */
508 dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
509 struct dlb2_start_domain_args *cfg)
511 struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
512 struct dlb2_cmd_response response = {0};
515 DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
517 ret = dlb2_pf_start_domain(&dlb2_dev->hw,
522 cfg->response = response;
524 DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
/*
 * Wire the shared dlb2_iface_* function pointers (declared in
 * ../dlb2_iface.h) to this file's PF implementations.  Called from
 * dlb2_eventdev_pci_init() before any iface function is used, so the
 * device-independent PMD code dispatches to the PF backend.
 */
531 dlb2_pf_iface_fn_ptrs_init(void)
533 dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
534 dlb2_iface_open = dlb2_pf_open;
535 dlb2_iface_domain_reset = dlb2_pf_domain_reset;
536 dlb2_iface_get_device_version = dlb2_pf_get_device_version;
537 dlb2_iface_hardware_init = dlb2_pf_hardware_init;
538 dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
539 dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
540 dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
541 dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
542 dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
543 dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
544 dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
545 dlb2_iface_map_qid = dlb2_pf_map_qid;
546 dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
547 dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
548 dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
549 dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
550 dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
551 dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
/*
 * Eventdev init callback invoked from event_dlb2_pci_probe():
 *  1. Seed dlb2_args with defaults (current socket, max LDB credits, no
 *     directed-credit override, zeroed QID depth thresholds, default CoS).
 *  2. Install the PF iface function pointers.
 *  3. Primary process: probe the PF layer (dlb2_probe), optionally override
 *     defaults from the PCI devargs string, then run the primary probe.
 *     Secondary process: run the secondary probe instead.
 * Failures funnel through the dlb2_probe_failed label.
 * NOTE(review): several lines (ret declaration, closing braces, elided call
 * arguments, final return) are missing from this excerpt.
 */
556 dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
559 struct rte_pci_device *pci_dev;
560 struct dlb2_devargs dlb2_args = {
561 .socket_id = rte_socket_id(),
562 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
563 .num_dir_credits_override = -1,
564 .qid_depth_thresholds = { {0} },
565 .cos_id = DLB2_COS_DEFAULT
567 struct dlb2_eventdev *dlb2;
569 DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
570 eventdev->data->dev_id, eventdev->data->socket_id);
572 dlb2_pf_iface_fn_ptrs_init();
574 pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
576 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
577 dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
579 /* Probe the DLB2 PF layer */
580 dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);
582 if (dlb2->qm_instance.pf_dev == NULL) {
583 DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
586 goto dlb2_probe_failed;
589 /* Were we invoked with runtime parameters? */
590 if (pci_dev->device.devargs) {
591 ret = dlb2_parse_params(pci_dev->device.devargs->args,
592 pci_dev->device.devargs->name,
595 DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
597 goto dlb2_probe_failed;
601 ret = dlb2_primary_eventdev_probe(eventdev,
605 ret = dlb2_secondary_eventdev_probe(eventdev,
609 goto dlb2_probe_failed;
611 DLB2_LOG_INFO("DLB2 PF Probe success\n");
617 DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);
/* PCI vendor/device IDs this driver binds to (Intel DLB2 PF). */
622 #define EVENTDEV_INTEL_VENDOR_ID 0x8086
624 static const struct rte_pci_id pci_id_dlb2_map[] = {
626 RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
627 PCI_DEVICE_ID_INTEL_DLB2_PF)
/*
 * PCI probe callback: delegate to the eventdev PCI helper, which allocates
 * a struct dlb2_eventdev of private data and calls dlb2_eventdev_pci_init().
 * Logs on failure; the helper's status propagates (return elided here).
 */
635 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
636 struct rte_pci_device *pci_dev)
640 ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
641 sizeof(struct dlb2_eventdev),
642 dlb2_eventdev_pci_init,
645 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
/*
 * PCI remove callback: delegate teardown to the eventdev PCI helper
 * (NULL => no device-specific uninit callback).  Logs on failure; the
 * helper's status propagates (return elided here).
 */
653 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
657 ret = rte_event_pmd_pci_remove(pci_dev, NULL);
660 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
/*
 * PCI driver definition and registration.  RTE_PCI_DRV_NEED_MAPPING asks the
 * EAL to map the device BARs before probe; the PCI-ID table export lets tools
 * (e.g. dpdk-pmdinfo) discover supported devices.
 */
668 static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
669 .id_table = pci_id_dlb2_map,
670 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
671 .probe = event_dlb2_pci_probe,
672 .remove = event_dlb2_pci_remove,
675 RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
676 RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);