/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <rte_debug.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb_priv.h"
#include "../dlb_iface.h"
#include "../dlb_inline_fns.h"

#include "dlb_main.h"

#include "base/dlb_hw_types.h"
#include "base/dlb_osdep.h"
#include "base/dlb_resource.h"
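
/* This file implements the dlb_iface callbacks for PF (physical function)
 * mode, in which the PMD drives the device directly through the base/
 * resource layer.
 */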

static void
dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
{
	int i;

	/* Addresses will be initialized at port create */
	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
		/* First directed ports */
		dlb_port[i][DLB_DIR].pp_addr = NULL;
		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
		dlb_port[i][DLB_DIR].dir_popcount = NULL;
		dlb_port[i][DLB_DIR].cq_base = NULL;
		dlb_port[i][DLB_DIR].mmaped = true;

		/* Now load balanced ports */
		dlb_port[i][DLB_LDB].pp_addr = NULL;
		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
		dlb_port[i][DLB_LDB].dir_popcount = NULL;
		dlb_port[i][DLB_LDB].cq_base = NULL;
		dlb_port[i][DLB_LDB].mmaped = true;
	}
}
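
/* The PF device is fully set up by dlb_probe() at PCI init time, so there is
 * nothing left for open() to do.
 */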
static int
dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
{
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}

static void
dlb_pf_domain_close(struct dlb_eventdev *dlb)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
	int ret;

	ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
	if (ret)
		DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
}

static int
dlb_pf_get_device_version(struct dlb_hw_dev *handle,
			  uint8_t *revision)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	*revision = dlb_dev->revision;

	return 0;
}

static int
dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
			 struct dlb_get_num_resources_args *rsrcs)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);

	return 0;
}
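
/* The create/configure callbacks below share a pattern: call into the base
 * resource layer, then copy the resulting dlb_cmd_response back through the
 * response pointer embedded in the user's argument struct.
 */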

static int
dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
			   struct dlb_create_sched_domain_args *arg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	if (dlb_dev->domain_reset_failed) {
		response.status = DLB_ST_DOMAIN_RESET_FAILED;
		ret = -EINVAL;
		goto done;
	}

	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);

done:
	*(struct dlb_cmd_response *)arg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
			      struct dlb_create_ldb_pool_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
			      struct dlb_create_dir_pool_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
			enum dlb_cq_poll_modes *mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	/* B0 and later hardware supports the sparse CQ mode, which writes one
	 * QE per cache line.
	 */
	if (dlb_dev->revision >= DLB_REV_B0)
		*mode = DLB_CQ_POLL_MODE_SPARSE;
	else
		*mode = DLB_CQ_POLL_MODE_STD;

	return 0;
}

static int
dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
			struct dlb_create_ldb_queue_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
				      handle->domain_id,
				      cfg,
				      &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
			struct dlb_create_dir_queue_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
				      handle->domain_id,
				      cfg,
				      &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static void *
dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
			   size_t size, int align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
		 (unsigned long)rte_get_timer_cycles());

	/* Fall back to the main lcore's socket for non-EAL threads */
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);

	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					  RTE_MEMZONE_IOVA_CONTIG, align);
	if (*mz == NULL) {
		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
			    size);
		*phys = 0;
		return NULL;
	}

	*phys = (*mz)->iova;

	return (*mz)->addr;
}
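
/* Port memory layout, as wired up below: the first two cache lines hold the
 * LDB and DIR credit pop counts, respectively, and the CQ entries start at
 * the third cache line.
 */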
static int
dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
		       struct dlb_create_ldb_port_args *cfg,
		       enum dlb_cq_poll_modes poll_mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz, cq_alloc_depth;
	rte_iova_t pp_dma_base;
	rte_iova_t pc_dma_base;
	rte_iova_t cq_dma_base;
	int is_dir = false;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* The hardware always uses a CQ depth of at least
	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
	 * perspective we support a depth as low as 1 for LDB ports.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);

	/* Calculate the port memory required, including two cache lines for
	 * credit pop counts. Round up to the nearest cache line.
	 */
	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
					       alloc_sz, PAGE_SIZE);
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);
	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     pc_dma_base,
				     cq_dma_base,
				     &response);
	if (ret)
		goto create_port_err;

	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
	dlb_port[response.id][DLB_LDB].pp_addr =
		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));

	dlb_port[response.id][DLB_LDB].cq_base =
		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

	dlb_port[response.id][DLB_LDB].ldb_popcount =
		(void *)(uintptr_t)port_base;
	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
		(port_base + RTE_CACHE_LINE_SIZE);
	dlb_port[response.id][DLB_LDB].mz = mz;

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return 0;

create_port_err:

	rte_memzone_free(mz);

	return ret;
}
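
/* Directed port setup mirrors the load-balanced path, except that the CQ is
 * sized from the requested depth alone and the addresses land in the DLB_DIR
 * column of the port table.
 */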
static int
dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
		       struct dlb_create_dir_port_args *cfg,
		       enum dlb_cq_poll_modes poll_mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	rte_iova_t pp_dma_base;
	rte_iova_t pc_dma_base;
	rte_iova_t cq_dma_base;
	int is_dir = true;

	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, including two cache lines for
	 * credit pop counts. Round up to the nearest cache line.
	 */
	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
					       alloc_sz, PAGE_SIZE);
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);
	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     pc_dma_base,
				     cq_dma_base,
				     &response);
	if (ret)
		goto create_port_err;

	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
	dlb_port[response.id][DLB_DIR].pp_addr =
		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));

	dlb_port[response.id][DLB_DIR].cq_base =
		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

	dlb_port[response.id][DLB_DIR].ldb_popcount =
		(void *)(uintptr_t)port_base;
	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
		(port_base + RTE_CACHE_LINE_SIZE);
	dlb_port[response.id][DLB_DIR].mz = mz;

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return 0;

create_port_err:

	rte_memzone_free(mz);

	return ret;
}
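
/* Sequence numbers are a pooled resource divided among groups; the three
 * callbacks below query or set the per-group allocation and report its
 * current occupancy.
 */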
static int
dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
			 struct dlb_get_sn_allocation_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);

	response.id = ret;
	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static int
dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
			 struct dlb_set_sn_allocation_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
					     args->num);

	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static int
dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
			struct dlb_get_sn_occupancy_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
						      args->group);

	response.id = ret;
	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static void
dlb_pf_iface_fn_ptrs_init(void)
{
	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
	dlb_iface_open = dlb_pf_open;
	dlb_iface_domain_close = dlb_pf_domain_close;
	dlb_iface_get_device_version = dlb_pf_get_device_version;
	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
}
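
/* PCI probe: only the primary process probes the hardware and parses
 * devargs; secondary processes attach to the primary's state.
 */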
static int
dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct dlb_devargs dlb_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
	};
	struct dlb_eventdev *dlb;

	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		    eventdev->data->dev_id, eventdev->data->socket_id);

	dlb_entry_points_init(eventdev);

	dlb_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

		/* Probe the DLB PF layer */
		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);

		if (dlb->qm_instance.pf_dev == NULL) {
			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
				    rte_errno);
			ret = -rte_errno;
			goto dlb_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb_parse_params(pci_dev->device.devargs->args,
					       pci_dev->device.devargs->name,
					       &dlb_args);
			if (ret) {
				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					    ret, rte_errno);
				goto dlb_probe_failed;
			}
		}

		ret = dlb_primary_eventdev_probe(eventdev,
						 EVDEV_DLB_NAME_PMD_STR,
						 &dlb_args);
	} else {
		ret = dlb_secondary_eventdev_probe(eventdev,
						   EVDEV_DLB_NAME_PMD_STR);
	}
	if (ret)
		goto dlb_probe_failed;

	DLB_LOG_INFO("DLB PF Probe success\n");

	return 0;

dlb_probe_failed:

	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);

	return ret;
}

#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB_PF)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
		    struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					     sizeof(struct dlb_eventdev),
					     dlb_eventdev_pci_init,
					     EVDEV_DLB_NAME_PMD_STR);
}

static int
event_dlb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_dlb_pmd = {
	.id_table = pci_id_dlb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb_pci_probe,
	.remove = event_dlb_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);