#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
+
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
-#include <rte_eventdev_pmd_pci.h>
+#include <eventdev_pmd.h>
+#include <eventdev_pmd_pci.h>
#include <rte_memory.h>
#include <rte_string_fns.h>
{
int i;
/* Addresses will be initialized at port create */
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
/* First directed ports */
dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
- PAGE_SIZE);
+ rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
- (void *)(pp_base + (PAGE_SIZE * response.id));
+ (void *)(pp_base + (rte_mem_page_size() * response.id));
dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
memset(&port_memory, 0, sizeof(port_memory));
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
- PAGE_SIZE);
+ rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
- (void *)(pp_base + (PAGE_SIZE * response.id));
+ (void *)(pp_base + (rte_mem_page_size() * response.id));
dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
(void *)(port_base);
return ret;
}
+/* PF-mode backend for the unmap-QID eventdev interface hook.  Forwards the
+ * request to the hardware layer for this handle's scheduling domain and
+ * copies the hardware response back into cfg->response.  Returns 0 on
+ * success or the negative error code from dlb2_hw_unmap_qid().
+ *
+ * NOTE(review): DLB2_INFO's first argument names `dev`, which is not
+ * declared in this scope; presumably the macro discards that argument
+ * (matching the file's existing functions) -- confirm.
+ */
+static int
+dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
+		  struct dlb2_unmap_qid_args *cfg)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	/* false/0: request originates from the PF, not a virtual device */
+	ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
+				handle->domain_id,
+				cfg,
+				&response,
+				false,
+				0);
+
+	cfg->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+/* PF-mode backend for the pending-port-unmaps eventdev interface hook.
+ * Queries the hardware layer for the number of QID unmap operations still
+ * in progress on the given port and copies the result into args->response.
+ * Returns 0 on success or a negative error code from
+ * dlb2_hw_pending_port_unmaps().
+ *
+ * NOTE(review): DLB2_INFO's first argument names `dev`, which is not
+ * declared here; presumably the macro ignores it -- confirm.
+ */
+static int
+dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
+			    struct dlb2_pending_port_unmaps_args *args)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	/* false/0: PF request, no virtual-device ID */
+	ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
+					  handle->domain_id,
+					  args,
+					  &response,
+					  false,
+					  0);
+
+	args->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+/* PF-mode backend for the scheduling-domain-start eventdev interface hook.
+ * Starts the handle's scheduling domain via dlb2_pf_start_domain() (note:
+ * unlike the other hooks here, this call takes no vdev arguments) and
+ * copies the hardware response into cfg->response.  Returns 0 on success
+ * or a negative error code.
+ *
+ * NOTE(review): DLB2_INFO's first argument names `dev`, which is not
+ * declared here; presumably the macro ignores it -- confirm.
+ */
+static int
+dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
+			   struct dlb2_start_domain_args *cfg)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	ret = dlb2_pf_start_domain(&dlb2_dev->hw,
+				   handle->domain_id,
+				   cfg,
+				   &response);
+
+	cfg->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+/* PF-mode backend for the get-LDB-queue-depth eventdev interface hook.
+ * Reads the current depth of a load-balanced queue from hardware and
+ * copies the result into args->response.  Returns 0 on success or a
+ * negative error code from dlb2_hw_get_ldb_queue_depth().
+ *
+ * NOTE(review): DLB2_INFO's first argument names `dev`, which is not
+ * declared here; presumably the macro ignores it -- confirm.
+ */
+static int
+dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
+			    struct dlb2_get_ldb_queue_depth_args *args)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	/* false/0: PF request, no virtual-device ID */
+	ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
+					  handle->domain_id,
+					  args,
+					  &response,
+					  false,
+					  0);
+
+	args->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+/* PF-mode backend for the get-DIR-queue-depth eventdev interface hook.
+ * Reads the current depth of a directed queue from hardware and copies
+ * the result into args->response.  Returns 0 on success or a negative
+ * error code from dlb2_hw_get_dir_queue_depth().
+ *
+ * NOTE(review): DLB2_INFO's first argument names `dev`, which is not
+ * declared here; presumably the macro ignores it -- confirm.
+ */
+static int
+dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
+			    struct dlb2_get_dir_queue_depth_args *args)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret = 0;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	/* false/0: PF request, no virtual-device ID */
+	ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
+					  handle->domain_id,
+					  args,
+					  &response,
+					  false,
+					  0);
+
+	args->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
dlb2_iface_map_qid = dlb2_pf_map_qid;
+ dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
+ dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
+ dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
+ dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
+ dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
.num_dir_credits_override = -1,
.qid_depth_thresholds = { {0} },
- .cos_id = DLB2_COS_DEFAULT
+ .cos_id = DLB2_COS_DEFAULT,
+ .poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
+ .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
+ .default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT
};
struct dlb2_eventdev *dlb2;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+ dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
/* Probe the DLB2 PF layer */
dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);
if (pci_dev->device.devargs) {
ret = dlb2_parse_params(pci_dev->device.devargs->args,
pci_dev->device.devargs->name,
- &dlb2_args);
+ &dlb2_args,
+ dlb2->version);
if (ret) {
DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
ret, rte_errno);
event_dlb2_pf_name,
&dlb2_args);
} else {
+ dlb2 = dlb2_pmd_priv(eventdev);
+ dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
ret = dlb2_secondary_eventdev_probe(eventdev,
event_dlb2_pf_name);
}
},
};
+/* PCI ID match table for DLB v2.5 physical-function devices; the entry
+ * with vendor_id == 0 terminates the list, per rte_pci_id convention.
+ */
+static const struct rte_pci_id pci_id_dlb2_5_map[] = {
+	{
+		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
static int
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
}
+/* PCI probe callback for DLB v2.5 PF devices.  Creates an eventdev of
+ * sizeof(struct dlb2_eventdev) private data, initialized through
+ * dlb2_eventdev_pci_init(), registered under event_dlb2_pf_name.
+ * Returns 0 on success; on failure, logs and returns the error code from
+ * rte_event_pmd_pci_probe_named().
+ */
+static int
+event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
+		       struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+					    sizeof(struct dlb2_eventdev),
+					    dlb2_eventdev_pci_init,
+					    event_dlb2_pf_name);
+	if (ret) {
+		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
+			      "ret=%d\n", ret);
+	}
+
+	return ret;
+}
+
+/* PCI remove callback for DLB v2.5 PF devices: tears down the eventdev
+ * created at probe time.  Returns the result of rte_event_pmd_pci_remove()
+ * (0 on success, negative on failure); failures are logged.
+ */
+static int
+event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
+{
+	int ret;
+
+	ret = rte_event_pmd_pci_remove(pci_dev, NULL);
+
+	if (ret) {
+		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
+			      "ret=%d\n", ret);
+	}
+
+	return ret;
+}
+
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
.id_table = pci_id_dlb2_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.remove = event_dlb2_pci_remove,
};
+/* PCI driver descriptor for DLB v2.5 PF devices.  RTE_PCI_DRV_NEED_MAPPING
+ * requests that EAL map the device's BARs before probe is invoked.
+ */
+static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
+	.id_table = pci_id_dlb2_5_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = event_dlb2_5_pci_probe,
+	.remove = event_dlb2_5_pci_remove,
+};
+
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
+
+RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);