/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include "skeleton_eventdev.h"
#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	/* Placeholder fast path: the skeleton accepts no events */
	RTE_SET_USED(sp);
	RTE_SET_USED(ev);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);

	return 0;
}
static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
			uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	/* Placeholder fast path: the skeleton produces no events */
	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}
static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->disable_implicit_release = 0;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}
static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}
static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}
/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get = skeleton_eventdev_info_get,
	.dev_configure = skeleton_eventdev_configure,
	.dev_start = skeleton_eventdev_start,
	.dev_stop = skeleton_eventdev_stop,
	.dev_close = skeleton_eventdev_close,
	.queue_def_conf = skeleton_eventdev_queue_def_conf,
	.queue_setup = skeleton_eventdev_queue_setup,
	.queue_release = skeleton_eventdev_queue_release,
	.port_def_conf = skeleton_eventdev_port_def_conf,
	.port_setup = skeleton_eventdev_port_setup,
	.port_release = skeleton_eventdev_port_release,
	.port_link = skeleton_eventdev_port_link,
	.port_unlink = skeleton_eventdev_port_unlink,
	.timeout_ticks = skeleton_eventdev_timeout_ticks,
	.dump = skeleton_eventdev_dump
};
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	return ret;
}
/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID         0x177d
#define EVENTDEV_SKEL_PRODUCT_ID        0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};
static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};
RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
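/*
 * Usage sketch (illustrative only, not part of the driver): once EAL has
 * probed a matching PCI device (vendor 0x177d, device 0x0001) through the
 * driver registered above, the application sees an ordinary eventdev and
 * can read back the limits reported by skeleton_eventdev_info_get() via
 * the public API. dev_id 0 is an assumption for the example:
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_count() > 0)
 *		rte_event_dev_info_get(0, &info);
 */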
/* VDEV based event device */
static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}
static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
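
/*
 * Usage sketch (illustrative only): the vdev flavour of this PMD can be
 * instantiated from the EAL command line, e.g. --vdev="event_skeleton0",
 * or programmatically as below. The instance name and the configuration
 * values are assumptions for the example, not requirements of the driver:
 *
 *	if (rte_vdev_init("event_skeleton0", NULL) == 0 &&
 *			rte_event_dev_count() > 0) {
 *		uint8_t dev_id = rte_event_dev_count() - 1;
 *		struct rte_event_dev_config cfg = {
 *			.dequeue_timeout_ns = 0,
 *			.nb_events_limit = 4096,
 *			.nb_event_queues = 1,
 *			.nb_event_ports = 1,
 *			.nb_event_queue_flows = 1024,
 *			.nb_event_port_dequeue_depth = 16,
 *			.nb_event_port_enqueue_depth = 16,
 *		};
 *
 *		rte_event_dev_configure(dev_id, &cfg);
 *	}
 */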