1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_debug.h>
18 #include <rte_malloc.h>
19 #include <rte_memory.h>
20 #include <rte_lcore.h>
21 #include <rte_bus_vdev.h>
23 #include "skeleton_eventdev.h"
25 #define EVENTDEV_NAME_SKELETON_PMD event_skeleton
26 /**< Skeleton event device PMD name */
/*
 * Single-event enqueue worker for a skeleton port.
 * Skeleton stub: no real hardware queue exists, so nothing is submitted.
 */
29 skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
31 	struct skeleton_port *sp = port;
/*
 * Burst enqueue worker: accepts an array of up to @nb_events events.
 * Skeleton stub: parameters are only marked as used; no events are queued.
 */
41 skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
44 	struct skeleton_port *sp = port;
49 	RTE_SET_USED(nb_events);
/*
 * Single-event dequeue worker for a skeleton port.
 * @timeout_ticks is ignored by this stub (marked used to silence warnings).
 */
55 skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
56 			uint64_t timeout_ticks)
58 	struct skeleton_port *sp = port;
62 	RTE_SET_USED(timeout_ticks);
/*
 * Burst dequeue worker: would fill @ev with up to @nb_events events.
 * Skeleton stub: all parameters are only marked as used.
 */
68 skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
69 		uint16_t nb_events, uint64_t timeout_ticks)
71 	struct skeleton_port *sp = port;
75 	RTE_SET_USED(nb_events);
76 	RTE_SET_USED(timeout_ticks);
/*
 * eventdev ops: dev_infos_get.
 * Advertises the (fixed, illustrative) capabilities and limits of this
 * skeleton device so applications can size their configuration.
 */
82 skeleton_eventdev_info_get(struct rte_eventdev *dev,
83 		struct rte_event_dev_info *dev_info)
85 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	/* Dequeue timeout range in nanoseconds and the default used when
	 * the application passes 0.
	 */
91 	dev_info->min_dequeue_timeout_ns = 1;
92 	dev_info->max_dequeue_timeout_ns = 10000;
93 	dev_info->dequeue_timeout_ns = 25;
94 	dev_info->max_event_queues = 64;
95 	dev_info->max_event_queue_flows = (1ULL << 20);
96 	dev_info->max_event_queue_priority_levels = 8;
97 	dev_info->max_event_priority_levels = 8;
98 	dev_info->max_event_ports = 32;
99 	dev_info->max_event_port_dequeue_depth = 16;
100 	dev_info->max_event_port_enqueue_depth = 16;
101 	dev_info->max_num_events = (1ULL << 20);
	/* Capability flags: per-queue and per-event QoS, burst enq/deq,
	 * flow-id preservation, and no maintenance calls required.
	 */
102 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
103 					RTE_EVENT_DEV_CAP_BURST_MODE |
104 					RTE_EVENT_DEV_CAP_EVENT_QOS |
105 					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
106 					RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
/*
 * eventdev ops: dev_configure.
 * Skeleton stub: reads the application-supplied config from dev->data
 * and logs success; no device state is actually programmed.
 */
110 skeleton_eventdev_configure(const struct rte_eventdev *dev)
112 	struct rte_eventdev_data *data = dev->data;
113 	struct rte_event_dev_config *conf = &data->dev_conf;
114 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
116 	PMD_DRV_FUNC_TRACE();
121 	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
/* eventdev ops: dev_start. Skeleton stub — only traces the call. */
126 skeleton_eventdev_start(struct rte_eventdev *dev)
128 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
130 	PMD_DRV_FUNC_TRACE();
/* eventdev ops: dev_stop. Skeleton stub — only traces the call. */
138 skeleton_eventdev_stop(struct rte_eventdev *dev)
140 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
142 	PMD_DRV_FUNC_TRACE();
/* eventdev ops: dev_close. Skeleton stub — only traces the call. */
148 skeleton_eventdev_close(struct rte_eventdev *dev)
150 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
152 	PMD_DRV_FUNC_TRACE();
/*
 * eventdev ops: queue_def_conf.
 * Fills @queue_conf with this PMD's default queue configuration; the
 * same defaults apply to every queue id (queue_id is unused).
 */
160 skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
161 				 struct rte_event_queue_conf *queue_conf)
163 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
165 	PMD_DRV_FUNC_TRACE();
168 	RTE_SET_USED(queue_id);
170 	queue_conf->nb_atomic_flows = (1ULL << 20);
171 	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	/* Queue accepts all schedule types; normal priority by default. */
172 	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
173 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* eventdev ops: queue_release. Skeleton stub — nothing to free per queue. */
177 skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
179 	PMD_DRV_FUNC_TRACE();
182 	RTE_SET_USED(queue_id);
/*
 * eventdev ops: queue_setup.
 * Skeleton stub: accepts any configuration without programming hardware.
 */
186 skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
187 			      const struct rte_event_queue_conf *queue_conf)
189 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
191 	PMD_DRV_FUNC_TRACE();
194 	RTE_SET_USED(queue_conf);
195 	RTE_SET_USED(queue_id);
/*
 * eventdev ops: port_def_conf.
 * Fills @port_conf with this PMD's default port configuration; the same
 * defaults apply to every port id (port_id is unused).
 */
201 skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
202 				 struct rte_event_port_conf *port_conf)
204 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
206 	PMD_DRV_FUNC_TRACE();
209 	RTE_SET_USED(port_id);
211 	port_conf->new_event_threshold = 32 * 1024;
212 	port_conf->dequeue_depth = 16;
213 	port_conf->enqueue_depth = 16;
214 	port_conf->event_port_cfg = 0;
/*
 * eventdev ops: port_release.
 * Frees the per-port state allocated by skeleton_eventdev_port_setup().
 */
218 skeleton_eventdev_port_release(void *port)
220 	struct skeleton_port *sp = port;
221 	PMD_DRV_FUNC_TRACE();
/*
 * eventdev ops: port_setup.
 * Allocates the per-port private structure (cache-line aligned, on the
 * device's NUMA socket) and stores it in dev->data->ports[port_id].
 * If the port was set up before, the old allocation is released first.
 */
227 skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
228 			      const struct rte_event_port_conf *port_conf)
230 	struct skeleton_port *sp;
231 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
233 	PMD_DRV_FUNC_TRACE();
236 	RTE_SET_USED(port_conf);
238 	/* Free memory prior to re-allocation if needed */
239 	if (dev->data->ports[port_id] != NULL) {
240 		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
242 		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		/* Clear the stale pointer so a failed realloc below cannot
		 * leave a dangling reference in dev->data.
		 */
243 		dev->data->ports[port_id] = NULL;
246 	/* Allocate event port memory */
247 	sp = rte_zmalloc_socket("eventdev port",
248 				sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
249 				dev->data->socket_id);
251 		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
255 	sp->port_id = port_id;
257 	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
259 	dev->data->ports[port_id] = sp;
/*
 * eventdev ops: port_link.
 * Skeleton stub: reports every requested queue as linked by returning
 * @nb_links; no mapping state is kept.
 */
264 skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
265 			    const uint8_t queues[], const uint8_t priorities[],
268 	struct skeleton_port *sp = port;
269 	PMD_DRV_FUNC_TRACE();
273 	RTE_SET_USED(queues);
274 	RTE_SET_USED(priorities);
276 	/* Linked all the queues */
277 	return (int)nb_links;
/*
 * eventdev ops: port_unlink.
 * Skeleton stub: reports every requested queue as unlinked by returning
 * @nb_unlinks; no mapping state is kept.
 */
281 skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
282 				 uint8_t queues[], uint16_t nb_unlinks)
284 	struct skeleton_port *sp = port;
285 	PMD_DRV_FUNC_TRACE();
289 	RTE_SET_USED(queues);
291 	/* Unlinked all the queues */
292 	return (int)nb_unlinks;
/*
 * eventdev ops: timeout_ticks.
 * Converts a nanosecond timeout to device ticks by multiplying @ns with
 * a scale factor (defined on a line not shown here — presumably a
 * constant; verify in the full source).
 */
297 skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
298 				 uint64_t *timeout_ticks)
300 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
303 	PMD_DRV_FUNC_TRACE();
306 	*timeout_ticks = ns * scale;
/* eventdev ops: dump. Skeleton stub — would print device state to @f. */
312 skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
314 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
316 	PMD_DRV_FUNC_TRACE();
323 /* Initialize and register event driver with DPDK Application */
/* Ops table wiring the generic eventdev control-plane callbacks to the
 * skeleton implementations above. Shared by the PCI and vdev variants.
 */
324 static struct eventdev_ops skeleton_eventdev_ops = {
325 	.dev_infos_get    = skeleton_eventdev_info_get,
326 	.dev_configure    = skeleton_eventdev_configure,
327 	.dev_start        = skeleton_eventdev_start,
328 	.dev_stop         = skeleton_eventdev_stop,
329 	.dev_close        = skeleton_eventdev_close,
330 	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
331 	.queue_setup      = skeleton_eventdev_queue_setup,
332 	.queue_release    = skeleton_eventdev_queue_release,
333 	.port_def_conf    = skeleton_eventdev_port_def_conf,
334 	.port_setup       = skeleton_eventdev_port_setup,
335 	.port_release     = skeleton_eventdev_port_release,
336 	.port_link        = skeleton_eventdev_port_link,
337 	.port_unlink      = skeleton_eventdev_port_unlink,
338 	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
339 	.dump             = skeleton_eventdev_dump
/*
 * Per-device init callback for the PCI flavor of this PMD.
 * Installs the ops table and fast-path function pointers, then (primary
 * process only) maps BAR0 and caches the PCI identity in the private data.
 */
343 skeleton_eventdev_init(struct rte_eventdev *eventdev)
345 	struct rte_pci_device *pci_dev;
346 	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
349 	PMD_DRV_FUNC_TRACE();
351 	eventdev->dev_ops       = &skeleton_eventdev_ops;
352 	eventdev->enqueue       = skeleton_eventdev_enqueue;
353 	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
354 	eventdev->dequeue       = skeleton_eventdev_dequeue;
355 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
357 	/* For secondary processes, the primary has done all the work */
358 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
361 	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
	/* BAR0 base address; a NULL mapping means the device resource was
	 * not mapped and init must fail.
	 */
363 	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
364 	if (!skel->reg_base) {
365 		PMD_DRV_ERR("Failed to map BAR0");
	/* Cache PCI identity for later diagnostics/logging. */
370 	skel->device_id = pci_dev->id.device_id;
371 	skel->vendor_id = pci_dev->id.vendor_id;
372 	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
373 	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
375 	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
376 		    pci_dev->id.vendor_id, pci_dev->id.device_id,
377 		    pci_dev->addr.domain, pci_dev->addr.bus,
378 		    pci_dev->addr.devid, pci_dev->addr.function);
380 	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
381 		eventdev->data->dev_id, eventdev->data->socket_id,
382 		skel->vendor_id, skel->device_id);
388 /* PCI based event device */
390 #define EVENTDEV_SKEL_VENDOR_ID         0x177d
391 #define EVENTDEV_SKEL_PRODUCT_ID        0x0001
/* PCI (vendor, device) pairs this driver binds to. */
393 static const struct rte_pci_id pci_id_skeleton_map[] = {
395 		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
396 			       EVENTDEV_SKEL_PRODUCT_ID)
/*
 * PCI probe: delegates to the common eventdev PCI helper, which allocates
 * the device with our private-data size and runs skeleton_eventdev_init().
 */
404 event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
405 			 struct rte_pci_device *pci_dev)
407 	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
408 		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
/* PCI remove: delegates teardown to the common eventdev PCI helper. */
412 event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
414 	return rte_event_pmd_pci_remove(pci_dev, NULL);
/* PCI driver descriptor; NEED_MAPPING asks EAL to map BARs before probe. */
417 static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
418 	.id_table = pci_id_skeleton_map,
419 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
420 	.probe = event_skeleton_pci_probe,
421 	.remove = event_skeleton_pci_remove,
/* Register the PCI driver and export its id table to the PMD info tool. */
424 RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
425 RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
427 /* VDEV based event device */
/*
 * Create the vdev flavor of the skeleton eventdev: allocate the device
 * via the common vdev helper, install ops and fast-path pointers, and
 * signal the end of probing.
 */
430 skeleton_eventdev_create(const char *name, int socket_id)
432 	struct rte_eventdev *eventdev;
434 	eventdev = rte_event_pmd_vdev_init(name,
435 			sizeof(struct skeleton_eventdev), socket_id);
436 	if (eventdev == NULL) {
437 		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
441 	eventdev->dev_ops       = &skeleton_eventdev_ops;
442 	eventdev->enqueue       = skeleton_eventdev_enqueue;
443 	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
444 	eventdev->dequeue       = skeleton_eventdev_dequeue;
445 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
447 	event_dev_probing_finish(eventdev);
/*
 * vdev probe: creates the device on the NUMA node of the calling lcore
 * (rte_socket_id()), named after the vdev instance.
 */
454 skeleton_eventdev_probe(struct rte_vdev_device *vdev)
458 	name = rte_vdev_device_name(vdev);
459 	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
461 	return skeleton_eventdev_create(name, rte_socket_id());
/* vdev remove: delegates teardown to the common eventdev vdev helper. */
465 skeleton_eventdev_remove(struct rte_vdev_device *vdev)
469 	name = rte_vdev_device_name(vdev);
470 	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
472 	return rte_event_pmd_vdev_uninit(name);
/* vdev driver descriptor and registration under "event_skeleton". */
475 static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
476 	.probe = skeleton_eventdev_probe,
477 	.remove = skeleton_eventdev_remove
480 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);