/*
 * Copyright (C) Cavium networks Ltd. 2016.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium networks nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_pci.h>
#include <rte_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

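/*
 * Note: this PMD registers both a PCI personality and a vdev personality
 * (see the registration macros at the end of this file). As a vdev, an
 * instance is typically created from the EAL command line, e.g.
 * --vdev=event_skeleton0.
 */
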
/* Fast-path stubs: a real PMD would move events to/from hardware here */
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
			uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	/* Advertise queue QoS, burst enqueue/dequeue and per-event QoS */
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}

static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	/* Skeleton device: report a 1:1 nanosecond-to-tick conversion */
	*timeout_ticks = ns * scale;

	return 0;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get    = skeleton_eventdev_info_get,
	.dev_configure    = skeleton_eventdev_configure,
	.dev_start        = skeleton_eventdev_start,
	.dev_stop         = skeleton_eventdev_stop,
	.dev_close        = skeleton_eventdev_close,
	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
	.queue_setup      = skeleton_eventdev_queue_setup,
	.queue_release    = skeleton_eventdev_queue_release,
	.port_def_conf    = skeleton_eventdev_port_def_conf,
	.port_setup       = skeleton_eventdev_port_setup,
	.port_release     = skeleton_eventdev_port_release,
	.port_link        = skeleton_eventdev_port_link,
	.port_unlink      = skeleton_eventdev_port_unlink,
	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
	.dump             = skeleton_eventdev_dump
};

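/*
 * The ops table above covers only the slow-path (control) callbacks. The
 * fast-path handlers (enqueue/dequeue and their burst variants) are set
 * directly on the rte_eventdev structure in skeleton_eventdev_init() and
 * skeleton_eventdev_create() below.
 */
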
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
			eventdev->data->dev_id, eventdev->data->socket_id,
			skel->vendor_id, skel->device_id);

fail:
	return ret;
}

/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID		0x177d
#define EVENTDEV_SKEL_PRODUCT_ID	0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
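
/*
 * Usage sketch (not part of the driver): an application would typically
 * instantiate the vdev via --vdev=event_skeleton0 on the EAL command line
 * and then drive it through the public eventdev API, roughly as follows.
 * The device name and the configuration values are illustrative assumptions.
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_skeleton0");
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 1;
 *	config.nb_event_ports = 1;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &config);
 *	rte_event_queue_setup(dev_id, 0, NULL);	/- default queue config -/
 *	rte_event_port_setup(dev_id, 0, NULL);	/- default port config -/
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);	/- link all queues -/
 *	rte_event_dev_start(dev_id);
 */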