/*-
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_vdev.h>

#include "skeleton_eventdev.h"
#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	/* Stub: a real driver would submit the event to hardware here */
	RTE_SET_USED(sp);
	RTE_SET_USED(ev);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);

	return 0;
}
static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}
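
/*
 * Usage sketch (illustrative, not part of the skeleton driver): an
 * application reads the limits advertised above through the public API
 * before configuring the device; "dev_id" is an assumed valid device id.
 *
 *	struct rte_event_dev_info info;
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("max queues=%u max ports=%u\n",
 *		       info.max_event_queues, info.max_event_ports);
 */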
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}
static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}
static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}
static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}
static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}
static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}
static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}
static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
				uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}
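
/*
 * Usage sketch (illustrative): applications reach this op through the
 * public wrapper to convert a nanosecond timeout into device ticks; with
 * scale = 1 above, the tick count simply equals the nanosecond value.
 *
 *	uint64_t ticks;
 *	rte_event_dequeue_timeout_ticks(dev_id, 25 * 1000, &ticks);
 */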
static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get = skeleton_eventdev_info_get,
	.dev_configure = skeleton_eventdev_configure,
	.dev_start = skeleton_eventdev_start,
	.dev_stop = skeleton_eventdev_stop,
	.dev_close = skeleton_eventdev_close,
	.queue_def_conf = skeleton_eventdev_queue_def_conf,
	.queue_setup = skeleton_eventdev_queue_setup,
	.queue_release = skeleton_eventdev_queue_release,
	.port_def_conf = skeleton_eventdev_port_def_conf,
	.port_setup = skeleton_eventdev_port_setup,
	.port_release = skeleton_eventdev_port_release,
	.port_link = skeleton_eventdev_port_link,
	.port_unlink = skeleton_eventdev_port_unlink,
	.timeout_ticks = skeleton_eventdev_timeout_ticks,
	.dump = skeleton_eventdev_dump
};
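
/*
 * Usage sketch (illustrative; dev_id and the NULL default configs are
 * assumptions): the ops table above is reached through the public
 * eventdev API, e.g. a minimal bring-up of this PMD looks like:
 *
 *	rte_event_dev_configure(dev_id, &cfg);         // -> dev_configure
 *	rte_event_queue_setup(dev_id, 0, NULL);        // -> queue_setup
 *	rte_event_port_setup(dev_id, 0, NULL);         // -> port_setup
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0); // -> port_link (all queues)
 *	rte_event_dev_start(dev_id);                   // -> dev_start
 */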
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	return ret;
}
/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID		0x177d
#define EVENTDEV_SKEL_PRODUCT_ID	0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,	/* sentinel entry terminates the table */
	},
};
static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}
static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};
RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
/* VDEV based event device */
static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}
static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}
static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
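
/*
 * Usage sketch (the instance name "event_skeleton0" is an assumption; a
 * vdev name must start with the registered driver name "event_skeleton"):
 * the vdev flavour of this PMD is instantiated from the EAL command line,
 *
 *	./app ... --vdev="event_skeleton0"
 *
 * and the resulting device id can then be looked up by name:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_skeleton0");
 */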