/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_vdev.h>

#include "dsw_evdev.h"

#define EVENTDEV_NAME_DSW_PMD event_dsw

static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_queue *queue = &dsw->queues[queue_id];

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
		return -ENOTSUP;

	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
		return -ENOTSUP;

	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
	 * the queue will only have a single serving port, no
	 * migration will ever happen, so the extra TYPE_ATOMIC
	 * migration overhead is avoided.
	 */
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
	else /* atomic or parallel */
		queue->schedule_type = conf->schedule_type;

	queue->num_serving_ports = 0;

	return 0;
}

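/* Illustrative only, not part of the driver: a sketch of how an
 * application-side call reaches the callback above. Device id 0 and
 * queue id 0 are assumed for the example.
 *
 *	struct rte_event_queue_conf conf = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.nb_atomic_flows = 4096,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
 *	};
 *	rte_event_queue_setup(0, 0, &conf);
 *
 * A conf asking for RTE_SCHED_TYPE_ORDERED, or with
 * RTE_EVENT_QUEUE_CFG_ALL_TYPES set in event_queue_cfg, makes
 * dsw_queue_setup() return -ENOTSUP.
 */
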
static void
dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
		   uint8_t queue_id __rte_unused,
		   struct rte_event_queue_conf *queue_conf)
{
	*queue_conf = (struct rte_event_queue_conf) {
		.nb_atomic_flows = 4096,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
}

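/* Illustrative only: an application would normally retrieve the
 * defaults above through the public API and tweak them, e.g.
 * (device id 0 and queue id 0 assumed):
 *
 *	struct rte_event_queue_conf conf;
 *	rte_event_queue_default_conf_get(0, 0, &conf);
 *	conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
 *	rte_event_queue_setup(0, 0, &conf);
 */
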
static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
		  uint8_t queue_id __rte_unused)
{
}

static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
	     struct rte_event_dev_info *info)
{
	*info = (struct rte_event_dev_info) {
		.driver_name = DSW_PMD_NAME,
		.max_event_queues = DSW_MAX_QUEUES,
		.max_event_queue_flows = DSW_MAX_FLOWS,
		.max_event_queue_priority_levels = 1,
		.max_event_priority_levels = 1,
		.max_event_ports = DSW_MAX_PORTS,
		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
		.max_num_events = DSW_MAX_EVENTS,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
	};
}

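/* Illustrative only: the limits filled in above are what an
 * application sees via the standard query call, e.g. (device id 0
 * assumed):
 *
 *	struct rte_event_dev_info info;
 *	rte_event_dev_info_get(0, &info);
 *
 * after which info.max_event_ports holds DSW_MAX_PORTS, and so on
 * for the other fields.
 */
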
static int
dsw_configure(const struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	const struct rte_event_dev_config *conf = &dev->data->dev_conf;

	dsw->num_queues = conf->nb_event_queues;

	return 0;
}

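/* Illustrative only: a minimal configuration call that lands in
 * dsw_configure(). The queue and port counts are assumptions for the
 * example; real applications would derive them from the info struct.
 *
 *	struct rte_event_dev_config conf = {
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 4,
 *		.nb_events_limit = DSW_MAX_EVENTS,
 *		.nb_event_queue_flows = DSW_MAX_FLOWS,
 *		.nb_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
 *		.nb_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH
 *	};
 *	rte_event_dev_configure(0, &conf);
 */
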
static struct rte_eventdev_ops dsw_evdev_ops = {
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
	.dev_infos_get = dsw_info_get,
	.dev_configure = dsw_configure,
};

static int
dsw_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eventdev *dev;
	struct dsw_evdev *dsw;

	name = rte_vdev_device_name(vdev);

	dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &dsw_evdev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dsw = dev->data->dev_private;
	dsw->data = dev->data;

	return 0;
}

static int
dsw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_dsw_pmd_drv = {
	.probe = dsw_probe,
	.remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);
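
/* Illustrative only: the registration above makes the PMD available
 * under the vdev name "event_dsw". An instance can be created on the
 * EAL command line:
 *
 *	--vdev=event_dsw0
 *
 * or programmatically:
 *
 *	rte_vdev_init("event_dsw0", NULL);
 *	int dev_id = rte_event_dev_get_dev_id("event_dsw0");
 */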