/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#include <stdbool.h>

#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_vdev.h>

#include "dsw_evdev.h"

#define EVENTDEV_NAME_DSW_PMD event_dsw

static int
dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
	       const struct rte_event_port_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *port;
	struct rte_event_ring *in_ring;
	char ring_name[RTE_RING_NAMESIZE];

	port = &dsw->ports[port_id];

	*port = (struct dsw_port) {
		.id = port_id,
		.dsw = dsw,
		.dequeue_depth = conf->dequeue_depth,
		.enqueue_depth = conf->enqueue_depth,
		.new_event_threshold = conf->new_event_threshold
	};

	snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
		 port_id);

	in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
					dev->data->socket_id,
					RING_F_SC_DEQ|RING_F_EXACT_SZ);

	if (in_ring == NULL)
		return -ENOMEM;

	port->in_ring = in_ring;

	dev->data->ports[port_id] = port;

	return 0;
}

static void
dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
		  uint8_t port_id __rte_unused,
		  struct rte_event_port_conf *port_conf)
{
	*port_conf = (struct rte_event_port_conf) {
		.new_event_threshold = 1024,
		.dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
		.enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
	};
}

static void
dsw_port_release(void *p)
{
	struct dsw_port *port = p;

	rte_event_ring_free(port->in_ring);
}

static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_queue *queue = &dsw->queues[queue_id];

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
		return -ENOTSUP;

	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
		return -ENOTSUP;

	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
	 * the queue will only have a single serving port, no
	 * migration will ever happen, so the extra TYPE_ATOMIC
	 * migration overhead is avoided.
	 */
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
	else /* atomic or parallel */
		queue->schedule_type = conf->schedule_type;

	queue->num_serving_ports = 0;

	return 0;
}

static void
dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
		   uint8_t queue_id __rte_unused,
		   struct rte_event_queue_conf *queue_conf)
{
	*queue_conf = (struct rte_event_queue_conf) {
		.nb_atomic_flows = 4096,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
}

static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
		  uint8_t queue_id __rte_unused)
{
}

static void
queue_add_port(struct dsw_queue *queue, uint16_t port_id)
{
	queue->serving_ports[queue->num_serving_ports] = port_id;
	queue->num_serving_ports++;
}

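/* Swap-remove: overwrite the removed port's slot with the last
 * serving port and shrink the array. Returns true if the port was
 * found in the queue's serving-port list.
 */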
static bool
queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
{
	uint16_t i;

	for (i = 0; i < queue->num_serving_ports; i++)
		if (queue->serving_ports[i] == port_id) {
			uint16_t last_idx = queue->num_serving_ports - 1;

			queue->serving_ports[i] =
				queue->serving_ports[last_idx];
			queue->num_serving_ports--;

			return true;
		}

	return false;
}

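/* Shared helper for the port_link and port_unlink eventdev ops.
 * Returns the number of queues actually linked or unlinked.
 */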
static int
dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], uint16_t num, bool link)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *p = port;
	uint16_t i;
	uint16_t count = 0;

	for (i = 0; i < num; i++) {
		uint8_t qid = queues[i];
		struct dsw_queue *q = &dsw->queues[qid];
		if (link) {
			queue_add_port(q, p->id);
			count++;
		} else {
			bool removed = queue_remove_port(q, p->id);
			if (removed)
				count++;
		}
	}

	return count;
}

static int
dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
	      const uint8_t priorities[] __rte_unused, uint16_t num)
{
	return dsw_port_link_unlink(dev, port, queues, num, true);
}

static int
dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t num)
{
	return dsw_port_link_unlink(dev, port, queues, num, false);
}

static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
	     struct rte_event_dev_info *info)
{
	*info = (struct rte_event_dev_info) {
		.driver_name = DSW_PMD_NAME,
		.max_event_queues = DSW_MAX_QUEUES,
		.max_event_queue_flows = DSW_MAX_FLOWS,
		.max_event_queue_priority_levels = 1,
		.max_event_priority_levels = 1,
		.max_event_ports = DSW_MAX_PORTS,
		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
		.max_num_events = DSW_MAX_EVENTS,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
	};
}

static int
dsw_configure(const struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	const struct rte_event_dev_config *conf = &dev->data->dev_conf;

	dsw->num_ports = conf->nb_event_ports;
	dsw->num_queues = conf->nb_event_queues;

	return 0;
}

static struct rte_eventdev_ops dsw_evdev_ops = {
	.port_setup = dsw_port_setup,
	.port_def_conf = dsw_port_def_conf,
	.port_release = dsw_port_release,
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
	.port_link = dsw_port_link,
	.port_unlink = dsw_port_unlink,
	.dev_infos_get = dsw_info_get,
	.dev_configure = dsw_configure,
};

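/* Vdev probe callback: creates the event device, attaches the ops,
 * and, in the primary process only, initializes the driver-private
 * data.
 */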
static int
dsw_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eventdev *dev;
	struct dsw_evdev *dsw;

	name = rte_vdev_device_name(vdev);

	dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &dsw_evdev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dsw = dev->data->dev_private;
	dsw->data = dev->data;

	return 0;
}

static int
dsw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_dsw_pmd_drv = {
	.probe = dsw_probe,
	.remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);
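
/*
 * Usage sketch (not part of this driver): a minimal application-side
 * bring-up sequence for this PMD, assuming the EAL has already been
 * initialized. The device name "event_dsw0", the helper name
 * setup_dsw() and the configuration values are illustrative
 * assumptions, not mandated by the driver; error handling is omitted
 * for brevity. The vdev can equally well be created with the
 * --vdev=event_dsw0 EAL command-line option instead of
 * rte_vdev_init().
 *
 *	#include <rte_bus_vdev.h>
 *	#include <rte_eventdev.h>
 *
 *	static int
 *	setup_dsw(void)
 *	{
 *		struct rte_event_dev_config config = {
 *			.nb_event_queues = 1,
 *			.nb_event_ports = 1,
 *			.nb_events_limit = 4096,
 *			.nb_event_queue_flows = 1024,
 *			.nb_event_port_dequeue_depth = 32,
 *			.nb_event_port_enqueue_depth = 32
 *		};
 *		int dev_id;
 *
 *		rte_vdev_init("event_dsw0", NULL);
 *		dev_id = rte_event_dev_get_dev_id("event_dsw0");
 *
 *		rte_event_dev_configure(dev_id, &config);
 *		rte_event_queue_setup(dev_id, 0, NULL);
 *		rte_event_port_setup(dev_id, 0, NULL);
 *		rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *
 *		return rte_event_dev_start(dev_id);
 *	}
 */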