/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_random.h>
#include <rte_ring_elem.h>

#include "dsw_evdev.h"

#define EVENTDEV_NAME_DSW_PMD event_dsw
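
/* Set up an event port: record the application-supplied configuration
 * and create the port's input event ring and control message ring.
 */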
static int
dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
	       const struct rte_event_port_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *port;
	struct rte_event_ring *in_ring;
	struct rte_ring *ctl_in_ring;
	char ring_name[RTE_RING_NAMESIZE];

	port = &dsw->ports[port_id];

	*port = (struct dsw_port) {
		.id = port_id,
		.dsw = dsw,
		.dequeue_depth = conf->dequeue_depth,
		.enqueue_depth = conf->enqueue_depth,
		.new_event_threshold = conf->new_event_threshold
	};

	snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
		 port_id);

	in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
					dev->data->socket_id,
					RING_F_SC_DEQ|RING_F_EXACT_SZ);
	if (in_ring == NULL)
		return -ENOMEM;

	snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
		 dev->data->dev_id, port_id);

	ctl_in_ring = rte_ring_create_elem(ring_name,
					   sizeof(struct dsw_ctl_msg),
					   DSW_CTL_IN_RING_SIZE,
					   dev->data->socket_id,
					   RING_F_SC_DEQ|RING_F_EXACT_SZ);
	if (ctl_in_ring == NULL) {
		rte_event_ring_free(in_ring);
		return -ENOMEM;
	}

	port->in_ring = in_ring;
	port->ctl_in_ring = ctl_in_ring;

	rte_atomic16_init(&port->load);

	port->load_update_interval =
		(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;

	port->migration_interval =
		(DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;

	dev->data->ports[port_id] = port;

	return 0;
}
static void
dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
		  uint8_t port_id __rte_unused,
		  struct rte_event_port_conf *port_conf)
{
	*port_conf = (struct rte_event_port_conf) {
		.new_event_threshold = 1024,
		.dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
		.enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
	};
}
static void
dsw_port_release(void *p)
{
	struct dsw_port *port = p;

	rte_event_ring_free(port->in_ring);
	rte_ring_free(port->ctl_in_ring);
}
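
/* DSW supports atomic and parallel queues. Queues configured with
 * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_SCHED_TYPE_ORDERED are rejected
 * with -ENOTSUP.
 */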
static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_queue *queue = &dsw->queues[queue_id];

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
		return -ENOTSUP;

	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
	 * the queue will only have a single serving port, no
	 * migration will ever happen, so the extra TYPE_ATOMIC
	 * migration overhead is avoided.
	 */
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
	else {
		if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
			return -ENOTSUP;
		/* atomic or parallel */
		queue->schedule_type = conf->schedule_type;
	}

	queue->num_serving_ports = 0;

	return 0;
}
static void
dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
		   uint8_t queue_id __rte_unused,
		   struct rte_event_queue_conf *queue_conf)
{
	*queue_conf = (struct rte_event_queue_conf) {
		.nb_atomic_flows = 4096,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
}
static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
		  uint8_t queue_id __rte_unused)
{
}
static void
queue_add_port(struct dsw_queue *queue, uint16_t port_id)
{
	queue->serving_ports[queue->num_serving_ports] = port_id;
	queue->num_serving_ports++;
}
static bool
queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
{
	uint16_t i;

	for (i = 0; i < queue->num_serving_ports; i++)
		if (queue->serving_ports[i] == port_id) {
			uint16_t last_idx = queue->num_serving_ports - 1;

			queue->serving_ports[i] =
				queue->serving_ports[last_idx];
			queue->num_serving_ports--;

			return true;
		}

	return false;
}
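
/* Linking and unlinking share a common helper, which adds the port to
 * (or removes it from) each queue's list of serving ports and returns
 * the number of queues actually affected.
 */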
static int
dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], uint16_t num, bool link)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *p = port;
	uint16_t i;
	uint16_t count = 0;

	for (i = 0; i < num; i++) {
		uint8_t qid = queues[i];
		struct dsw_queue *q = &dsw->queues[qid];

		if (link) {
			queue_add_port(q, p->id);
			count++;
		} else {
			bool removed = queue_remove_port(q, p->id);

			if (removed)
				count++;
		}
	}

	return count;
}
static int
dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
	      const uint8_t priorities[] __rte_unused, uint16_t num)
{
	return dsw_port_link_unlink(dev, port, queues, num, true);
}
static int
dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t num)
{
	return dsw_port_link_unlink(dev, port, queues, num, false);
}
static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
	     struct rte_event_dev_info *info)
{
	*info = (struct rte_event_dev_info) {
		.driver_name = DSW_PMD_NAME,
		.max_event_queues = DSW_MAX_QUEUES,
		.max_event_queue_flows = DSW_MAX_FLOWS,
		.max_event_queue_priority_levels = 1,
		.max_event_priority_levels = 1,
		.max_event_ports = DSW_MAX_PORTS,
		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
		.max_num_events = DSW_MAX_EVENTS,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
		RTE_EVENT_DEV_CAP_NONSEQ_MODE|
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
	};
}
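
/* Record the configured number of ports and queues, and derive the
 * total in-flight event limit. The limit is never set lower than the
 * combined credit allowance of all ports.
 */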
static int
dsw_configure(const struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	const struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int32_t min_max_in_flight;

	dsw->num_ports = conf->nb_event_ports;
	dsw->num_queues = conf->nb_event_queues;

	/* Avoid a situation where consumer ports are holding all the
	 * credits, without making use of them.
	 */
	min_max_in_flight = conf->nb_event_ports * DSW_PORT_MAX_CREDITS;

	dsw->max_inflight = RTE_MAX(conf->nb_events_limit, min_max_in_flight);

	return 0;
}
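
/* Randomly spread each queue's DSW_MAX_FLOWS flow hash buckets across
 * the queue's serving ports. This only provides the initial mapping;
 * flows may later be migrated between ports at runtime.
 */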
static void
initial_flow_to_port_assignment(struct dsw_evdev *dsw)
{
	uint8_t queue_id;

	for (queue_id = 0; queue_id < dsw->num_queues; queue_id++) {
		struct dsw_queue *queue = &dsw->queues[queue_id];
		uint16_t flow_hash;

		for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
			uint8_t port_idx =
				rte_rand() % queue->num_serving_ports;
			uint8_t port_id =
				queue->serving_ports[port_idx];
			dsw->queues[queue_id].flow_to_port_map[flow_hash] =
				port_id;
		}
	}
}
static int
dsw_start(struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	uint16_t i;
	uint64_t now;

	rte_atomic32_init(&dsw->credits_on_loan);

	initial_flow_to_port_assignment(dsw);

	now = rte_get_timer_cycles();
	for (i = 0; i < dsw->num_ports; i++) {
		dsw->ports[i].measurement_start = now;
		dsw->ports[i].busy_start = now;
	}

	return 0;
}
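
/* The drain functions below are used at device stop to hand any events
 * still buffered in the device (output buffers, paused events and input
 * rings) to the application's dev_stop_flush callback.
 */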
static void
dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
		   eventdev_stop_flush_t flush, void *flush_arg)
{
	uint16_t i;

	for (i = 0; i < buf_len; i++)
		flush(dev_id, buf[i], flush_arg);
}
static void
dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
		      eventdev_stop_flush_t flush, void *flush_arg)
{
	dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
			   flush, flush_arg);
}
static void
dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
		   eventdev_stop_flush_t flush, void *flush_arg)
{
	uint16_t dport_id;

	for (dport_id = 0; dport_id < dsw->num_ports; dport_id++)
		if (dport_id != port->id)
			dsw_port_drain_buf(dev_id, port->out_buffer[dport_id],
					   port->out_buffer_len[dport_id],
					   flush, flush_arg);
}
static void
dsw_port_drain_in_ring(uint8_t dev_id, struct dsw_port *port,
		       eventdev_stop_flush_t flush, void *flush_arg)
{
	struct rte_event ev;

	while (rte_event_ring_dequeue_burst(port->in_ring, &ev, 1, NULL))
		flush(dev_id, ev, flush_arg);
}
static void
dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
	  eventdev_stop_flush_t flush, void *flush_arg)
{
	uint16_t port_id;

	if (flush == NULL)
		return;

	for (port_id = 0; port_id < dsw->num_ports; port_id++) {
		struct dsw_port *port = &dsw->ports[port_id];

		dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
		dsw_port_drain_paused(dev_id, port, flush, flush_arg);
		dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
	}
}
static void
dsw_stop(struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	uint8_t dev_id;
	eventdev_stop_flush_t flush;
	void *flush_arg;

	dev_id = dev->data->dev_id;
	flush = dev->dev_ops->dev_stop_flush;
	flush_arg = dev->data->dev_stop_flush_arg;

	dsw_drain(dev_id, dsw, flush, flush_arg);
}
static int
dsw_close(struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);

	dsw->num_ports = 0;
	dsw->num_queues = 0;

	return 0;
}
static struct rte_eventdev_ops dsw_evdev_ops = {
	.port_setup = dsw_port_setup,
	.port_def_conf = dsw_port_def_conf,
	.port_release = dsw_port_release,
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
	.port_link = dsw_port_link,
	.port_unlink = dsw_port_unlink,
	.dev_infos_get = dsw_info_get,
	.dev_configure = dsw_configure,
	.dev_start = dsw_start,
	.dev_stop = dsw_stop,
	.dev_close = dsw_close,
	.xstats_get = dsw_xstats_get,
	.xstats_get_names = dsw_xstats_get_names,
	.xstats_get_by_name = dsw_xstats_get_by_name
};
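
/* Probe a DSW vdev instance (created with, e.g., the --vdev=event_dsw0
 * EAL option): allocate the event device, and hook up the ops and the
 * enqueue/dequeue fast-path functions.
 */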
static int
dsw_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eventdev *dev;
	struct dsw_evdev *dsw;

	name = rte_vdev_device_name(vdev);

	dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &dsw_evdev_ops;
	dev->enqueue = dsw_event_enqueue;
	dev->enqueue_burst = dsw_event_enqueue_burst;
	dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
	dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
	dev->dequeue = dsw_event_dequeue;
	dev->dequeue_burst = dsw_event_dequeue_burst;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dsw = dev->data->dev_private;
	dsw->data = dev->data;

	return 0;
}
static int
dsw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver evdev_dsw_pmd_drv = {
	.probe = dsw_probe,
	.remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);