eventdev: negate maintenance capability flag
drivers/event/dsw/dsw_evdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#include <stdbool.h>

#include <rte_cycles.h>
#include <eventdev_pmd.h>
#include <eventdev_pmd_vdev.h>
#include <rte_random.h>
#include <rte_ring_elem.h>

#include "dsw_evdev.h"

#define EVENTDEV_NAME_DSW_PMD event_dsw

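/* Set up an eventdev port. Each DSW port gets its own input event
 * ring and control-message ring, and the port's load-update and
 * migration intervals are converted from microseconds to TSC cycles.
 */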
static int
dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
               const struct rte_event_port_conf *conf)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        struct dsw_port *port;
        struct rte_event_ring *in_ring;
        struct rte_ring *ctl_in_ring;
        char ring_name[RTE_RING_NAMESIZE];

        port = &dsw->ports[port_id];

        *port = (struct dsw_port) {
                .id = port_id,
                .dsw = dsw,
                .dequeue_depth = conf->dequeue_depth,
                .enqueue_depth = conf->enqueue_depth,
                .new_event_threshold = conf->new_event_threshold
        };

        snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
                 port_id);

        in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
                                        dev->data->socket_id,
                                        RING_F_SC_DEQ|RING_F_EXACT_SZ);

        if (in_ring == NULL)
                return -ENOMEM;

        snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
                 dev->data->dev_id, port_id);

        ctl_in_ring = rte_ring_create_elem(ring_name,
                                           sizeof(struct dsw_ctl_msg),
                                           DSW_CTL_IN_RING_SIZE,
                                           dev->data->socket_id,
                                           RING_F_SC_DEQ|RING_F_EXACT_SZ);

        if (ctl_in_ring == NULL) {
                rte_event_ring_free(in_ring);
                return -ENOMEM;
        }

        port->in_ring = in_ring;
        port->ctl_in_ring = ctl_in_ring;

        port->load_update_interval =
                (DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;

        port->migration_interval =
                (DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;

        dev->data->ports[port_id] = port;

        return 0;
}

static void
dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
                  uint8_t port_id __rte_unused,
                  struct rte_event_port_conf *port_conf)
{
        *port_conf = (struct rte_event_port_conf) {
                .new_event_threshold = 1024,
                .dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
                .enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
        };
}

static void
dsw_port_release(void *p)
{
        struct dsw_port *port = p;

        rte_event_ring_free(port->in_ring);
        rte_ring_free(port->ctl_in_ring);
}

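/* Set up an eventdev queue. All-types queues and ordered scheduling
 * are not supported; single-link queues are treated as atomic (see
 * the comment below).
 */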
static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                const struct rte_event_queue_conf *conf)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        struct dsw_queue *queue = &dsw->queues[queue_id];

        if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
                return -ENOTSUP;

        /* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
         * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
         * the queue will only have a single serving port, no
         * migration will ever happen, so the extra TYPE_ATOMIC
         * migration overhead is avoided.
         */
        if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
                queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
        else {
                if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
                        return -ENOTSUP;
                /* atomic or parallel */
                queue->schedule_type = conf->schedule_type;
        }

        queue->num_serving_ports = 0;

        return 0;
}

static void
dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
                   uint8_t queue_id __rte_unused,
                   struct rte_event_queue_conf *queue_conf)
{
        *queue_conf = (struct rte_event_queue_conf) {
                .nb_atomic_flows = 4096,
                .schedule_type = RTE_SCHED_TYPE_ATOMIC,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
        };
}

static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
                  uint8_t queue_id __rte_unused)
{
}

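/* Helpers for maintaining a queue's array of serving ports. Removal
 * swaps in the last entry, so the order of serving_ports[] is not
 * preserved.
 */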
static void
queue_add_port(struct dsw_queue *queue, uint16_t port_id)
{
        queue->serving_ports[queue->num_serving_ports] = port_id;
        queue->num_serving_ports++;
}

static bool
queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
{
        uint16_t i;

        for (i = 0; i < queue->num_serving_ports; i++)
                if (queue->serving_ports[i] == port_id) {
                        uint16_t last_idx = queue->num_serving_ports - 1;
                        if (i != last_idx)
                                queue->serving_ports[i] =
                                        queue->serving_ports[last_idx];
                        queue->num_serving_ports--;
                        return true;
                }
        return false;
}

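/* Common implementation of the port_link and port_unlink ops. The
 * return value is the number of queues actually linked or unlinked,
 * as the eventdev API requires.
 */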
static int
dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
                     const uint8_t queues[], uint16_t num, bool link)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        struct dsw_port *p = port;
        uint16_t i;
        uint16_t count = 0;

        for (i = 0; i < num; i++) {
                uint8_t qid = queues[i];
                struct dsw_queue *q = &dsw->queues[qid];
                if (link) {
                        queue_add_port(q, p->id);
                        count++;
                } else {
                        bool removed = queue_remove_port(q, p->id);
                        if (removed)
                                count++;
                }
        }

        return count;
}

static int
dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
              const uint8_t priorities[] __rte_unused, uint16_t num)
{
        return dsw_port_link_unlink(dev, port, queues, num, true);
}

static int
dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
                uint16_t num)
{
        return dsw_port_link_unlink(dev, port, queues, num, false);
}

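/* DSW is not maintenance-free: the application is expected to call
 * rte_event_maintain() (or dequeue) regularly on every port, also on
 * ports that are temporarily idle. Hence, the negated capability flag
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE is not advertised here.
 */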
static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
             struct rte_event_dev_info *info)
{
        *info = (struct rte_event_dev_info) {
                .driver_name = DSW_PMD_NAME,
                .max_event_queues = DSW_MAX_QUEUES,
                .max_event_queue_flows = DSW_MAX_FLOWS,
                .max_event_queue_priority_levels = 1,
                .max_event_priority_levels = 1,
                .max_event_ports = DSW_MAX_PORTS,
                .max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
                .max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
                .max_num_events = DSW_MAX_EVENTS,
                .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
                RTE_EVENT_DEV_CAP_NONSEQ_MODE|
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
        };
}

static int
dsw_configure(const struct rte_eventdev *dev)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        const struct rte_event_dev_config *conf = &dev->data->dev_conf;
        int32_t min_max_in_flight;

        dsw->num_ports = conf->nb_event_ports;
        dsw->num_queues = conf->nb_event_queues;

        /* Avoid a situation where consumer ports are holding all the
         * credits, without making use of them.
         */
        min_max_in_flight = conf->nb_event_ports * DSW_PORT_MAX_CREDITS;

        dsw->max_inflight = RTE_MAX(conf->nb_events_limit, min_max_in_flight);

        return 0;
}

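/* Randomly spread each queue's flow hash buckets across that queue's
 * serving ports. Flow migration may later move individual flows
 * between ports at runtime.
 */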
static void
initial_flow_to_port_assignment(struct dsw_evdev *dsw)
{
        uint8_t queue_id;
        for (queue_id = 0; queue_id < dsw->num_queues; queue_id++) {
                struct dsw_queue *queue = &dsw->queues[queue_id];
                uint16_t flow_hash;
                for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
                        uint8_t port_idx =
                                rte_rand() % queue->num_serving_ports;
                        uint8_t port_id =
                                queue->serving_ports[port_idx];
                        dsw->queues[queue_id].flow_to_port_map[flow_hash] =
                                port_id;
                }
        }
}

static int
dsw_start(struct rte_eventdev *dev)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        uint16_t i;
        uint64_t now;

        dsw->credits_on_loan = 0;

        initial_flow_to_port_assignment(dsw);

        now = rte_get_timer_cycles();
        for (i = 0; i < dsw->num_ports; i++) {
                dsw->ports[i].measurement_start = now;
                dsw->ports[i].busy_start = now;
        }

        return 0;
}

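/* The drain functions below run the application's dev_stop_flush
 * callback on any events still held in a port's output buffers,
 * paused-events buffer or input ring when the device is stopped.
 */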
static void
dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
                   eventdev_stop_flush_t flush, void *flush_arg)
{
        uint16_t i;

        for (i = 0; i < buf_len; i++)
                flush(dev_id, buf[i], flush_arg);
}

static void
dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
                      eventdev_stop_flush_t flush, void *flush_arg)
{
        dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
                           flush, flush_arg);
}

static void
dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
                   eventdev_stop_flush_t flush, void *flush_arg)
{
        uint16_t dport_id;

        for (dport_id = 0; dport_id < dsw->num_ports; dport_id++)
                if (dport_id != port->id)
                        dsw_port_drain_buf(dev_id, port->out_buffer[dport_id],
                                           port->out_buffer_len[dport_id],
                                           flush, flush_arg);
}

static void
dsw_port_drain_in_ring(uint8_t dev_id, struct dsw_port *port,
                       eventdev_stop_flush_t flush, void *flush_arg)
{
        struct rte_event ev;

        while (rte_event_ring_dequeue_burst(port->in_ring, &ev, 1, NULL))
                flush(dev_id, ev, flush_arg);
}

static void
dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
          eventdev_stop_flush_t flush, void *flush_arg)
{
        uint16_t port_id;

        if (flush == NULL)
                return;

        for (port_id = 0; port_id < dsw->num_ports; port_id++) {
                struct dsw_port *port = &dsw->ports[port_id];

                dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
                dsw_port_drain_paused(dev_id, port, flush, flush_arg);
                dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
        }
}

static void
dsw_stop(struct rte_eventdev *dev)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        uint8_t dev_id;
        eventdev_stop_flush_t flush;
        void *flush_arg;

        dev_id = dev->data->dev_id;
        flush = dev->dev_ops->dev_stop_flush;
        flush_arg = dev->data->dev_stop_flush_arg;

        dsw_drain(dev_id, dsw, flush, flush_arg);
}

static int
dsw_close(struct rte_eventdev *dev)
{
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);

        dsw->num_ports = 0;
        dsw->num_queues = 0;

        return 0;
}

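/* Adapter capabilities: the ethernet RX and crypto adapters report
 * the generic software-adapter capabilities, and no driver-specific
 * timer adapter ops are supplied.
 */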
static int
dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
                            const struct rte_eth_dev *eth_dev __rte_unused,
                            uint32_t *caps)
{
        *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        return 0;
}

static int
dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
                           uint64_t flags __rte_unused, uint32_t *caps,
                           const struct event_timer_adapter_ops **ops)
{
        *caps = 0;
        *ops = NULL;
        return 0;
}

static int
dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
                            const struct rte_cryptodev *cdev __rte_unused,
                            uint32_t *caps)
{
        *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
        return 0;
}

static struct eventdev_ops dsw_evdev_ops = {
        .port_setup = dsw_port_setup,
        .port_def_conf = dsw_port_def_conf,
        .port_release = dsw_port_release,
        .queue_setup = dsw_queue_setup,
        .queue_def_conf = dsw_queue_def_conf,
        .queue_release = dsw_queue_release,
        .port_link = dsw_port_link,
        .port_unlink = dsw_port_unlink,
        .dev_infos_get = dsw_info_get,
        .dev_configure = dsw_configure,
        .dev_start = dsw_start,
        .dev_stop = dsw_stop,
        .dev_close = dsw_close,
        .eth_rx_adapter_caps_get = dsw_eth_rx_adapter_caps_get,
        .timer_adapter_caps_get = dsw_timer_adapter_caps_get,
        .crypto_adapter_caps_get = dsw_crypto_adapter_caps_get,
        .xstats_get = dsw_xstats_get,
        .xstats_get_names = dsw_xstats_get_names,
        .xstats_get_by_name = dsw_xstats_get_by_name
};

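/* Probe a DSW vdev instance: allocate the eventdev, install the ops
 * table and the fast-path entry points (including the maintain
 * callback), and, in the primary process, initialize the private
 * device data.
 */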
static int
dsw_probe(struct rte_vdev_device *vdev)
{
        const char *name;
        struct rte_eventdev *dev;
        struct dsw_evdev *dsw;

        name = rte_vdev_device_name(vdev);

        dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
                                      rte_socket_id());
        if (dev == NULL)
                return -EFAULT;

        dev->dev_ops = &dsw_evdev_ops;
        dev->enqueue = dsw_event_enqueue;
        dev->enqueue_burst = dsw_event_enqueue_burst;
        dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
        dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
        dev->dequeue = dsw_event_dequeue;
        dev->dequeue_burst = dsw_event_dequeue_burst;
        dev->maintain = dsw_event_maintain;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        dsw = dev->data->dev_private;
        dsw->data = dev->data;

        event_dev_probing_finish(dev);
        return 0;
}

static int
dsw_remove(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_dsw_pmd_drv = {
        .probe = dsw_probe,
        .remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);