event/dsw: support linking/unlinking ports
[dpdk.git] / drivers / event / dsw / dsw_evdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Ericsson AB
3  */
4
5 #include <stdbool.h>
6
7 #include <rte_eventdev_pmd.h>
8 #include <rte_eventdev_pmd_vdev.h>
9
10 #include "dsw_evdev.h"
11
/* Name under which this PMD registers as a virtual eventdev device. */
#define EVENTDEV_NAME_DSW_PMD event_dsw
13
14 static int
15 dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
16                const struct rte_event_port_conf *conf)
17 {
18         struct dsw_evdev *dsw = dsw_pmd_priv(dev);
19         struct dsw_port *port;
20         struct rte_event_ring *in_ring;
21         char ring_name[RTE_RING_NAMESIZE];
22
23         port = &dsw->ports[port_id];
24
25         *port = (struct dsw_port) {
26                 .id = port_id,
27                 .dsw = dsw,
28                 .dequeue_depth = conf->dequeue_depth,
29                 .enqueue_depth = conf->enqueue_depth,
30                 .new_event_threshold = conf->new_event_threshold
31         };
32
33         snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
34                  port_id);
35
36         in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
37                                         dev->data->socket_id,
38                                         RING_F_SC_DEQ|RING_F_EXACT_SZ);
39
40         if (in_ring == NULL)
41                 return -ENOMEM;
42
43         port->in_ring = in_ring;
44
45         dev->data->ports[port_id] = port;
46
47         return 0;
48 }
49
50 static void
51 dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
52                   uint8_t port_id __rte_unused,
53                   struct rte_event_port_conf *port_conf)
54 {
55         *port_conf = (struct rte_event_port_conf) {
56                 .new_event_threshold = 1024,
57                 .dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
58                 .enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
59         };
60 }
61
62 static void
63 dsw_port_release(void *p)
64 {
65         struct dsw_port *port = p;
66
67         rte_event_ring_free(port->in_ring);
68 }
69
70 static int
71 dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
72                 const struct rte_event_queue_conf *conf)
73 {
74         struct dsw_evdev *dsw = dsw_pmd_priv(dev);
75         struct dsw_queue *queue = &dsw->queues[queue_id];
76
77         if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
78                 return -ENOTSUP;
79
80         if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
81                 return -ENOTSUP;
82
83         /* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
84          * avoid the "fake" TYPE_PARALLEL flow_id assignment. Since
85          * the queue will only have a single serving port, no
86          * migration will ever happen, so the extra TYPE_ATOMIC
87          * migration overhead is avoided.
88          */
89         if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
90                 queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
91         else /* atomic or parallel */
92                 queue->schedule_type = conf->schedule_type;
93
94         queue->num_serving_ports = 0;
95
96         return 0;
97 }
98
99 static void
100 dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
101                    uint8_t queue_id __rte_unused,
102                    struct rte_event_queue_conf *queue_conf)
103 {
104         *queue_conf = (struct rte_event_queue_conf) {
105                 .nb_atomic_flows = 4096,
106                 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
107                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
108         };
109 }
110
111 static void
112 dsw_queue_release(struct rte_eventdev *dev __rte_unused,
113                   uint8_t queue_id __rte_unused)
114 {
115 }
116
117 static void
118 queue_add_port(struct dsw_queue *queue, uint16_t port_id)
119 {
120         queue->serving_ports[queue->num_serving_ports] = port_id;
121         queue->num_serving_ports++;
122 }
123
124 static bool
125 queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
126 {
127         uint16_t i;
128
129         for (i = 0; i < queue->num_serving_ports; i++)
130                 if (queue->serving_ports[i] == port_id) {
131                         uint16_t last_idx = queue->num_serving_ports - 1;
132                         if (i != last_idx)
133                                 queue->serving_ports[i] =
134                                         queue->serving_ports[last_idx];
135                         queue->num_serving_ports--;
136                         return true;
137                 }
138         return false;
139 }
140
141 static int
142 dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
143                      const uint8_t queues[], uint16_t num, bool link)
144 {
145         struct dsw_evdev *dsw = dsw_pmd_priv(dev);
146         struct dsw_port *p = port;
147         uint16_t i;
148         uint16_t count = 0;
149
150         for (i = 0; i < num; i++) {
151                 uint8_t qid = queues[i];
152                 struct dsw_queue *q = &dsw->queues[qid];
153                 if (link) {
154                         queue_add_port(q, p->id);
155                         count++;
156                 } else {
157                         bool removed = queue_remove_port(q, p->id);
158                         if (removed)
159                                 count++;
160                 }
161         }
162
163         return count;
164 }
165
166 static int
167 dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
168               const uint8_t priorities[] __rte_unused, uint16_t num)
169 {
170         return dsw_port_link_unlink(dev, port, queues, num, true);
171 }
172
/* Eventdev unlink callback; delegates to the shared link/unlink
 * implementation.
 */
static int
dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t num)
{
	return dsw_port_link_unlink(dev, port, queues, num, false);
}
179
180 static void
181 dsw_info_get(struct rte_eventdev *dev __rte_unused,
182              struct rte_event_dev_info *info)
183 {
184         *info = (struct rte_event_dev_info) {
185                 .driver_name = DSW_PMD_NAME,
186                 .max_event_queues = DSW_MAX_QUEUES,
187                 .max_event_queue_flows = DSW_MAX_FLOWS,
188                 .max_event_queue_priority_levels = 1,
189                 .max_event_priority_levels = 1,
190                 .max_event_ports = DSW_MAX_PORTS,
191                 .max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
192                 .max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
193                 .max_num_events = DSW_MAX_EVENTS,
194                 .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
195                 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
196         };
197 }
198
199 static int
200 dsw_configure(const struct rte_eventdev *dev)
201 {
202         struct dsw_evdev *dsw = dsw_pmd_priv(dev);
203         const struct rte_event_dev_config *conf = &dev->data->dev_conf;
204
205         dsw->num_ports = conf->nb_event_ports;
206         dsw->num_queues = conf->nb_event_queues;
207
208         return 0;
209 }
210
211 static struct rte_eventdev_ops dsw_evdev_ops = {
212         .port_setup = dsw_port_setup,
213         .port_def_conf = dsw_port_def_conf,
214         .port_release = dsw_port_release,
215         .queue_setup = dsw_queue_setup,
216         .queue_def_conf = dsw_queue_def_conf,
217         .queue_release = dsw_queue_release,
218         .port_link = dsw_port_link,
219         .port_unlink = dsw_port_unlink,
220         .dev_infos_get = dsw_info_get,
221         .dev_configure = dsw_configure,
222 };
223
224 static int
225 dsw_probe(struct rte_vdev_device *vdev)
226 {
227         const char *name;
228         struct rte_eventdev *dev;
229         struct dsw_evdev *dsw;
230
231         name = rte_vdev_device_name(vdev);
232
233         dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
234                                       rte_socket_id());
235         if (dev == NULL)
236                 return -EFAULT;
237
238         dev->dev_ops = &dsw_evdev_ops;
239
240         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
241                 return 0;
242
243         dsw = dev->data->dev_private;
244         dsw->data = dev->data;
245
246         return 0;
247 }
248
249 static int
250 dsw_remove(struct rte_vdev_device *vdev)
251 {
252         const char *name;
253
254         name = rte_vdev_device_name(vdev);
255         if (name == NULL)
256                 return -EINVAL;
257
258         return rte_event_pmd_vdev_uninit(name);
259 }
260
261 static struct rte_vdev_driver evdev_dsw_pmd_drv = {
262         .probe = dsw_probe,
263         .remove = dsw_remove
264 };
265
266 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);