/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_vdev.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_malloc.h>

#include "sw_evdev.h"
#include "iq_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"

static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
                const struct rte_event_queue_conf *queue_conf)
{
        unsigned int i;
        int dev_id = sw->data->dev_id;
        int socket_id = sw->data->socket_id;
        char buf[IQ_RING_NAMESIZE];
        struct sw_qid *qid = &sw->qids[idx];

        /* Create one internal IQ ring per event priority level */
        for (i = 0; i < SW_IQS_MAX; i++) {
                snprintf(buf, sizeof(buf), "q_%u_iq_%u", idx, i);
                qid->iq[i] = iq_ring_create(buf, socket_id);
                if (!qid->iq[i]) {
                        SW_LOG_DBG("ring create failed\n");
                        goto cleanup;
                }
        }

        /* Initialize the FID structures to no pinning (-1), and zero packets */
        const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
        for (i = 0; i < RTE_DIM(qid->fids); i++)
                qid->fids[i] = fid;

        qid->id = idx;
        qid->type = type;
        qid->priority = queue_conf->priority;

        if (qid->type == RTE_SCHED_TYPE_ORDERED) {
                char ring_name[RTE_RING_NAMESIZE];
                uint32_t window_size;

                /* rte_ring and window_size_mask require window_size to be a
                 * power-of-2.
                 */
                window_size = rte_align32pow2(
                                queue_conf->nb_atomic_order_sequences);

                if (!window_size) {
                        SW_LOG_DBG(
                                "invalid reorder_window_size for ordered queue\n"
                                );
                        goto cleanup;
                }

                /* Store the mask (size - 1); masking is why the size above
                 * must be a power-of-2.
                 */
                qid->window_size = window_size - 1;
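
                /* Example: nb_atomic_order_sequences = 1000 rounds up to a
                 * 1024-entry reorder window, so window_size holds the mask
                 * 1023.
                 */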

                snprintf(buf, sizeof(buf), "sw%d_iq_%u_rob", dev_id, idx);
                /* rte_zmalloc_socket() returns zeroed memory, so no
                 * separate memset of the reorder buffer is required.
                 */
                qid->reorder_buffer = rte_zmalloc_socket(buf,
                                window_size * sizeof(qid->reorder_buffer[0]),
                                0, socket_id);
                if (!qid->reorder_buffer) {
                        SW_LOG_DBG("reorder_buffer malloc failed\n");
                        goto cleanup;
                }

                snprintf(ring_name, sizeof(ring_name), "sw%d_q%u_freelist",
                                dev_id, idx);

                /* lookup the ring, and if it already exists, free it */
                struct rte_ring *existing = rte_ring_lookup(ring_name);
                if (existing)
                        rte_ring_free(existing);

                qid->reorder_buffer_freelist = rte_ring_create(ring_name,
                                window_size,
                                socket_id,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
                if (!qid->reorder_buffer_freelist) {
                        SW_LOG_DBG("freelist ring create failed\n");
                        goto cleanup;
                }

                /* Populate the freelist with reorder buffer entries. Enqueue
                 * 'window_size - 1' entries because the rte_ring holds only
                 * that many.
                 */
                for (i = 0; i < window_size - 1; i++) {
                        if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
                                                &qid->reorder_buffer[i]) < 0)
                                goto cleanup;
                }

                qid->reorder_buffer_index = 0;
                qid->cq_next_tx = 0;
        }

        qid->initialized = 1;

        return 0;

cleanup:
        for (i = 0; i < SW_IQS_MAX; i++) {
                if (qid->iq[i])
                        iq_ring_destroy(qid->iq[i]);
        }

        if (qid->reorder_buffer) {
                rte_free(qid->reorder_buffer);
                qid->reorder_buffer = NULL;
        }

        if (qid->reorder_buffer_freelist) {
                rte_ring_free(qid->reorder_buffer_freelist);
                qid->reorder_buffer_freelist = NULL;
        }

        return -EINVAL;
}

static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                const struct rte_event_queue_conf *conf)
{
        int type;

        /* SINGLE_LINK can be OR-ed with other types, so handle first */
        if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
                type = SW_SCHED_TYPE_DIRECT;
        } else {
                switch (conf->event_queue_cfg) {
                case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
                        type = RTE_SCHED_TYPE_ATOMIC;
                        break;
                case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
                        type = RTE_SCHED_TYPE_ORDERED;
                        break;
                case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
                        type = RTE_SCHED_TYPE_PARALLEL;
                        break;
                case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
                        SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
                        return -ENOTSUP;
                default:
                        SW_LOG_ERR("Unknown queue type %u requested\n",
                                   conf->event_queue_cfg);
                        return -EINVAL;
                }
        }

        struct sw_evdev *sw = sw_pmd_priv(dev);
        return qid_init(sw, queue_id, type, conf);
}
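
/*
 * Illustrative application-side usage (a sketch, not part of this driver):
 * setting up an ordered queue whose reorder window is sized from
 * nb_atomic_order_sequences, as handled in qid_init() above. Field values
 * are arbitrary examples.
 *
 *      struct rte_event_queue_conf conf = {
 *              .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
 *              .nb_atomic_flows = 1024,
 *              .nb_atomic_order_sequences = 1024,
 *              .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *      };
 *      int ret = rte_event_queue_setup(dev_id, queue_id, &conf);
 */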

static void
sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
        struct sw_evdev *sw = sw_pmd_priv(dev);
        struct sw_qid *qid = &sw->qids[id];
        uint32_t i;

        for (i = 0; i < SW_IQS_MAX; i++)
                iq_ring_destroy(qid->iq[i]);

        if (qid->type == RTE_SCHED_TYPE_ORDERED) {
                rte_free(qid->reorder_buffer);
                rte_ring_free(qid->reorder_buffer_freelist);
        }
        memset(qid, 0, sizeof(*qid));
}

static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                  struct rte_event_queue_conf *conf)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        static const struct rte_event_queue_conf default_conf = {
                .nb_atomic_flows = 4096,
                .nb_atomic_order_sequences = 1,
                .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        };

        *conf = default_conf;
}

static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                 struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);

        port_conf->new_event_threshold = 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
}

static int
sw_dev_configure(const struct rte_eventdev *dev)
{
        struct sw_evdev *sw = sw_pmd_priv(dev);
        const struct rte_eventdev_data *data = dev->data;
        const struct rte_event_dev_config *conf = &data->dev_conf;

        /* Reject unsupported modes before touching the device state */
        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                return -ENOTSUP;

        sw->qid_count = conf->nb_event_queues;
        sw->port_count = conf->nb_event_ports;
        sw->nb_events_limit = conf->nb_events_limit;

        return 0;
}
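
/*
 * Illustrative application-side configuration (a sketch; the field values
 * are arbitrary examples, not driver requirements):
 *
 *      struct rte_event_dev_config cfg = {
 *              .nb_event_queues = 2,
 *              .nb_event_ports = 4,
 *              .nb_events_limit = 4096,
 *              .nb_event_queue_flows = 1024,
 *              .nb_event_port_dequeue_depth = 16,
 *              .nb_event_port_enqueue_depth = 16,
 *              .dequeue_timeout_ns = 0,
 *      };
 *      int ret = rte_event_dev_configure(dev_id, &cfg);
 */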

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
        RTE_SET_USED(dev);

        static const struct rte_event_dev_info evdev_sw_info = {
                        .driver_name = SW_PMD_NAME,
                        .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
                        .max_event_queue_flows = SW_QID_NUM_FIDS,
                        .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
                        .max_event_priority_levels = SW_IQS_MAX,
                        .max_event_ports = SW_PORTS_MAX,
                        .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
                        .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
                        .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
                        .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_EVENT_QOS),
        };

        *info = evdev_sw_info;
}

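/* The devargs handlers below are invoked by rte_kvargs_process() once per
 * occurrence of their key in the vdev parameter string; a negative return
 * stops processing, and sw_probe() then fails the device probe.
 */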
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
        int *socket_id = opaque;
        *socket_id = atoi(value);
        if (*socket_id < 0 || *socket_id >= RTE_MAX_NUMA_NODES)
                return -1;
        return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
        int *quanta = opaque;
        *quanta = atoi(value);
        if (*quanta < 0 || *quanta >= 4096)
                return -1;
        return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
        int *credit = opaque;
        *credit = atoi(value);
        if (*credit < 0 || *credit >= 128)
                return -1;
        return 0;
}

static int
sw_probe(const char *name, const char *params)
{
        static const struct rte_eventdev_ops evdev_sw_ops = {
                        .dev_configure = sw_dev_configure,
                        .dev_infos_get = sw_info_get,

                        .queue_def_conf = sw_queue_def_conf,
                        .queue_setup = sw_queue_setup,
                        .queue_release = sw_queue_release,
                        .port_def_conf = sw_port_def_conf,
        };

        static const char *const args[] = {
                NUMA_NODE_ARG,
                SCHED_QUANTA_ARG,
                CREDIT_QUANTA_ARG,
                NULL
        };
        struct rte_eventdev *dev;
        struct sw_evdev *sw;
        int socket_id = rte_socket_id();
        int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
        int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;

        if (params != NULL && params[0] != '\0') {
                struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

                if (!kvlist) {
                        SW_LOG_INFO(
                                "Ignoring unsupported parameters when creating device '%s'\n",
                                name);
                } else {
                        int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
                                        assign_numa_node, &socket_id);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing numa node parameter\n",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
                                        set_sched_quanta, &sched_quanta);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing sched quanta parameter\n",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
                                        set_credit_quanta, &credit_quanta);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing credit quanta parameter\n",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        rte_kvargs_free(kvlist);
                }
        }

        SW_LOG_INFO(
                        "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
                        name, socket_id, sched_quanta, credit_quanta);

        dev = rte_event_pmd_vdev_init(name,
                        sizeof(struct sw_evdev), socket_id);
        if (dev == NULL) {
                SW_LOG_ERR("eventdev vdev init() failed\n");
                return -EFAULT;
        }
        dev->dev_ops = &evdev_sw_ops;

        sw = dev->data->dev_private;
        sw->data = dev->data;

        /* copy values passed from vdev command line to instance */
        sw->credit_update_quanta = credit_quanta;
        sw->sched_quanta = sched_quanta;

        return 0;
}

static int
sw_remove(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        SW_LOG_INFO("Closing eventdev sw device %s\n", name);

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
        .probe = sw_probe,
        .remove = sw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
                SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");
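
/*
 * Example vdev string (illustrative; "event_sw0" is an arbitrary instance
 * name and the quanta values are arbitrary in-range examples):
 *
 *      ./app --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 */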