event/sw: support event ports
[dpdk.git] / drivers/event/sw/sw_evdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_vdev.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_ring.h>

#include "sw_evdev.h"
#include "iq_ring.h"
#include "event_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

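/*
 * Set up an event port. Credits still held by a previously initialized port
 * are returned to the device-wide inflight count, the port structure is
 * zeroed, and the two rings backing the port are (re)created: the RX worker
 * ring for events enqueued by the worker, and the CQ worker ring (sized to
 * the configured dequeue_depth) from which the worker dequeues.
 */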
static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *conf)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_port *p = &sw->ports[port_id];
	char buf[QE_RING_NAMESIZE];
	unsigned int i;

	struct rte_event_dev_info info;
	sw_info_get(dev, &info);

	/* detect re-configuration and return credits to the instance if needed */
	if (p->initialized) {
		/* Credits are taken from the pool one quantum at a time, and
		 * may be spent (counted in p->inflights) or still available
		 * in the port (p->inflight_credits). Return the sum so no
		 * credits are leaked.
		 */
		int possible_inflights = p->inflight_credits + p->inflights;
		rte_atomic32_sub(&sw->inflights, possible_inflights);
	}

	*p = (struct sw_port){0}; /* zero entire structure */
	p->id = port_id;
	p->sw = sw;

	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
			"rx_worker_ring");
	p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
			dev->data->socket_id);
	if (p->rx_worker_ring == NULL) {
		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
				port_id);
		return -1;
	}

	p->inflight_max = conf->new_event_threshold;

	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
			"cq_worker_ring");
	p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
			dev->data->socket_id);
	if (p->cq_worker_ring == NULL) {
		qe_ring_destroy(p->rx_worker_ring);
		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
				port_id);
		return -1;
	}
	sw->cq_ring_space[port_id] = conf->dequeue_depth;

	/* set hist list contents to empty */
	for (i = 0; i < SW_PORT_HIST_LIST; i++) {
		p->hist_list[i].fid = -1;
		p->hist_list[i].qid = -1;
	}
	dev->data->ports[port_id] = p;

	rte_smp_wmb();
	p->initialized = 1;
	return 0;
}

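/*
 * Release a previously set up port: destroy both rings and clear the
 * structure so a later setup starts from a clean state.
 */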
static void
sw_port_release(void *port)
{
	struct sw_port *p = (void *)port;
	if (p == NULL)
		return;

	qe_ring_destroy(p->rx_worker_ring);
	qe_ring_destroy(p->cq_worker_ring);
	memset(p, 0, sizeof(*p));
}

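/*
 * Initialize the internal state of one queue (QID): create the SW_IQS_MAX
 * internal IQ rings, mark every flow ID as unpinned, and, for ordered
 * queues, allocate the reorder buffer plus the rte_ring freelist that hands
 * out reorder slots. Returns 0 on success, or -EINVAL after tearing down
 * anything partially created.
 */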
static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
		const struct rte_event_queue_conf *queue_conf)
{
	unsigned int i;
	int dev_id = sw->data->dev_id;
	int socket_id = sw->data->socket_id;
	char buf[IQ_RING_NAMESIZE];
	struct sw_qid *qid = &sw->qids[idx];

	for (i = 0; i < SW_IQS_MAX; i++) {
		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
		qid->iq[i] = iq_ring_create(buf, socket_id);
		if (!qid->iq[i]) {
			SW_LOG_DBG("ring create failed");
			goto cleanup;
		}
	}

	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)
		qid->fids[i] = fid;

	qid->id = idx;
	qid->type = type;
	qid->priority = queue_conf->priority;

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		char ring_name[RTE_RING_NAMESIZE];
		uint32_t window_size;

		/* rte_ring and window_size_mask require window_size to
		 * be a power-of-2.
		 */
		window_size = rte_align32pow2(
				queue_conf->nb_atomic_order_sequences);

		qid->window_size = window_size - 1;

		if (!window_size) {
			SW_LOG_DBG(
				"invalid reorder_window_size for ordered queue\n"
				);
			goto cleanup;
		}

		snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
		qid->reorder_buffer = rte_zmalloc_socket(buf,
				window_size * sizeof(qid->reorder_buffer[0]),
				0, socket_id);
		if (!qid->reorder_buffer) {
			SW_LOG_DBG("reorder_buffer malloc failed\n");
			goto cleanup;
		}

		memset(&qid->reorder_buffer[0],
		       0,
		       window_size * sizeof(qid->reorder_buffer[0]));

		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
				dev_id, idx);

		/* lookup the ring, and if it already exists, free it */
		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
		if (cleanup)
			rte_ring_free(cleanup);

		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
				window_size,
				socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qid->reorder_buffer_freelist) {
			SW_LOG_DBG("freelist ring create failed");
			goto cleanup;
		}

		/* Populate the freelist with reorder buffer entries. Enqueue
		 * 'window_size - 1' entries because the rte_ring holds only
		 * that many.
		 */
		for (i = 0; i < window_size - 1; i++) {
			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
						&qid->reorder_buffer[i]) < 0)
				goto cleanup;
		}

		qid->reorder_buffer_index = 0;
		qid->cq_next_tx = 0;
	}

	qid->initialized = 1;

	return 0;

cleanup:
	for (i = 0; i < SW_IQS_MAX; i++) {
		if (qid->iq[i])
			iq_ring_destroy(qid->iq[i]);
	}

	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;
	}

	if (qid->reorder_buffer_freelist) {
		rte_ring_free(qid->reorder_buffer_freelist);
		qid->reorder_buffer_freelist = NULL;
	}

	return -EINVAL;
}

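/*
 * Translate the public queue configuration flags into the scheduling type
 * used internally, then hand off to qid_init(). Single-link queues map to
 * SW_SCHED_TYPE_DIRECT; the "all types" configuration is rejected with
 * -ENOTSUP.
 */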
static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	int type;

	/* SINGLE_LINK can be OR-ed with other types, so handle first */
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
		type = SW_SCHED_TYPE_DIRECT;
	} else {
		switch (conf->event_queue_cfg) {
		case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
			type = RTE_SCHED_TYPE_ATOMIC;
			break;
		case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
			type = RTE_SCHED_TYPE_ORDERED;
			break;
		case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
			type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
			SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
			return -ENOTSUP;
		default:
			SW_LOG_ERR("Unknown queue type %d requested\n",
				   conf->event_queue_cfg);
			return -EINVAL;
		}
	}

	struct sw_evdev *sw = sw_pmd_priv(dev);
	return qid_init(sw, queue_id, type, conf);
}

static void
sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_qid *qid = &sw->qids[id];
	uint32_t i;

	for (i = 0; i < SW_IQS_MAX; i++)
		iq_ring_destroy(qid->iq[i]);

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		rte_free(qid->reorder_buffer);
		rte_ring_free(qid->reorder_buffer_freelist);
	}
	memset(qid, 0, sizeof(*qid));
}

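/* Default queue configuration: an atomic-only queue with 4096 flows, a
 * single ordering sequence and normal priority.
 */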
static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 4096,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}

static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		 struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

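/*
 * Record the device-level configuration (queue, port and inflight-event
 * counts). Per-dequeue timeouts are not supported by this PMD.
 */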
static int
sw_dev_configure(const struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		return -ENOTSUP;

	return 0;
}

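/*
 * Report the static capabilities of the software eventdev: limits on queues,
 * ports, flows and inflight events, plus the queue/event QoS capability
 * flags.
 */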
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_sw_info = {
			.driver_name = SW_PMD_NAME,
			.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
			.max_event_queue_flows = SW_QID_NUM_FIDS,
			.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
			.max_event_priority_levels = SW_IQS_MAX,
			.max_event_ports = SW_PORTS_MAX,
			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_EVENT_QOS),
	};

	*info = evdev_sw_info;
}

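/*
 * rte_kvargs_process() callbacks for the vdev arguments; each parses an
 * integer value and rejects out-of-range settings.
 */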
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *quanta = opaque;
	*quanta = atoi(value);
	if (*quanta < 0 || *quanta >= 4096)
		return -1;
	return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *credit = opaque;
	*credit = atoi(value);
	if (*credit < 0 || *credit >= 128)
		return -1;
	return 0;
}

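/*
 * Probe (create) a software eventdev instance from a vdev definition,
 * parsing the optional numa_node, sched_quanta and credit_quanta arguments
 * before attaching the ops table to the new device.
 *
 * For example (illustrative EAL command line; the instance name and values
 * are examples, not defaults):
 *
 *   --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 */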
static int
sw_probe(const char *name, const char *params)
{
	static const struct rte_eventdev_ops evdev_sw_ops = {
			.dev_configure = sw_dev_configure,
			.dev_infos_get = sw_info_get,

			.queue_def_conf = sw_queue_def_conf,
			.queue_setup = sw_queue_setup,
			.queue_release = sw_queue_release,
			.port_def_conf = sw_port_def_conf,
			.port_setup = sw_port_setup,
			.port_release = sw_port_release,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		SCHED_QUANTA_ARG,
		CREDIT_QUANTA_ARG,
		NULL
	};
	struct rte_eventdev *dev;
	struct sw_evdev *sw;
	int socket_id = rte_socket_id();
	int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;

	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SW_LOG_INFO(
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing sched quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing credit quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	SW_LOG_INFO(
			"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
			name, socket_id, sched_quanta, credit_quanta);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id);
	if (dev == NULL) {
		SW_LOG_ERR("eventdev vdev init() failed");
		return -EFAULT;
	}
	dev->dev_ops = &evdev_sw_ops;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;

	return 0;
}

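/* Remove (uninitialize) a previously probed software eventdev instance. */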
static int
sw_remove(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	SW_LOG_INFO("Closing eventdev sw device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
	.probe = sw_probe,
	.remove = sw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");