/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_vdev.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>

#include "sw_evdev.h"
#include "iq_ring.h"
#include "event_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

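/*
 * Link an array of queues to a port. Returns the number of queues linked;
 * on a partial link, rte_errno is set and the count of successful links is
 * returned. A directed (single-link) queue may map to only one port, and a
 * port serving a directed queue may not take any additional mappings.
 */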
static int
sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
                const uint8_t priorities[], uint16_t num)
{
        struct sw_port *p = port;
        struct sw_evdev *sw = sw_pmd_priv(dev);
        int i;

        RTE_SET_USED(priorities);
        for (i = 0; i < num; i++) {
                struct sw_qid *q = &sw->qids[queues[i]];

                /* check for qid map overflow */
                if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
                        rte_errno = -EDQUOT;
                        break;
                }

                if (p->is_directed && p->num_qids_mapped > 0) {
                        rte_errno = -EDQUOT;
                        break;
                }

                if (q->type == SW_SCHED_TYPE_DIRECT) {
                        /* check directed qids only map to one port */
                        if (p->num_qids_mapped > 0) {
                                rte_errno = -EDQUOT;
                                break;
                        }
                        /* check port only takes a directed flow */
                        if (num > 1) {
                                rte_errno = -EDQUOT;
                                break;
                        }

                        p->is_directed = 1;
                        p->num_qids_mapped = 1;
                } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
                        p->num_ordered_qids++;
                        p->num_qids_mapped++;
                } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
                        p->num_qids_mapped++;
                }

                q->cq_map[q->cq_num_mapped_cqs] = p->id;
                rte_smp_wmb();
                q->cq_num_mapped_cqs++;
        }
        return i;
}

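/*
 * Unlink queues from a port. The port is removed from each queue's CQ map
 * by swapping the last mapped entry into its slot, so the map order is not
 * preserved. Returns the number of unlinks performed.
 */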
static int
sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
                uint16_t nb_unlinks)
{
        struct sw_port *p = port;
        struct sw_evdev *sw = sw_pmd_priv(dev);
        unsigned int i, j;

        int unlinked = 0;
        for (i = 0; i < nb_unlinks; i++) {
                struct sw_qid *q = &sw->qids[queues[i]];
                for (j = 0; j < q->cq_num_mapped_cqs; j++) {
                        if (q->cq_map[j] == p->id) {
                                q->cq_map[j] =
                                        q->cq_map[q->cq_num_mapped_cqs - 1];
                                rte_smp_wmb();
                                q->cq_num_mapped_cqs--;
                                unlinked++;

                                p->num_qids_mapped--;

                                if (q->type == RTE_SCHED_TYPE_ORDERED)
                                        p->num_ordered_qids--;

                                continue;
                        }
                }
        }
        return unlinked;
}

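/*
 * Set up a port: create its rx (producer) and cq (consumer) rings, record
 * the new-event credit threshold, and mark the history list entries empty.
 * If the port was already initialized, its outstanding credits are first
 * returned to the device-wide inflight count.
 */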
static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
                const struct rte_event_port_conf *conf)
{
        struct sw_evdev *sw = sw_pmd_priv(dev);
        struct sw_port *p = &sw->ports[port_id];
        char buf[QE_RING_NAMESIZE];
        unsigned int i;

        struct rte_event_dev_info info;
        sw_info_get(dev, &info);

        /* detect re-configuring and return credits to instance if needed */
        if (p->initialized) {
                /* Taking credits from the pool is done one quanta at a time,
                 * and credits may be spent (counted in p->inflights) or still
                 * available in the port (p->inflight_credits). We must return
                 * the sum so we do not leak credits.
                 */
                int possible_inflights = p->inflight_credits + p->inflights;
                rte_atomic32_sub(&sw->inflights, possible_inflights);
        }

        *p = (struct sw_port){0}; /* zero entire structure */
        p->id = port_id;
        p->sw = sw;

        snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
                        "rx_worker_ring");
        p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
                        dev->data->socket_id);
        if (p->rx_worker_ring == NULL) {
                SW_LOG_ERR("Error creating RX worker ring for port %d\n",
                                port_id);
                return -1;
        }

        p->inflight_max = conf->new_event_threshold;

        snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
                        "cq_worker_ring");
        p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
                        dev->data->socket_id);
        if (p->cq_worker_ring == NULL) {
                qe_ring_destroy(p->rx_worker_ring);
                SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
                                port_id);
                return -1;
        }
        sw->cq_ring_space[port_id] = conf->dequeue_depth;

        /* set hist list contents to empty */
        for (i = 0; i < SW_PORT_HIST_LIST; i++) {
                p->hist_list[i].fid = -1;
                p->hist_list[i].qid = -1;
        }
        dev->data->ports[port_id] = p;

        rte_smp_wmb();
        p->initialized = 1;
        return 0;
}

static void
sw_port_release(void *port)
{
        struct sw_port *p = (void *)port;
        if (p == NULL)
                return;

        qe_ring_destroy(p->rx_worker_ring);
        qe_ring_destroy(p->cq_worker_ring);
        memset(p, 0, sizeof(*p));
}

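/*
 * Initialize a queue (QID): create one IQ ring per internal priority level
 * and reset all flow-ID (FID) pinning. For ordered queues, also allocate
 * the reorder buffer and its freelist ring, sized to the requested reorder
 * window rounded up to a power of two.
 */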
static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
                const struct rte_event_queue_conf *queue_conf)
{
        unsigned int i;
        int dev_id = sw->data->dev_id;
        int socket_id = sw->data->socket_id;
        char buf[IQ_RING_NAMESIZE];
        struct sw_qid *qid = &sw->qids[idx];

        for (i = 0; i < SW_IQS_MAX; i++) {
                snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
                qid->iq[i] = iq_ring_create(buf, socket_id);
                if (!qid->iq[i]) {
                        SW_LOG_DBG("ring create failed");
                        goto cleanup;
                }
        }

        /* Initialize the FID structures to no pinning (-1), and zero packets */
        const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
        for (i = 0; i < RTE_DIM(qid->fids); i++)
                qid->fids[i] = fid;

        qid->id = idx;
        qid->type = type;
        qid->priority = queue_conf->priority;

        if (qid->type == RTE_SCHED_TYPE_ORDERED) {
                char ring_name[RTE_RING_NAMESIZE];
                uint32_t window_size;

                /* rte_ring and window_size_mask require window_size to
                 * be a power-of-2.
                 */
                window_size = rte_align32pow2(
                                queue_conf->nb_atomic_order_sequences);

                qid->window_size = window_size - 1;

                if (!window_size) {
                        SW_LOG_DBG(
                                "invalid reorder_window_size for ordered queue\n"
                                );
                        goto cleanup;
                }

                snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
                qid->reorder_buffer = rte_zmalloc_socket(buf,
                                window_size * sizeof(qid->reorder_buffer[0]),
                                0, socket_id);
                if (!qid->reorder_buffer) {
                        SW_LOG_DBG("reorder_buffer malloc failed\n");
                        goto cleanup;
                }

                memset(&qid->reorder_buffer[0],
                       0,
                       window_size * sizeof(qid->reorder_buffer[0]));

                snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
                                dev_id, idx);

                /* lookup the ring, and if it already exists, free it */
                struct rte_ring *cleanup = rte_ring_lookup(ring_name);
                if (cleanup)
                        rte_ring_free(cleanup);

                qid->reorder_buffer_freelist = rte_ring_create(ring_name,
                                window_size,
                                socket_id,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
                if (!qid->reorder_buffer_freelist) {
                        SW_LOG_DBG("freelist ring create failed");
                        goto cleanup;
                }

                /* Populate the freelist with reorder buffer entries. Enqueue
                 * 'window_size - 1' entries because the rte_ring holds only
                 * that many.
                 */
                for (i = 0; i < window_size - 1; i++) {
                        if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
                                                &qid->reorder_buffer[i]) < 0)
                                goto cleanup;
                }

                qid->reorder_buffer_index = 0;
                qid->cq_next_tx = 0;
        }

        qid->initialized = 1;

        return 0;

cleanup:
        for (i = 0; i < SW_IQS_MAX; i++) {
                if (qid->iq[i])
                        iq_ring_destroy(qid->iq[i]);
        }

        if (qid->reorder_buffer) {
                rte_free(qid->reorder_buffer);
                qid->reorder_buffer = NULL;
        }

        if (qid->reorder_buffer_freelist) {
                rte_ring_free(qid->reorder_buffer_freelist);
                qid->reorder_buffer_freelist = NULL;
        }

        return -EINVAL;
}

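/*
 * Map the eventdev queue configuration flags onto this PMD's internal
 * queue types before initializing the QID. ALL_TYPES queues are not
 * supported.
 */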
static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                const struct rte_event_queue_conf *conf)
{
        int type;

        /* SINGLE_LINK can be OR-ed with other types, so handle first */
        if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
                type = SW_SCHED_TYPE_DIRECT;
        } else {
                switch (conf->event_queue_cfg) {
                case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
                        type = RTE_SCHED_TYPE_ATOMIC;
                        break;
                case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
                        type = RTE_SCHED_TYPE_ORDERED;
                        break;
                case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
                        type = RTE_SCHED_TYPE_PARALLEL;
                        break;
                case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
                        SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
                        return -ENOTSUP;
                default:
                        SW_LOG_ERR("Unknown queue type %d requested\n",
                                   conf->event_queue_cfg);
                        return -EINVAL;
                }
        }

        struct sw_evdev *sw = sw_pmd_priv(dev);
        return qid_init(sw, queue_id, type, conf);
}

static void
sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
        struct sw_evdev *sw = sw_pmd_priv(dev);
        struct sw_qid *qid = &sw->qids[id];
        uint32_t i;

        for (i = 0; i < SW_IQS_MAX; i++)
                iq_ring_destroy(qid->iq[i]);

        if (qid->type == RTE_SCHED_TYPE_ORDERED) {
                rte_free(qid->reorder_buffer);
                rte_ring_free(qid->reorder_buffer_freelist);
        }
        memset(qid, 0, sizeof(*qid));
}

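/* Default queue configuration: atomic-only, 4096 flows, normal priority. */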
static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                                 struct rte_event_queue_conf *conf)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        static const struct rte_event_queue_conf default_conf = {
                .nb_atomic_flows = 4096,
                .nb_atomic_order_sequences = 1,
                .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        };

        *conf = default_conf;
}

static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                 struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);

        port_conf->new_event_threshold = 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
}

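/*
 * Record the configured queue, port and inflight-event limits. Per-dequeue
 * timeouts are not supported by this PMD.
 */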
static int
sw_dev_configure(const struct rte_eventdev *dev)
{
        struct sw_evdev *sw = sw_pmd_priv(dev);
        const struct rte_eventdev_data *data = dev->data;
        const struct rte_event_dev_config *conf = &data->dev_conf;

        sw->qid_count = conf->nb_event_queues;
        sw->port_count = conf->nb_event_ports;
        sw->nb_events_limit = conf->nb_events_limit;

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                return -ENOTSUP;

        return 0;
}

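/* Report the static capabilities and limits of the sw eventdev PMD. */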
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
        RTE_SET_USED(dev);

        static const struct rte_event_dev_info evdev_sw_info = {
                        .driver_name = SW_PMD_NAME,
                        .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
                        .max_event_queue_flows = SW_QID_NUM_FIDS,
                        .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
                        .max_event_priority_levels = SW_IQS_MAX,
                        .max_event_ports = SW_PORTS_MAX,
                        .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
                        .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
                        .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
                        .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_EVENT_QOS),
        };

        *info = evdev_sw_info;
}

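/* Parsers for the vdev command-line arguments, invoked via rte_kvargs. */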
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
        int *socket_id = opaque;
        *socket_id = atoi(value);
        if (*socket_id >= RTE_MAX_NUMA_NODES)
                return -1;
        return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
        int *quanta = opaque;
        *quanta = atoi(value);
        if (*quanta < 0 || *quanta >= 4096)
                return -1;
        return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
        int *credit = opaque;
        *credit = atoi(value);
        if (*credit < 0 || *credit >= 128)
                return -1;
        return 0;
}

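/*
 * Create a sw eventdev vdev instance: parse the optional numa_node,
 * sched_quanta and credit_quanta arguments, allocate the device on the
 * requested socket, and attach the eventdev ops table.
 */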
static int
sw_probe(const char *name, const char *params)
{
        static const struct rte_eventdev_ops evdev_sw_ops = {
                        .dev_configure = sw_dev_configure,
                        .dev_infos_get = sw_info_get,

                        .queue_def_conf = sw_queue_def_conf,
                        .queue_setup = sw_queue_setup,
                        .queue_release = sw_queue_release,
                        .port_def_conf = sw_port_def_conf,
                        .port_setup = sw_port_setup,
                        .port_release = sw_port_release,
                        .port_link = sw_port_link,
                        .port_unlink = sw_port_unlink,
        };

        static const char *const args[] = {
                NUMA_NODE_ARG,
                SCHED_QUANTA_ARG,
                CREDIT_QUANTA_ARG,
                NULL
        };
        struct rte_eventdev *dev;
        struct sw_evdev *sw;
        int socket_id = rte_socket_id();
        int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
        int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;

        if (params != NULL && params[0] != '\0') {
                struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

                if (!kvlist) {
                        SW_LOG_INFO(
                                "Ignoring unsupported parameters when creating device '%s'\n",
                                name);
                } else {
                        int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
                                        assign_numa_node, &socket_id);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing numa node parameter",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
                                        set_sched_quanta, &sched_quanta);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing sched quanta parameter",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
                                        set_credit_quanta, &credit_quanta);
                        if (ret != 0) {
                                SW_LOG_ERR(
                                        "%s: Error parsing credit quanta parameter",
                                        name);
                                rte_kvargs_free(kvlist);
                                return ret;
                        }

                        rte_kvargs_free(kvlist);
                }
        }

        SW_LOG_INFO(
                        "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
                        name, socket_id, sched_quanta, credit_quanta);

        dev = rte_event_pmd_vdev_init(name,
                        sizeof(struct sw_evdev), socket_id);
        if (dev == NULL) {
                SW_LOG_ERR("eventdev vdev init() failed");
                return -EFAULT;
        }
        dev->dev_ops = &evdev_sw_ops;

        sw = dev->data->dev_private;
        sw->data = dev->data;

        /* copy values passed from vdev command line to instance */
        sw->credit_update_quanta = credit_quanta;
        sw->sched_quanta = sched_quanta;

        return 0;
}

static int
sw_remove(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        SW_LOG_INFO("Closing eventdev sw device %s\n", name);

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
        .probe = sw_probe,
        .remove = sw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
                SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");