event/dpaa2: add configuration functions
dpdk.git: drivers/event/dpaa2/dpaa2_eventdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_vdev.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */
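
/*
 * Illustrative application-side setup sequence for this PMD (a sketch for
 * orientation only, not part of the driver; dev_id, queue q, port p and
 * counts are hypothetical):
 *
 *   rte_event_dev_configure(dev_id, &config);         // dev_configure op
 *   rte_event_queue_setup(dev_id, q, &queue_conf);    // one per DPCON
 *   rte_event_port_setup(dev_id, p, &port_conf);      // one per DPIO
 *   rte_event_port_link(dev_id, p, queues, prios, n); // DPCON -> DPIO push
 *   rte_event_dev_start(dev_id);
 */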

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
                             uint16_t nb_events)
{
        /* Placeholder only: the datapath is not part of this patch */
        RTE_SET_USED(port);
        RTE_SET_USED(ev);
        RTE_SET_USED(nb_events);

        return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
        return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
                             uint16_t nb_events, uint64_t timeout_ticks)
{
        /* Placeholder only: the datapath is not part of this patch */
        RTE_SET_USED(port);
        RTE_SET_USED(ev);
        RTE_SET_USED(nb_events);
        RTE_SET_USED(timeout_ticks);

        return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
                       uint64_t timeout_ticks)
{
        return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                        struct rte_event_dev_info *dev_info)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;

        PMD_DRV_FUNC_TRACE();

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));
        dev_info->min_dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_dequeue_timeout_ns =
                DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
        dev_info->dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_event_queues = priv->max_event_queues;
        dev_info->max_event_queue_flows =
                DPAA2_EVENT_MAX_QUEUE_FLOWS;
        dev_info->max_event_queue_priority_levels =
                DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
        dev_info->max_event_priority_levels =
                DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
        dev_info->max_event_ports = RTE_MAX_LCORE;
        dev_info->max_event_port_dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        dev_info->max_event_port_enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct rte_event_dev_config *conf = &dev->data->dev_conf;

        PMD_DRV_FUNC_TRACE();

        priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
        priv->nb_event_queues = conf->nb_event_queues;
        priv->nb_event_ports = conf->nb_event_ports;
        priv->nb_event_queue_flows = conf->nb_event_queue_flows;
        priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
        priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
        priv->event_dev_cfg = conf->event_dev_cfg;

        PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
        return 0;
}
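
/*
 * A typical caller derives this configuration from the advertised limits,
 * e.g. (application-side sketch; the chosen values are hypothetical):
 *
 *   struct rte_event_dev_info info;
 *   struct rte_event_dev_config config = {0};
 *
 *   rte_event_dev_info_get(dev_id, &info);
 *   config.nb_event_queues = 1;
 *   config.nb_event_ports = 1;
 *   config.nb_events_limit = info.max_num_events;
 *   config.nb_event_queue_flows = info.max_event_queue_flows;
 *   config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *   config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *   config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *   rte_event_dev_configure(dev_id, &config);
 */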

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                              struct rte_event_queue_conf *queue_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
                                      RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                           const struct rte_event_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct evq_info_t *evq_info = &priv->evq_info[queue_id];

        PMD_DRV_FUNC_TRACE();

        evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

        return 0;
}
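
/*
 * Queues are normally set up from the driver-provided defaults, e.g.
 * (application-side sketch):
 *
 *   struct rte_event_queue_conf queue_conf;
 *
 *   rte_event_queue_default_conf_get(dev_id, q, &queue_conf);
 *   rte_event_queue_setup(dev_id, q, &queue_conf);
 */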

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                             struct rte_event_port_conf *port_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);

        port_conf->new_event_threshold =
                DPAA2_EVENT_MAX_NUM_EVENTS;
        port_conf->dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static void
dpaa2_eventdev_port_release(void *port)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(port);
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
                          const struct rte_event_port_conf *port_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(port_conf);

        if (!dpaa2_io_portal[port_id].dpio_dev) {
                dpaa2_io_portal[port_id].dpio_dev =
                                dpaa2_get_qbman_swp(port_id);
                /* Check for a portal before taking a reference on it */
                if (!dpaa2_io_portal[port_id].dpio_dev)
                        return -1;
                rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
        }

        dpaa2_io_portal[port_id].eventdev = dev;
        dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
        return 0;
}
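
/*
 * Each event port is backed by a DPIO software portal (Eventport = DPIO
 * above); port setup only binds the portal and takes a reference, so a
 * port can be set up once per available DPIO instance.
 */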

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
                           uint8_t queues[], uint16_t nb_unlinks)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_io_portal_t *dpaa2_portal = port;
        struct evq_info_t *evq_info;
        int i;

        PMD_DRV_FUNC_TRACE();

        for (i = 0; i < nb_unlinks; i++) {
                evq_info = &priv->evq_info[queues[i]];
                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(
                        dpaa2_portal->dpio_dev->dpio, 0,
                        dpaa2_portal->dpio_dev->token,
                        evq_info->dpcon->dpcon_id);
                evq_info->link = 0;
        }

        return (int)nb_unlinks;
}
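
/*
 * Linking a queue to a port maps the queue's DPCON channel onto the port's
 * DPIO portal as a static dequeue channel and enables push mode on that
 * channel, so frames queued to the DPCON are delivered to the portal.
 */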

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
                         const uint8_t queues[], const uint8_t priorities[],
                         uint16_t nb_links)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_io_portal_t *dpaa2_portal = port;
        struct evq_info_t *evq_info;
        uint8_t channel_index;
        int ret, i, n;

        PMD_DRV_FUNC_TRACE();

        /* Per-link priorities are not supported by this PMD */
        RTE_SET_USED(priorities);

        for (i = 0; i < nb_links; i++) {
                evq_info = &priv->evq_info[queues[i]];
                if (evq_info->link)
                        continue;

                ret = dpio_add_static_dequeue_channel(
                        dpaa2_portal->dpio_dev->dpio,
                        CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
                        evq_info->dpcon->dpcon_id, &channel_index);
                if (ret < 0) {
                        PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
                                    ret);
                        goto err;
                }

                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
                evq_info->link = 1;
        }

        return (int)nb_links;
err:
        /* Roll back the links made so far */
        for (n = 0; n < i; n++) {
                evq_info = &priv->evq_info[queues[n]];
                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(
                        dpaa2_portal->dpio_dev->dpio, 0,
                        dpaa2_portal->dpio_dev->token,
                        evq_info->dpcon->dpcon_id);
                evq_info->link = 0;
        }
        return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
                             uint64_t *timeout_ticks)
{
        uint32_t scale = 1;

        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        *timeout_ticks = ns * scale;

        return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(f);
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
        .dev_infos_get    = dpaa2_eventdev_info_get,
        .dev_configure    = dpaa2_eventdev_configure,
        .dev_start        = dpaa2_eventdev_start,
        .dev_stop         = dpaa2_eventdev_stop,
        .dev_close        = dpaa2_eventdev_close,
        .queue_def_conf   = dpaa2_eventdev_queue_def_conf,
        .queue_setup      = dpaa2_eventdev_queue_setup,
        .queue_release    = dpaa2_eventdev_queue_release,
        .port_def_conf    = dpaa2_eventdev_port_def_conf,
        .port_setup       = dpaa2_eventdev_port_setup,
        .port_release     = dpaa2_eventdev_port_release,
        .port_link        = dpaa2_eventdev_port_link,
        .port_unlink      = dpaa2_eventdev_port_unlink,
        .timeout_ticks    = dpaa2_eventdev_timeout_ticks,
        .dump             = dpaa2_eventdev_dump
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
                          struct dpaa2_dpcon_dev *dpcon_dev)
{
        struct dpci_rx_queue_cfg rx_queue_cfg;
        int ret, i;

        /* Configure the DPCI Rx queues to deliver frames to a DPCON object */
        memset(&rx_queue_cfg, 0, sizeof(rx_queue_cfg));
        rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST;
        rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
        rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
        rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

        for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
                rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
                ret = dpci_set_rx_queue(&dpci_dev->dpci,
                                        CMD_PRI_LOW,
                                        dpci_dev->token, i,
                                        &rx_queue_cfg);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "set_rx_q failed with err code: %d", ret);
                        return ret;
                }
        }
        return 0;
}
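
/*
 * The DPCI instance is the "soft event flow" noted in the clarifications
 * above: frames enqueued through it land on the DPCON set as its Rx-queue
 * destination, and from there reach whichever DPIO portal that DPCON is
 * linked to.
 */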

static int
dpaa2_eventdev_create(const char *name)
{
        struct rte_eventdev *eventdev;
        struct dpaa2_eventdev *priv;
        struct dpaa2_dpcon_dev *dpcon_dev = NULL;
        struct dpaa2_dpci_dev *dpci_dev = NULL;
        int ret;

        eventdev = rte_event_pmd_vdev_init(name,
                                           sizeof(struct dpaa2_eventdev),
                                           rte_socket_id());
        if (eventdev == NULL) {
                PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
                goto fail;
        }

        eventdev->dev_ops       = &dpaa2_eventdev_ops;
        eventdev->schedule      = NULL;
        eventdev->enqueue       = dpaa2_eventdev_enqueue;
        eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->dequeue       = dpaa2_eventdev_dequeue;
        eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        priv = eventdev->data->dev_private;
        priv->max_event_queues = 0;

        /* Pair up DPCON and DPCI instances until either pool runs out */
        do {
                dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
                if (!dpcon_dev)
                        break;
                priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

                dpci_dev = rte_dpaa2_alloc_dpci_dev();
                if (!dpci_dev) {
                        rte_dpaa2_free_dpcon_dev(dpcon_dev);
                        break;
                }
                priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

                ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "dpci setup failed with err code: %d", ret);
                        return ret;
                }
                priv->max_event_queues++;
        } while (dpcon_dev && dpci_dev);

        return 0;
fail:
        return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        PMD_DRV_LOG(INFO, "Initializing %s", name);
        return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        PMD_DRV_LOG(INFO, "Closing %s", name);

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
        .probe = dpaa2_eventdev_probe,
        .remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
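
/*
 * The device is instantiated as a virtual device at EAL init, e.g. (sketch;
 * EVENTDEV_NAME_DPAA2_PMD is expected to expand to the vdev name declared
 * in dpaa2_eventdev.h, assumed here to be "event_dpaa2"):
 *
 *   ./app --vdev="event_dpaa2" -- ...
 */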