event/dpaa2: remove conditional compilation
drivers/event/dpaa2/dpaa2_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

/* Dynamic logging identifier for the event device */
int dpaa2_logtype_event;

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	/* TODO: dpaa2_portal should hold dpio_dev; no per-thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

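			/*
			 * For an atomic event that still holds a DQRR entry,
			 * request discrete consumption acknowledgement (DCA):
			 * QBMAN consumes the held entry when this enqueue
			 * completes, releasing the atomic context.
			 */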
			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * TODO: align with the HW context data to avoid
			 * this copy.
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
						sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
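		/* Issue the prepared enqueue descriptors, retrying until
		 * QBMAN has accepted every frame in this batch.
		 */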
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

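/* Sleep on the portal's DQRR interrupt until data arrives or the
 * timeout (in milliseconds, see dpaa2_eventdev_timeout_ticks) expires.
 */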
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
}

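/* Rx callback for parallel events: the FD address carries the rte_event
 * copy made on the enqueue side; reclaim it and consume the DQRR entry
 * immediately, since no atomic context is held.
 */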
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

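/* Rx callback for atomic events: keep the DQRR entry held and record its
 * index in mbuf->seqn (offset by one so that zero means "none"); the entry
 * is consumed later via DCA on enqueue or on the next dequeue.
 */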
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

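	/* Pull from the DQRR; on an empty ring, block at most once on the
	 * portal interrupt before treating the queue as drained.
	 */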
	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support DPIO up to the number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value supplied with each dequeue
		 * operation, so invalidate the global value here.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -ENOTSUP;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	snprintf(event_port_name, sizeof(event_port_name),
		 "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (!portal)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* TODO: dpaa2_portal should hold dpio_dev; no per-lcore variable */
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
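	/* One tick is one millisecond: the dequeue wait path hands the
	 * tick count straight to epoll_wait(), whose timeout is in ms.
	 */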
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

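	/* Only the dpaa2 ethdev can deliver packets to a DPCON in
	 * hardware; any other driver falls back to the SW Rx adapter.
	 */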
	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

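	/* Attach every Rx queue of the ethdev to the DPCON backing the
	 * target event queue; unwind already-attached queues on failure.
	 */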
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
				dpcon_id, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
			dpcon_id, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

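	/* Only dpaa2_sec devices (names prefixed "dpsec-") support the
	 * HW crypto adapter path.
	 */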
	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i,
				dpcon_id, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: ret %d", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
			dpcon_id, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cryptodev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get  = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start     = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop      = dpaa2_eventdev_eth_stop,
	.crypto_adapter_caps_get       = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start          = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop           = dpaa2_eventdev_crypto_stop,
};

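/* Route both DPCI Rx queues (parallel and atomic) to the given DPCON so
 * that software-enqueued events re-enter QBMAN scheduling; the per-queue
 * user context selects the matching dequeue callback.
 */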
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the queues so frames land on the DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		  DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &dpaa2_eventdev_ops;
	eventdev->enqueue       = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue       = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

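	/* Grab as many DPCON+DPCI pairs as the fslmc bus provides; each
	 * pair backs one event queue.
	 */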
	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);

RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}