event/dpaa2: apply new capability flags
drivers/event/dpaa2/dpaa2_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017 NXP
 *
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

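/*
 * Enqueue a burst of events. Each event is copied into a freshly allocated
 * rte_event referenced from a frame descriptor, which is then enqueued to
 * the DPCI frame queue selected by the event's sched_type (atomic or
 * parallel). For events carrying an atomic context (impl_opaque set by the
 * dequeue path), the enqueue descriptor also requests DQRR consume via DCA
 * so the held entry is released as part of the enqueue.
 */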
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
                             uint16_t nb_events)
{
        struct rte_eventdev *ev_dev =
                        ((struct dpaa2_io_portal_t *)port)->eventdev;
        struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
        uint32_t queue_id = ev[0].queue_id;
        struct evq_info_t *evq_info = &priv->evq_info[queue_id];
        uint32_t fqid;
        struct qbman_swp *swp;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t loop, frames_to_send;
        struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
        uint16_t num_tx = 0;
        int ret;

        RTE_SET_USED(port);

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failure in affining portal\n");
                        return 0;
                }
        }

        swp = DPAA2_PER_LCORE_PORTAL;

        while (nb_events) {
                frames_to_send = (nb_events >> 3) ?
                        MAX_TX_RING_SLOTS : nb_events;

                for (loop = 0; loop < frames_to_send; loop++) {
                        const struct rte_event *event = &ev[num_tx + loop];

                        if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
                                fqid = evq_info->dpci->queue[
                                        DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
                        else
                                fqid = evq_info->dpci->queue[
                                        DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

                        /* Prepare enqueue descriptor */
                        qbman_eq_desc_clear(&eqdesc[loop]);
                        qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
                        qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
                        qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

                        if (event->impl_opaque) {
                                uint8_t dqrr_index = event->impl_opaque - 1;

                                qbman_eq_desc_set_dca(&eqdesc[loop], 1,
                                                      dqrr_index, 0);
                                DPAA2_PER_LCORE_DPIO->dqrr_size--;
                                DPAA2_PER_LCORE_DPIO->dqrr_held &=
                                        ~(1 << dqrr_index);
                        }

                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

                        /*
                         * todo - need to align with hw context data
                         * to avoid copy
                         */
                        struct rte_event *ev_temp = rte_malloc(NULL,
                                sizeof(struct rte_event), 0);

                        if (!ev_temp) {
                                PMD_DRV_LOG(ERR, "Unable to allocate memory");
                                if (!loop)
                                        return num_tx;
                                frames_to_send = loop;
                                goto send_partial;
                        }
                        rte_memcpy(ev_temp, event, sizeof(struct rte_event));
                        DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
                        DPAA2_SET_FD_LEN((&fd_arr[loop]),
                                         sizeof(struct rte_event));
                }
send_partial:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_enqueue_multiple_desc(swp,
                                        &eqdesc[loop], &fd_arr[loop],
                                        frames_to_send - loop);
                }
                num_tx += frames_to_send;
                nb_events -= frames_to_send;
        }

        return num_tx;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
        return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

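/*
 * Wait for the dequeue timeout by blocking on the portal's epoll fd until a
 * DQRI interrupt is signalled; timeout_ticks is passed to epoll_wait()
 * unchanged. epoll_wait() is retried on EINTR, which can be raised by
 * spurious interrupts.
 */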
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
        struct epoll_event epoll_ev;
        int ret, i = 0;

        qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
                                         QBMAN_SWP_INTERRUPT_DQRI);

RETRY:
        ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
                         &epoll_ev, 1, timeout_ticks);
        if (ret < 1) {
                /* epoll_wait can fail with errno EINTR because of a
                 * spurious interrupt; retry in that case so the wait is
                 * not cut short.
                 */
                if (errno == EINTR) {
                        PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
                        if (i++ > 10)
                                PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
                        goto RETRY;
                }
        }
}

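/*
 * Per-queue callback for parallel events: recover the rte_event stored at
 * the frame descriptor's buffer address, free the temporary copy and
 * consume the DQRR entry immediately.
 */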
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
                                            const struct qbman_fd *fd,
                                            const struct qbman_result *dq,
                                            struct dpaa2_queue *rxq,
                                            struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)DPAA2_GET_FD_ADDR(fd);

        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);

        qbman_swp_dqrr_consume(swp, dq);
}

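/*
 * Per-queue callback for atomic events: as above, but instead of consuming
 * the DQRR entry it is held, its index is recorded in ev->impl_opaque and
 * in the per-lcore dqrr_held mask so the context can be released on a
 * later enqueue or dequeue.
 */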
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
                                          const struct qbman_fd *fd,
                                          const struct qbman_result *dq,
                                          struct dpaa2_queue *rxq,
                                          struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
        uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

        RTE_SET_USED(swp);
        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);
        ev->impl_opaque = dqrr_index + 1;
        DPAA2_PER_LCORE_DPIO->dqrr_size++;
        DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}

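/*
 * Dequeue a burst of events. Any atomic contexts still held from a
 * previous dequeue are released first. Events are then pulled from the
 * portal's DQRR (fed by the statically linked DPCON channels) and handed
 * to the per-queue callback, which fills in the caller's rte_event array.
 * If nothing is available and a timeout was requested, the function waits
 * once for the DQRI interrupt before polling again.
 */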
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
                             uint16_t nb_events, uint64_t timeout_ticks)
{
        const struct qbman_result *dq;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct dpaa2_queue *rxq;
        int num_pkts = 0, ret, i = 0;

        RTE_SET_USED(port);

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failure in affining portal\n");
                        return 0;
                }
        }

        swp = DPAA2_PER_LCORE_PORTAL;

        /* Check if there are atomic contexts to be released */
        while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
                if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
                        dq = qbman_get_dqrr_from_idx(swp, i);
                        qbman_swp_dqrr_consume(swp, dq);
                        DPAA2_PER_LCORE_DPIO->dqrr_size--;
                }
                i++;
        }
        DPAA2_PER_LCORE_DPIO->dqrr_held = 0;

        do {
                dq = qbman_swp_dqrr_next(swp);
                if (!dq) {
                        if (!num_pkts && timeout_ticks) {
                                dpaa2_eventdev_dequeue_wait(timeout_ticks);
                                timeout_ticks = 0;
                                continue;
                        }
                        return num_pkts;
                }

                fd = qbman_result_DQ_fd(dq);

                rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
                if (rxq) {
                        rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
                } else {
                        qbman_swp_dqrr_consume(swp, dq);
                        PMD_DRV_LOG(ERR, "Null Return VQ received\n");
                        return 0;
                }

                num_pkts++;
        } while (num_pkts < nb_events);

        return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
                       uint64_t timeout_ticks)
{
        return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

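/*
 * Report device limits and capabilities. The capability flags advertise
 * distributed scheduling, burst mode, runtime port/queue links, multiple
 * queues per port and non-sequenced mode.
 */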
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                        struct rte_event_dev_info *dev_info)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;

        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));
        dev_info->min_dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_dequeue_timeout_ns =
                DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
        dev_info->dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_event_queues = priv->max_event_queues;
        dev_info->max_event_queue_flows =
                DPAA2_EVENT_MAX_QUEUE_FLOWS;
        dev_info->max_event_queue_priority_levels =
                DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
        dev_info->max_event_priority_levels =
                DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
        dev_info->max_event_ports = RTE_MAX_LCORE;
        dev_info->max_event_port_dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        dev_info->max_event_port_enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct rte_event_dev_config *conf = &dev->data->dev_conf;

        PMD_DRV_FUNC_TRACE();

        priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
        priv->nb_event_queues = conf->nb_event_queues;
        priv->nb_event_ports = conf->nb_event_ports;
        priv->nb_event_queue_flows = conf->nb_event_queue_flows;
        priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
        priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
        priv->event_dev_cfg = conf->event_dev_cfg;

        PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
        return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                              struct rte_event_queue_conf *queue_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(queue_conf);

        queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
        queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
                                    RTE_SCHED_TYPE_PARALLEL;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                           const struct rte_event_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct evq_info_t *evq_info =
                &priv->evq_info[queue_id];

        PMD_DRV_FUNC_TRACE();

        evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

        return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                             struct rte_event_port_conf *port_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);
        RTE_SET_USED(port_conf);

        port_conf->new_event_threshold =
                DPAA2_EVENT_MAX_NUM_EVENTS;
        port_conf->dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        port_conf->disable_implicit_release = 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(port);
}

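/*
 * Bind an event port to a QBMAN software portal (DPIO). A reference on the
 * portal is taken and the portal structure itself is handed back as the
 * port object.
 */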
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
                          const struct rte_event_port_conf *port_conf)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(port_conf);

        if (!dpaa2_io_portal[port_id].dpio_dev) {
                dpaa2_io_portal[port_id].dpio_dev =
                                dpaa2_get_qbman_swp(port_id);
                if (!dpaa2_io_portal[port_id].dpio_dev)
                        return -1;
                rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
        }

        dpaa2_io_portal[port_id].eventdev = dev;
        dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
        return 0;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
                           uint8_t queues[], uint16_t nb_unlinks)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_io_portal_t *dpaa2_portal = port;
        struct evq_info_t *evq_info;
        int i;

        PMD_DRV_FUNC_TRACE();

        for (i = 0; i < nb_unlinks; i++) {
                evq_info = &priv->evq_info[queues[i]];
                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
                                        0, dpaa2_portal->dpio_dev->token,
                                        evq_info->dpcon->dpcon_id);
                evq_info->link = 0;
        }

        return (int)nb_unlinks;
}

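/*
 * Link event queues to a port: for each queue, add its DPCON as a static
 * dequeue channel on the port's DPIO and enable push dequeue on the
 * returned channel index. Already linked queues are skipped; on failure
 * the links established so far are rolled back.
 */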
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
                         const uint8_t queues[], const uint8_t priorities[],
                         uint16_t nb_links)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_io_portal_t *dpaa2_portal = port;
        struct evq_info_t *evq_info;
        uint8_t channel_index;
        int ret, i, n;

        PMD_DRV_FUNC_TRACE();

        for (i = 0; i < nb_links; i++) {
                evq_info = &priv->evq_info[queues[i]];
                if (evq_info->link)
                        continue;

                ret = dpio_add_static_dequeue_channel(
                        dpaa2_portal->dpio_dev->dpio,
                        CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
                        evq_info->dpcon->dpcon_id, &channel_index);
                if (ret < 0) {
                        PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
                                    ret);
                        goto err;
                }

                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
                evq_info->link = 1;
        }

        RTE_SET_USED(priorities);

        return (int)nb_links;
err:
        for (n = 0; n < i; n++) {
                evq_info = &priv->evq_info[queues[n]];
                qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
                                   evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
                                        0, dpaa2_portal->dpio_dev->token,
                                        evq_info->dpcon->dpcon_id);
                evq_info->link = 0;
        }
        return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
                             uint64_t *timeout_ticks)
{
        uint32_t scale = 1;

        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        *timeout_ticks = ns * scale;

        return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
                            const struct rte_eth_dev *eth_dev,
                            uint32_t *caps)
{
        const char *ethdev_driver = eth_dev->device->driver->name;

        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        if (!strcmp(ethdev_driver, "net_dpaa2"))
                *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

        return 0;
}

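/*
 * Rx adapter queue add: attach ethernet Rx queues to the DPCON backing the
 * target event queue, so received frames are delivered through the event
 * device. An rx_queue_id of -1 attaches every Rx queue of the port.
 */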
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int i, ret;

        PMD_DRV_FUNC_TRACE();

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_attach(eth_dev, i,
                                dpcon_id, queue_conf);
                if (ret) {
                        PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
                                    ret);
                        goto fail;
                }
        }
        return 0;
fail:
        for (i = (i - 1); i >= 0; i--)
                dpaa2_eth_eventq_detach(eth_dev, i);

        return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                int32_t rx_queue_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int ret;

        PMD_DRV_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_add_all(dev,
                                eth_dev, queue_conf);

        ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
                        dpcon_id, queue_conf);
        if (ret) {
                PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
                return ret;
        }
        return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev)
{
        int i, ret;

        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_detach(eth_dev, i);
                if (ret) {
                        PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
                                    ret);
                        return ret;
                }
        }

        return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev,
                             int32_t rx_queue_id)
{
        int ret;

        PMD_DRV_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

        ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
        if (ret) {
                PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
                return ret;
        }

        return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
                         const struct rte_eth_dev *eth_dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
                        const struct rte_eth_dev *eth_dev)
{
        PMD_DRV_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
        .dev_infos_get    = dpaa2_eventdev_info_get,
        .dev_configure    = dpaa2_eventdev_configure,
        .dev_start        = dpaa2_eventdev_start,
        .dev_stop         = dpaa2_eventdev_stop,
        .dev_close        = dpaa2_eventdev_close,
        .queue_def_conf   = dpaa2_eventdev_queue_def_conf,
        .queue_setup      = dpaa2_eventdev_queue_setup,
        .queue_release    = dpaa2_eventdev_queue_release,
        .port_def_conf    = dpaa2_eventdev_port_def_conf,
        .port_setup       = dpaa2_eventdev_port_setup,
        .port_release     = dpaa2_eventdev_port_release,
        .port_link        = dpaa2_eventdev_port_link,
        .port_unlink      = dpaa2_eventdev_port_unlink,
        .timeout_ticks    = dpaa2_eventdev_timeout_ticks,
        .dump             = dpaa2_eventdev_dump,
        .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
        .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
        .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
        .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
        .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};

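/*
 * Configure the DPCI Rx queues used for software-injected event flows:
 * each queue is pointed at the DPCON and given the parallel or atomic
 * processing callback as its user context.
 */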
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
                          struct dpaa2_dpcon_dev *dpcon_dev)
{
        struct dpci_rx_queue_cfg rx_queue_cfg;
        int ret, i;

        /* Do settings to get the frame on a DPCON object */
        rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
                  DPCI_QUEUE_OPT_USER_CTX;
        rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
        rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
        rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

        dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
                dpaa2_eventdev_process_parallel;
        dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
                dpaa2_eventdev_process_atomic;

        for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
                rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
                ret = dpci_set_rx_queue(&dpci_dev->dpci,
                                        CMD_PRI_LOW,
                                        dpci_dev->token, i,
                                        &rx_queue_cfg);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "set_rx_q failed with err code: %d", ret);
                        return ret;
                }
        }
        return 0;
}

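/*
 * Create the event vdev: register the ops and fast-path functions, then
 * (in the primary process) grab DPCON/DPCI device pairs until one of the
 * pools is exhausted. Each pair backs one event queue, so the number of
 * pairs obtained becomes max_event_queues.
 */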
static int
dpaa2_eventdev_create(const char *name)
{
        struct rte_eventdev *eventdev;
        struct dpaa2_eventdev *priv;
        struct dpaa2_dpcon_dev *dpcon_dev = NULL;
        struct dpaa2_dpci_dev *dpci_dev = NULL;
        int ret;

        eventdev = rte_event_pmd_vdev_init(name,
                                           sizeof(struct dpaa2_eventdev),
                                           rte_socket_id());
        if (eventdev == NULL) {
                PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
                goto fail;
        }

        eventdev->dev_ops       = &dpaa2_eventdev_ops;
        eventdev->enqueue       = dpaa2_eventdev_enqueue;
        eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->dequeue       = dpaa2_eventdev_dequeue;
        eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        priv = eventdev->data->dev_private;
        priv->max_event_queues = 0;

        do {
                dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
                if (!dpcon_dev)
                        break;
                priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

                dpci_dev = rte_dpaa2_alloc_dpci_dev();
                if (!dpci_dev) {
                        rte_dpaa2_free_dpcon_dev(dpcon_dev);
                        break;
                }
                priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

                ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "dpci setup failed with err code: %d", ret);
                        return ret;
                }
                priv->max_event_queues++;
        } while (dpcon_dev && dpci_dev);

        return 0;
fail:
        return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        PMD_DRV_LOG(INFO, "Initializing %s", name);
        return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        PMD_DRV_LOG(INFO, "Closing %s", name);

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
        .probe = dpaa2_eventdev_probe,
        .remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);