diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index b8761c6..460375c 100644
@@ -1,5 +1,5 @@
 /*   SPDX-License-Identifier:        BSD-3-Clause
- *   Copyright 2017 NXP
+ *   Copyright 2017-2019 NXP
  */
 
 #include <assert.h>
@@ -24,7 +24,7 @@
 #include <rte_memzone.h>
 #include <rte_pci.h>
 #include <rte_eventdev.h>
-#include <rte_eventdev_pmd_vdev.h>
+#include <eventdev_pmd_vdev.h>
 #include <rte_ethdev.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
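This include change tracks the eventdev rework that made the PMD driver headers internal: rte_eventdev_pmd_vdev.h was renamed to eventdev_pmd_vdev.h and dropped from the public API, so vdev-based event PMDs now include it by its internal name.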
@@ -46,6 +46,7 @@
  * Eventqueue = Channel Instance
  * 1 Eventdev can have N Eventqueue
  */
+RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);
 
 #define DISABLE_INTR_MODE "disable_intr"
 
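RTE_LOG_REGISTER registers a dynamic log type at load time, replacing the older pattern of a logtype variable initialized in an RTE_INIT constructor. A minimal sketch of how the registered type is then consumed; the DPAA_EVENTDEV_LOG wrapper name here is illustrative, not taken from this diff:

#include <rte_log.h>

/* register "pmd.event.dpaa" with NOTICE as the default level (as above) */
RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);

/* hypothetical convenience wrapper over the registered type */
#define DPAA_EVENTDEV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa_logtype_eventdev, \
		"%s(): " fmt "\n", __func__, ##args)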
@@ -98,7 +99,7 @@ dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
                case RTE_EVENT_OP_RELEASE:
                        qman_dca_index(ev[i].impl_opaque, 0);
                        mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-                       mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+                       *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
                        DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
                        DPAA_PER_LCORE_DQRR_SIZE--;
                        break;
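The seqn rewrites follow the removal of the seqn field from struct rte_mbuf: drivers now keep their sequence number in a registered mbuf dynamic field. A sketch of the accessor this hunk relies on, assuming dpaa_seqn() resolves RTE_MBUF_DYNFIELD at a bus-registered offset (the real definition lives in the DPAA bus headers):

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

typedef uint32_t dpaa_seqn_t;
/* offset obtained once via rte_mbuf_dynfield_register() at probe time */
extern int dpaa_seqn_dynfield_offset;

static inline dpaa_seqn_t *
dpaa_seqn(struct rte_mbuf *mbuf)
{
	/* map the dynamic-field offset to a typed pointer inside the mbuf */
	return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset, dpaa_seqn_t *);
}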
@@ -173,12 +174,12 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
        int ret;
        u16 ch_id;
        void *buffers[8];
-       u32 num_frames, i, irq = 0;
+       u32 num_frames, i;
        uint64_t cur_ticks = 0, wait_time_ticks = 0;
        struct dpaa_port *portal = (struct dpaa_port *)port;
        struct rte_mbuf *mbuf;
 
-       if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+       if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                /* Affine current thread context to a qman portal */
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
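The RTE_PER_LCORE(dpaa_io) checks give way to the DPAA_PER_LCORE_PORTAL macro exported by the DPAA bus. A hedged sketch of what such a macro typically wraps; the exact definition is assumed, not shown in this diff:

#include <stdbool.h>
#include <rte_per_lcore.h>

/* one portal-affinity marker per lcore, kept in thread-local storage */
RTE_DECLARE_PER_LCORE(bool, dpaa_io);
#define DPAA_PER_LCORE_PORTAL RTE_PER_LCORE(dpaa_io)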
@@ -205,7 +206,7 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
                if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
                        qman_dca_index(i, 0);
                        mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-                       mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+                       *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
                        DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
                        DPAA_PER_LCORE_DQRR_SIZE--;
                }
@@ -222,8 +223,6 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
        do {
                /* Let's dequeue the frames */
                num_frames = qman_portal_dequeue(ev, nb_events, buffers);
-               if (irq)
-                       irq = 0;
                if (num_frames)
                        break;
                cur_ticks = rte_get_timer_cycles();
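With the write-only irq flag dropped, the dequeue path is a plain timeout-bounded poll. A self-contained sketch of that pattern using DPDK's timer-cycle helpers; qman_portal_dequeue() is the driver-internal call seen above, and its prototype is inferred from the call site:

#include <rte_cycles.h>
#include <rte_eventdev.h>

/* driver-internal dequeue; signature assumed from the call above */
extern uint32_t qman_portal_dequeue(struct rte_event ev[],
				    unsigned int poll_limit, void **bufs);

/* poll until frames arrive or the tick budget runs out */
static inline uint32_t
poll_with_timeout(struct rte_event ev[], uint16_t nb_events,
		  void *buffers[], uint64_t wait_time_ticks)
{
	uint64_t start_ticks = rte_get_timer_cycles();
	uint32_t num_frames;

	do {
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
	} while (rte_get_timer_cycles() - start_ticks < wait_time_ticks);

	return num_frames;
}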
@@ -250,7 +249,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
        struct dpaa_port *portal = (struct dpaa_port *)port;
        struct rte_mbuf *mbuf;
 
-       if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+       if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                /* Affine current thread context to a qman portal */
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
@@ -277,7 +276,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
                if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
                        qman_dca_index(i, 0);
                        mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-                       mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+                       *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
                        DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
                        DPAA_PER_LCORE_DQRR_SIZE--;
                }
@@ -356,7 +355,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-               RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+               RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
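Applications can test the newly advertised RTE_EVENT_DEV_CAP_CARRY_FLOW_ID bit before relying on flow-ID preservation across enqueue/dequeue. A minimal usage sketch against the public eventdev API (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <rte_eventdev.h>

static bool
eventdev_carries_flow_id(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	/* query the device capability bitmap */
	if (rte_event_dev_info_get(dev_id, &info) != 0)
		return false;

	return (info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) != 0;
}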