X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fdpaa%2Fdpaa_eventdev.c;h=460375ca306312390b42baa53a2d24fe87909864;hb=bbf19e89b87cab524a91f76a89347538a038ceae;hp=b8761c6a916e824b686ab13bb3698a84c05effe8;hpb=ba6c1aa29419a7c9f5e5455d528282b9d3e020ef;p=dpdk.git

diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index b8761c6a91..460375ca30 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #include
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -46,6 +46,7 @@
  * Eventqueue = Channel Instance
  * 1 Eventdev can have N Eventqueue
  */
+RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);
 
 #define DISABLE_INTR_MODE "disable_intr"
 
@@ -98,7 +99,7 @@ dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
 		case RTE_EVENT_OP_RELEASE:
 			qman_dca_index(ev[i].impl_opaque, 0);
 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
 			DPAA_PER_LCORE_DQRR_SIZE--;
 			break;
@@ -173,12 +174,12 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	int ret;
 	u16 ch_id;
 	void *buffers[8];
-	u32 num_frames, i, irq = 0;
+	u32 num_frames, i;
 	uint64_t cur_ticks = 0, wait_time_ticks = 0;
 	struct dpaa_port *portal = (struct dpaa_port *)port;
 	struct rte_mbuf *mbuf;
 
-	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 		/* Affine current thread context to a qman portal */
 		ret = rte_dpaa_portal_init((void *)0);
 		if (ret) {
@@ -205,7 +206,7 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
 			qman_dca_index(i, 0);
 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
 			DPAA_PER_LCORE_DQRR_SIZE--;
 		}
@@ -222,8 +223,6 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	do {
 		/* Lets dequeue the frames */
 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
-		if (irq)
-			irq = 0;
 		if (num_frames)
 			break;
 		cur_ticks = rte_get_timer_cycles();
@@ -250,7 +249,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
 	struct dpaa_port *portal = (struct dpaa_port *)port;
 	struct rte_mbuf *mbuf;
 
-	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 		/* Affine current thread context to a qman portal */
 		ret = rte_dpaa_portal_init((void *)0);
 		if (ret) {
@@ -277,7 +276,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
 		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
 			qman_dca_index(i, 0);
 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
-			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
 			DPAA_PER_LCORE_DQRR_SIZE--;
 		}
@@ -356,7 +355,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
 		RTE_EVENT_DEV_CAP_BURST_MODE |
 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
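
Note on the mbuf->seqn to *dpaa_seqn(mbuf) change in the hunks above: a minimal sketch, assuming dpaa_seqn() is an inline accessor backed by an mbuf dynamic field registered with rte_mbuf_dynfield_register(). The field name and the init helper below are illustrative assumptions, not the exact DPDK bus/dpaa code.

/*
 * Sketch only: a dpaa_seqn()-style accessor over an mbuf dynamic field,
 * standing in for the removed mbuf->seqn member. Field name and init
 * helper are assumptions for illustration.
 */
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

int dpaa_seqn_dynfield_offset = -1;

static inline uint32_t *
dpaa_seqn(struct rte_mbuf *mbuf)
{
	/* Return a pointer to the 32-bit sequence number carried by this mbuf. */
	return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset, uint32_t *);
}

static int
dpaa_seqn_dynfield_init(void)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "dpaa_seqn_dynfield",	/* hypothetical field name */
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};

	/* Register once at probe time; every mbuf then carries the field. */
	dpaa_seqn_dynfield_offset = rte_mbuf_dynfield_register(&desc);
	return (dpaa_seqn_dynfield_offset < 0) ? -1 : 0;
}

With an accessor of this shape, the driver can write *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN; exactly as the hunks above do, without struct rte_mbuf needing a dedicated seqn member.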