bus/fslmc: drop inline from non-static functions
[dpdk.git] / drivers / bus / fslmc / qbman / qbman_portal.c
index 3380e54..d7ff74c 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  *
  */
 
@@ -61,6 +61,8 @@ enum qbman_sdqcr_fc {
 #define MAX_QBMAN_PORTALS  64
 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
 
+uint32_t qman_version;
+
 /* Internal Function declaration */
 static int
 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
@@ -76,6 +78,10 @@ qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
 static int
+qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd);
+static int
 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
@@ -87,18 +93,48 @@ qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                uint32_t *flags,
                int num_frames);
 static int
+qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd,
+               uint32_t *flags,
+               int num_frames);
+static int
 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
 
+static int
+qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames);
+static int
+qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames);
+static int
+qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames);
+
 static int
 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
 static int
+qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd,
+               int num_frames);
+static int
 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
@@ -139,6 +175,13 @@ static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;
 
+static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+       = qbman_swp_enqueue_multiple_fd_direct;
+
 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
@@ -194,14 +237,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
-       if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+       if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+                       && (d->cena_access_mode == qman_cena_fastest_access))
                p->mr.valid_bit = QB_VALID_BIT;
 
        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
-       if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+       if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
@@ -233,7 +277,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
 
        p->eqcr.pi_ring_size = 8;
-       if ((qman_version & 0xFFFF0000) >= QMAN_REV_5000) {
+       if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+                       && (d->cena_access_mode == qman_cena_fastest_access)) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_array_mode_ptr =
                                qbman_swp_enqueue_array_mode_mem_back;
@@ -241,6 +286,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
                                qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                                qbman_swp_enqueue_multiple_mem_back;
+               qbman_swp_enqueue_multiple_fd_ptr =
+                               qbman_swp_enqueue_multiple_fd_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                                qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
@@ -248,21 +295,33 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }
 
+       if (dpaa2_svr_family == SVR_LS1080A) {
+               qbman_swp_enqueue_ring_mode_ptr =
+                               qbman_swp_enqueue_ring_mode_cinh_direct;
+               qbman_swp_enqueue_multiple_ptr =
+                               qbman_swp_enqueue_multiple_cinh_direct;
+               qbman_swp_enqueue_multiple_fd_ptr =
+                               qbman_swp_enqueue_multiple_fd_cinh_direct;
+               qbman_swp_enqueue_multiple_desc_ptr =
+                               qbman_swp_enqueue_multiple_desc_cinh_direct;
+       }
+
        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
-               p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
+               p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
-       p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
+       p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
-       if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-               p->eqcr.ci = qbman_cinh_read(&p->sys,
-                               QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
+       if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+                       && (d->cena_access_mode == qman_cena_fastest_access))
+               p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
+                                            & p->eqcr.pi_ci_mask;
        else
-               p->eqcr.ci = qbman_cinh_read(&p->sys,
-                               QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
+               p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
+                                            & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size -
                                qm_cyc_diff(p->eqcr.pi_ring_size,
-                               p->eqcr.ci & (p->eqcr.pi_mask<<1),
-                               p->eqcr.pi & (p->eqcr.pi_mask<<1));
+                               p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
+                               p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
 
        portal_idx_map[p->desc.idx] = p;
        return p;
@@ -362,10 +421,11 @@ void *qbman_swp_mc_start(struct qbman_swp *p)
 #ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
 #endif
-       if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-               ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
-       else
+       if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+                   && (p->desc.cena_access_mode == qman_cena_fastest_access))
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
+       else
+               ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
 #ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
@@ -385,16 +445,17 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
-       if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-               dma_wmb();
-               *v = cmd_verb | p->mc.valid_bit;
-               qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
-               clean(cmd);
-       } else {
+       if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+                   && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                *v = cmd_verb | p->mr.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
                dma_wmb();
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+       } else {
+               dma_wmb();
+               *v = cmd_verb | p->mc.valid_bit;
+               qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+               clean(cmd);
        }
 #ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
@@ -407,30 +468,31 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
 #ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
 #endif
-       if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-               qbman_cena_invalidate_prefetch(&p->sys,
-                               QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-               ret = qbman_cena_read(&p->sys,
-                               QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+       if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
+               && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
+               ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
+               /* Command completed if the valid bit is toggled */
+               if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+                       return NULL;
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
-               p->mc.valid_bit ^= QB_VALID_BIT;
+               p->mr.valid_bit ^= QB_VALID_BIT;
        } else {
-               ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
-               /* Command completed if the valid bit is toggled */
-               if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
-                       return NULL;
+               qbman_cena_invalidate_prefetch(&p->sys,
+                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+               ret = qbman_cena_read(&p->sys,
+                                     QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
-               p->mr.valid_bit ^= QB_VALID_BIT;
+               p->mc.valid_bit ^= QB_VALID_BIT;
        }
 #ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
@@ -640,8 +702,8 @@ static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
@@ -671,6 +733,46 @@ static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
        return 0;
 }
 
+static int qbman_swp_enqueue_ring_mode_cinh_direct(
+               struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd)
+{
+       uint32_t *p;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, full_mask, half_mask;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cinh_read(&s->sys,
+                               QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                               eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return -EBUSY;
+       }
+
+       p = qbman_cena_write_start_wo_shadow(&s->sys,
+                       QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
+       memcpy(&p[1], &cl[1], 28);
+       memcpy(&p[8], fd, sizeof(*fd));
+       lwsync();
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       p[0] = cl[0] | s->eqcr.pi_vb;
+       qbman_cena_write_complete_wo_shadow(&s->sys,
+                       QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
+       s->eqcr.pi++;
+       s->eqcr.pi &= full_mask;
+       s->eqcr.available--;
+       if (!(s->eqcr.pi & half_mask))
+               s->eqcr.pi_vb ^= QB_VALID_BIT;
+
+       return 0;
+}
+
 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                const struct qbman_fd *fd)
@@ -679,12 +781,12 @@ static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
-               s->eqcr.ci = qbman_cinh_read(&s->sys,
-                               QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
@@ -737,8 +839,8 @@ static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
        int i, num_enqueued = 0;
        uint64_t addr_cena;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
@@ -794,25 +896,27 @@ static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
        return num_enqueued;
 }
 
-static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
-                                              const struct qbman_eq_desc *d,
-                                              const struct qbman_fd *fd,
-                                              uint32_t *flags,
-                                              int num_frames)
+static int qbman_swp_enqueue_multiple_cinh_direct(
+               struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd,
+               uint32_t *flags,
+               int num_frames)
 {
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
+       uint64_t addr_cena;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
-                               QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+                               QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
-                                       eqcr_ci, s->eqcr.ci);
+                               eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }
@@ -830,6 +934,8 @@ static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                eqcr_pi++;
        }
 
+       lwsync();
+
        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
@@ -846,6 +952,65 @@ static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
+
+       /* Flush all the cacheline without load/store in between */
+       eqcr_pi = s->eqcr.pi;
+       addr_cena = (size_t)s->sys.addr_cena;
+       for (i = 0; i < num_enqueued; i++) {
+               dcbf(addr_cena +
+                       QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               eqcr_pi++;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+                                              const struct qbman_eq_desc *d,
+                                              const struct qbman_fd *fd,
+                                              uint32_t *flags,
+                                              int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                       eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], &fd[i], sizeof(*fd));
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               p[0] = cl[0] | s->eqcr.pi_vb;
+
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
        s->eqcr.pi = eqcr_pi & full_mask;
 
        dma_wmb();
@@ -854,7 +1019,7 @@ static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
        return num_enqueued;
 }
 
-inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct qbman_fd *fd,
                                      uint32_t *flags,
@@ -863,6 +1028,214 @@ inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
        return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
 }
 
+static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
+                                               const struct qbman_eq_desc *d,
+                                               struct qbman_fd **fd,
+                                               uint32_t *flags,
+                                               int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+       uint64_t addr_cena;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                               eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+               eqcr_pi++;
+       }
+
+       lwsync();
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+
+       /* Flush all the cacheline without load/store in between */
+       eqcr_pi = s->eqcr.pi;
+       addr_cena = (size_t)s->sys.addr_cena;
+       for (i = 0; i < num_enqueued; i++) {
+               dcbf(addr_cena +
+                       QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               eqcr_pi++;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_fd_cinh_direct(
+               struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               struct qbman_fd **fd,
+               uint32_t *flags,
+               int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+       uint64_t addr_cena;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cinh_read(&s->sys,
+                               QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                               eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+               eqcr_pi++;
+       }
+
+       lwsync();
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+
+       /* Flush all the cacheline without load/store in between */
+       eqcr_pi = s->eqcr.pi;
+       addr_cena = (size_t)s->sys.addr_cena;
+       for (i = 0; i < num_enqueued; i++) {
+               dcbf(addr_cena +
+                       QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               eqcr_pi++;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
+                                                 const struct qbman_eq_desc *d,
+                                                 struct qbman_fd **fd,
+                                                 uint32_t *flags,
+                                                 int num_frames)
+{
+       uint32_t *p = NULL;
+       const uint32_t *cl = qb_cl(d);
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                       eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
+               eqcr_pi++;
+       }
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+                       struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+                       d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+                               ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+               }
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       dma_wmb();
+       qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+                               (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+       return num_enqueued;
+}
+
+int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
+                                        const struct qbman_eq_desc *d,
+                                        struct qbman_fd **fd,
+                                        uint32_t *flags,
+                                        int num_frames)
+{
+       return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
+}
+
 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
@@ -874,8 +1247,8 @@ static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
        int i, num_enqueued = 0;
        uint64_t addr_cena;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
@@ -927,6 +1300,71 @@ static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
        return num_enqueued;
 }
 
+static int qbman_swp_enqueue_multiple_desc_cinh_direct(
+               struct qbman_swp *s,
+               const struct qbman_eq_desc *d,
+               const struct qbman_fd *fd,
+               int num_frames)
+{
+       uint32_t *p;
+       const uint32_t *cl;
+       uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+       int i, num_enqueued = 0;
+       uint64_t addr_cena;
+
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
+       if (!s->eqcr.available) {
+               eqcr_ci = s->eqcr.ci;
+               s->eqcr.ci = qbman_cinh_read(&s->sys,
+                               QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+               s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+                                       eqcr_ci, s->eqcr.ci);
+               if (!s->eqcr.available)
+                       return 0;
+       }
+
+       eqcr_pi = s->eqcr.pi;
+       num_enqueued = (s->eqcr.available < num_frames) ?
+                       s->eqcr.available : num_frames;
+       s->eqcr.available -= num_enqueued;
+       /* Fill in the EQCR ring */
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               cl = qb_cl(&d[i]);
+               memcpy(&p[1], &cl[1], 28);
+               memcpy(&p[8], &fd[i], sizeof(*fd));
+               eqcr_pi++;
+       }
+
+       lwsync();
+
+       /* Set the verb byte, have to substitute in the valid-bit */
+       eqcr_pi = s->eqcr.pi;
+       for (i = 0; i < num_enqueued; i++) {
+               p = qbman_cena_write_start_wo_shadow(&s->sys,
+                               QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               cl = qb_cl(&d[i]);
+               p[0] = cl[0] | s->eqcr.pi_vb;
+               eqcr_pi++;
+               if (!(eqcr_pi & half_mask))
+                       s->eqcr.pi_vb ^= QB_VALID_BIT;
+       }
+
+       /* Flush all the cacheline without load/store in between */
+       eqcr_pi = s->eqcr.pi;
+       addr_cena = (size_t)s->sys.addr_cena;
+       for (i = 0; i < num_enqueued; i++) {
+               dcbf(addr_cena +
+                       QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+               eqcr_pi++;
+       }
+       s->eqcr.pi = eqcr_pi & full_mask;
+
+       return num_enqueued;
+}
+
 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
@@ -937,12 +1375,12 @@ static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
 
-       half_mask = (s->eqcr.pi_mask>>1);
-       full_mask = s->eqcr.pi_mask;
+       half_mask = (s->eqcr.pi_ci_mask>>1);
+       full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
-               s->eqcr.ci = qbman_cinh_read(&s->sys,
-                               QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+               s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+                               QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
@@ -983,7 +1421,7 @@ static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
 
        return num_enqueued;
 }
-inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct qbman_fd *fd,
                                           int num_frames)
@@ -1161,7 +1599,7 @@ static int qbman_swp_pull_mem_back(struct qbman_swp *s,
        return 0;
 }
 
/* Issue the volatile dequeue (pull) command described by 'd' on portal 's'.
 * Dispatches through the qbman_swp_pull_ptr hook, which presumably selects
 * the direct or memory-backed portal implementation at init time — the
 * binding site is outside this view; confirm against qbman_swp_init.
 * Returns whatever the selected implementation returns (0 on success).
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
@@ -1198,7 +1636,7 @@ void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
  * only once, so repeated calls can return a sequence of DQRR entries, without
  * requiring they be consumed immediately or in any particular order.
  */
/* Return the next available DQRR entry for portal 's', or NULL if none.
 * Thin dispatcher: forwards to the qbman_swp_dqrr_next_ptr hook
 * (direct vs. memory-backed access variant — binding site not visible
 * here, presumably set during portal init). Each entry is reported only
 * once; see the function comment above this definition in the file.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
@@ -1315,9 +1753,9 @@ const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
-       if ((response_verb == QBMAN_RESULT_DQ) &&
-           (flags & QBMAN_DQ_STAT_VOLATILE) &&
-           (flags & QBMAN_DQ_STAT_EXPIRED))
+       if ((response_verb == QBMAN_RESULT_DQ)
+                       && (flags & QBMAN_DQ_STAT_VOLATILE)
+                       && (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);
        return p;
 }
@@ -1570,6 +2008,32 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
        return qbman_result_SCN_ctx(scn);
 }
 
+/********************/
+/* Parsing EQ RESP  */
+/********************/
+struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
+{
+       return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
+}
+
/* Store response-id 'val' into an enqueue response entry.
 * Presumably used by the caller to correlate a completed enqueue with
 * the command that produced it — confirm against the enqueue path.
 */
void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
{
	eqresp->eq_resp.rspid = val;
}
+
/* Read back the response-id previously stored in an enqueue response
 * entry (counterpart of qbman_result_eqresp_set_rspid()).
 */
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
{
	return eqresp->eq_resp.rspid;
}
+
+uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
+{
+       if (eqresp->eq_resp.rc == 0xE)
+               return 0;
+       else
+               return -1;
+}
+
 /******************/
 /* Buffer release */
 /******************/
@@ -1665,7 +2129,7 @@ static int qbman_swp_release_mem_back(struct qbman_swp *s,
        return 0;
 }
 
-inline int qbman_swp_release(struct qbman_swp *s,
+int qbman_swp_release(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const uint64_t *buffers,
                             unsigned int num_buffers)