replace alignment attributes
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index ffb008e..b596e79 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1,13 +1,17 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP
+ * Copyright 2017,2019 NXP
  *
  */
 
 #include "qman.h"
 #include <rte_branch_prediction.h>
 #include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
+
+#include <dpaa_bits.h>
 
 /* Compilation constants */
 #define DQRR_MAXFILL   15
@@ -90,7 +94,7 @@ struct qman_portal {
         * address (6 bits for address shift + 4 bits for the DQRR size).
         */
        struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
-                   __attribute__((aligned(1024)));
+                   __rte_aligned(1024);
 #endif
 };
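
For reference, __rte_aligned() is DPDK's portable wrapper (from rte_common.h)
around the compiler alignment attribute, which is what this change swaps in. A
minimal sketch of equivalent usage:

    #include <stdint.h>
    #include <rte_common.h>   /* provides __rte_aligned() */

    /* 1024-byte alignment keeps the low 10 address bits zero, so a DQRR
     * index can be recovered from an entry pointer (cf. DQRR_PTR2IDX).
     */
    struct shadow_ring_sketch {
            uint8_t entry[64];
    } __rte_aligned(1024);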
 
@@ -165,6 +169,11 @@ int qman_setup_fq_lookup_table(size_t num_entries)
        return 0;
 }
 
+void qman_set_fq_lookup_table(void **fq_table)
+{
+       qman_fq_lookup_table = fq_table;
+}
+
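
The setter above lets a caller install an already-built table instead of
allocating one with qman_setup_fq_lookup_table(); a hypothetical use, with the
helper name purely illustrative:

    /* Hypothetical: a secondary process reuses the primary's table. */
    void **table = fetch_table_from_primary();   /* illustrative helper */
    qman_set_fq_lookup_table(table);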
 /* global structure that maintains fq object mapping */
 static DEFINE_SPINLOCK(fq_hash_table_lock);
 
@@ -312,9 +321,9 @@ loop:
                if (!msg)
                        return 0;
        }
-       if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+       if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
                /* We aren't draining anything but FQRNIs */
-               pr_err("Found verb 0x%x in MR\n", msg->verb);
+               pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
                return -1;
        }
        qm_mr_next(p);
@@ -481,7 +490,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
        /* when accessing 'verb', use __raw_readb() to ensure that compiler
         * inlining doesn't try to optimise out "excess reads".
         */
-       if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+       if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
                mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
                if (!mr->pi)
                        mr->vbit ^= QM_MR_VERB_VBIT;
@@ -491,11 +500,10 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
        dcbit_ro(res);
 }
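
The vbit handling above is the usual valid-bit ring convention: the producer
flips the V bit on every wrap, so a slot holds a fresh entry only while its V
bit matches the consumer's expected value. The test in isolation, as a sketch:

    /* Sketch: non-zero when 'verb' carries a new entry for a consumer
     * currently expecting 'vbit' (QM_MR_VERB_VBIT masks the V bit).
     */
    static inline int mr_entry_is_new(uint8_t verb, uint8_t vbit)
    {
            return (verb & QM_MR_VERB_VBIT) == vbit;
    }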
 
-static inline
-struct qman_portal *qman_create_portal(
-                       struct qman_portal *portal,
-                             const struct qm_portal_config *c,
-                             const struct qman_cgrs *cgrs)
+struct qman_portal *
+qman_init_portal(struct qman_portal *portal,
+                  const struct qm_portal_config *c,
+                  const struct qman_cgrs *cgrs)
 {
        struct qm_portal *p;
        char buf[16];
@@ -504,6 +512,9 @@ struct qman_portal *qman_create_portal(
 
        p = &portal->p;
 
+       if (!c)
+               c = portal->config;
+
        if (dpaa_svr_family == SVR_LS1043A_FAMILY)
                portal->use_eqcr_ci_stashing = 3;
        else
@@ -623,16 +634,16 @@ fail_eqcr:
 
 #define MAX_GLOBAL_PORTALS 8
 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
-static int global_portals_used[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
 
-static struct qman_portal *
-qman_alloc_global_portal(void)
+struct qman_portal *
+qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
 {
        unsigned int i;
 
        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
-               if (global_portals_used[i] == 0) {
-                       global_portals_used[i] = 1;
+               if (rte_atomic16_test_and_set(&global_portals_used[i])) {
+                       global_portals[i].config = q_pcfg;
                        return &global_portals[i];
                }
        }
@@ -641,37 +652,36 @@ qman_alloc_global_portal(void)
        return NULL;
 }
 
-static int
+int
 qman_free_global_portal(struct qman_portal *portal)
 {
        unsigned int i;
 
        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
                if (&global_portals[i] == portal) {
-                       global_portals_used[i] = 0;
+                       rte_atomic16_clear(&global_portals_used[i]);
                        return 0;
                }
        }
        return -1;
 }
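
Switching the slot flags to rte_atomic16_t makes the claim race-free: only the
caller that flips a flag from 0 to 1 sees a non-zero return, so two threads can
no longer grab the same portal. The claim/release pattern in isolation:

    #include <rte_atomic.h>

    static rte_atomic16_t slot_used;

    /* Returns 1 for exactly one concurrent caller; that caller owns the slot. */
    static int claim_slot(void)
    {
            return rte_atomic16_test_and_set(&slot_used);
    }

    static void release_slot(void)
    {
            rte_atomic16_clear(&slot_used);
    }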
 
+void
+qman_portal_uninhibit_isr(struct qman_portal *portal)
+{
+       qm_isr_uninhibit(&portal->p);
+}
+
 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
-                                             const struct qman_cgrs *cgrs,
-                                             int alloc)
+                                             const struct qman_cgrs *cgrs)
 {
        struct qman_portal *res;
-       struct qman_portal *portal;
-
-       if (alloc)
-               portal = qman_alloc_global_portal();
-       else
-               portal = get_affine_portal();
+       struct qman_portal *portal = get_affine_portal();
 
        /* A criterion for calling this function (from qman_driver.c) is that
         * we're already affine to the cpu and won't schedule onto another cpu.
         */
-
-       res = qman_create_portal(portal, c, cgrs);
+       res = qman_init_portal(portal, c, cgrs);
        if (res) {
                spin_lock(&affine_mask_lock);
                CPU_SET(c->cpu, &affine_mask);
@@ -832,7 +842,7 @@ mr_loop:
                        goto mr_done;
                swapped_msg = *msg;
                hw_fd_to_cpu(&swapped_msg.ern.fd);
-               verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+               verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
                /* The message is a software ERN iff the 0x20 bit is set */
                if (verb & 0x20) {
                        switch (verb) {
@@ -852,11 +862,9 @@ mr_loop:
                        case QM_MR_VERB_FQPN:
                                /* Parked */
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-                               fq = get_fq_table_entry(
-                                       be32_to_cpu(msg->fq.contextB));
+                               fq = get_fq_table_entry(msg->fq.contextB);
 #else
-                               fq = (void *)(uintptr_t)
-                                       be32_to_cpu(msg->fq.contextB);
+                               fq = (void *)(uintptr_t)msg->fq.contextB;
 #endif
                                fq_state_change(p, fq, msg, verb);
                                if (fq->cb.fqs)
@@ -967,7 +975,6 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
                *shadow = *dq;
                dq = shadow;
                shadow->fqid = be32_to_cpu(shadow->fqid);
-               shadow->contextB = be32_to_cpu(shadow->contextB);
                shadow->seqnum = be16_to_cpu(shadow->seqnum);
                hw_fd_to_cpu(&shadow->fd);
 #endif
@@ -1040,6 +1047,89 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
        return limit;
 }
 
+int qman_irqsource_add(u32 bits)
+{
+       struct qman_portal *p = get_affine_portal();
+
+       bits = bits & QM_PIRQ_VISIBLE;
+
+       /* Clear any previously remaining interrupt conditions in
+        * QCSP_ISR. This prevents raising a false interrupt when
+        * interrupt conditions are enabled in QCSP_IER.
+        */
+       qm_isr_status_clear(&p->p, bits);
+       dpaa_set_bits(bits, &p->irq_sources);
+       qm_isr_enable_write(&p->p, p->irq_sources);
+
+       return 0;
+}
+
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
+{
+       bits = bits & QM_PIRQ_VISIBLE;
+
+       /* Clear any previously remaining interrupt conditions in
+        * QCSP_ISR. This prevents raising a false interrupt when
+        * interrupt conditions are enabled in QCSP_IER.
+        */
+       qm_isr_status_clear(&p->p, bits);
+       dpaa_set_bits(bits, &p->irq_sources);
+       qm_isr_enable_write(&p->p, p->irq_sources);
+
+       return 0;
+}
+
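
A hedged sketch of interrupt-mode use of these helpers; QM_PIRQ_DQRI is
assumed to be the DQRR-available source, and the wait primitive is
illustrative:

    qman_irqsource_add(QM_PIRQ_DQRI);   /* arm DQRR interrupts (assumed bit) */
    wait_for_portal_irq();              /* hypothetical, e.g. epoll on a UIO fd */
    qman_irqsource_remove(QM_PIRQ_DQRI);
    /* ... poll the portal with interrupts off, then re-add the source ... */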
+int qman_irqsource_remove(u32 bits)
+{
+       struct qman_portal *p = get_affine_portal();
+       u32 ier;
+
+       /* Our interrupt handler only processes+clears status register bits that
+        * are in p->irq_sources. As we're trimming that mask, if one of them
+        * were to assert in the status register just before we remove it from
+        * the enable register, there would be an interrupt-storm when we
+        * release the IRQ lock. So we wait for the enable register update to
+        * take effect in h/w (by reading it back) and then clear all other bits
+        * in the status register. I.e. we clear them from ISR once it's certain
+        * IER won't allow them to reassert.
+        */
+
+       bits &= QM_PIRQ_VISIBLE;
+       dpaa_clear_bits(bits, &p->irq_sources);
+       qm_isr_enable_write(&p->p, p->irq_sources);
+       ier = qm_isr_enable_read(&p->p);
+       /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+        * data-dependency, i.e. to protect against re-ordering.
+        */
+       qm_isr_status_clear(&p->p, ~ier);
+       return 0;
+}
+
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+       u32 ier;
+
+       /* Our interrupt handler only processes+clears status register bits that
+        * are in p->irq_sources. As we're trimming that mask, if one of them
+        * were to assert in the status register just before we remove it from
+        * the enable register, there would be an interrupt-storm when we
+        * release the IRQ lock. So we wait for the enable register update to
+        * take effect in h/w (by reading it back) and then clear all other bits
+        * in the status register. I.e. we clear them from ISR once it's certain
+        * IER won't allow them to reassert.
+        */
+
+       bits &= QM_PIRQ_VISIBLE;
+       dpaa_clear_bits(bits, &p->irq_sources);
+       qm_isr_enable_write(&p->p, p->irq_sources);
+       ier = qm_isr_enable_read(&p->p);
+       /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+        * data-dependency, i.e. to protect against re-ordering.
+        */
+       qm_isr_status_clear(&p->p, ~ier);
+       return 0;
+}
+
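
The ordering argument in the comment, reduced to its three steps (a sketch,
not additional driver code):

    /*
     *   qm_isr_enable_write(p, mask);   (1) disable the trimmed sources
     *   ier = qm_isr_enable_read(p);    (2) read-back: (1) has reached h/w
     *   qm_isr_status_clear(p, ~ier);   (3) consumes 'ier', so it cannot be
     *                                       reordered ahead of (2)
     *
     * Using ~ier rather than the original 'bits' is what creates the data
     * dependency between (2) and (3).
     */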
 u16 qman_affine_channel(int cpu)
 {
        if (cpu < 0) {
@@ -1051,6 +1141,144 @@ u16 qman_affine_channel(int cpu)
        return affine_channels[cpu];
 }
 
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+                                void **bufs,
+                                struct qman_portal *p)
+{
+       struct qm_portal *portal = &p->p;
+       register struct qm_dqrr *dqrr = &portal->dqrr;
+       struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+       struct qman_fq *fq;
+       unsigned int limit = 0, rx_number = 0;
+       uint32_t consume = 0;
+
+       do {
+               qm_dqrr_pvb_update(&p->p);
+               if (!dqrr->fill)
+                       break;
+
+               dq[rx_number] = dqrr->cursor;
+               dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+               /* Prefetch the next DQRR entry */
+               rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+               /* If running on an LE system the fields of the
+                * dequeue entry must be swapped. Because the
+                * QMan HW will ignore writes, the DQRR entry is
+                * copied and the index stored within the copy.
+                */
+               shadow[rx_number] =
+                       &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+               shadow[rx_number]->fd.opaque_addr =
+                       dq[rx_number]->fd.opaque_addr;
+               shadow[rx_number]->fd.addr =
+                       be40_to_cpu(dq[rx_number]->fd.addr);
+               shadow[rx_number]->fd.opaque =
+                       be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+               shadow[rx_number] = dq[rx_number];
+#endif
+
+               /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+               fq = qman_fq_lookup_table[dq[rx_number]->contextB];
+#else
+               fq = (void *)dq[rx_number]->contextB;
+#endif
+               if (fq->cb.dqrr_prepare)
+                       fq->cb.dqrr_prepare(shadow[rx_number],
+                                           &bufs[rx_number]);
+
+               consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+               rx_number++;
+               --dqrr->fill;
+       } while (++limit < poll_limit);
+
+       if (rx_number)
+               fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+       /* Consume all the DQRR entries together */
+       qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+       return rx_number;
+}
+
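
A hypothetical receive burst built on this API; the portal pointer, burst size
and consumer are illustrative:

    void *bufs[16];
    unsigned int n = qman_portal_poll_rx(16, bufs, my_portal);  /* my_portal: hypothetical */
    for (unsigned int j = 0; j < n; j++)
            consume_buffer(bufs[j]);    /* hypothetical consumer */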
+void qman_clear_irq(void)
+{
+       struct qman_portal *p = get_affine_portal();
+       u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+               ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+       qm_isr_status_clear(&p->p, clear);
+}
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+                       void **bufs)
+{
+       const struct qm_dqrr_entry *dq;
+       struct qman_fq *fq;
+       enum qman_cb_dqrr_result res;
+       unsigned int limit = 0;
+       struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+       struct qm_dqrr_entry *shadow;
+#endif
+       unsigned int rx_number = 0;
+
+       do {
+               qm_dqrr_pvb_update(&p->p);
+               dq = qm_dqrr_current(&p->p);
+               if (!dq)
+                       break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+               /*
+                * If running on an LE system the fields of the
+                * dequeue entry must be swapped. Because the
+                * QMan HW will ignore writes, the DQRR entry is
+                * copied and the index stored within the copy.
+                */
+               shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+               *shadow = *dq;
+               dq = shadow;
+               shadow->fqid = be32_to_cpu(shadow->fqid);
+               shadow->seqnum = be16_to_cpu(shadow->seqnum);
+               hw_fd_to_cpu(&shadow->fd);
+#endif
+
+               /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+               fq = get_fq_table_entry(dq->contextB);
+#else
+               fq = (void *)(uintptr_t)dq->contextB;
+#endif
+               /* Now let the callback do its stuff */
+               res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+                                        dq, &bufs[rx_number]);
+               rx_number++;
+               /* Interpret 'dq' from a driver perspective. */
+               /*
+                * Parking isn't possible unless HELDACTIVE was set. NB,
+                * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+                * check for HELDACTIVE to cover both.
+                */
+               DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+                           (res != qman_cb_dqrr_park));
+               if (res != qman_cb_dqrr_defer)
+                       qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+                                                res == qman_cb_dqrr_park);
+               /* Move forward */
+               qm_dqrr_next(&p->p);
+               /*
+                * Entry processed and consumed, increment our counter.  The
+                * callback can request that we exit after consuming the
+                * entry, and we also exit if we reach our processing limit,
+                * so loop back only if neither of these conditions is met.
+                */
+       } while (++limit < poll_limit);
+
+       return limit;
+}
+
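
A hedged sketch of an event-mode caller: the per-FQ dqrr_dpdk_cb fills in each
rte_event, and the consumer below is illustrative:

    struct rte_event ev[8];
    void *bufs[8];
    u32 n = qman_portal_dequeue(ev, 8, bufs);
    for (u32 j = 0; j < n; j++)
            process_event(&ev[j], bufs[j]);   /* hypothetical consumer */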
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
 {
        struct qman_portal *p = get_affine_portal();
@@ -1077,7 +1305,6 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
        *shadow = *dq;
        dq = shadow;
        shadow->fqid = be32_to_cpu(shadow->fqid);
-       shadow->contextB = be32_to_cpu(shadow->contextB);
        shadow->seqnum = be16_to_cpu(shadow->seqnum);
        hw_fd_to_cpu(&shadow->fd);
 #endif
@@ -1169,13 +1396,20 @@ u32 qman_static_dequeue_get(struct qman_portal *qp)
        return p->sdqcr;
 }
 
-void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
 {
        struct qman_portal *p = get_affine_portal();
 
        qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
 }
 
+void qman_dca_index(u8 index, int park_request)
+{
+       struct qman_portal *p = get_affine_portal();
+
+       qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
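
qman_dca_index() pairs with the DCA decoding in qman_enqueue_multi() below,
where the DQRR index travels in bits 8 and up of the per-frame flag
((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK). A sketch of building such a flag:

    /* Sketch: encode a DQRR index so qman_enqueue_multi() emits a
     * discrete-consumption (DCA) enqueue for it.
     */
    static inline u32 make_dca_flag(u8 dqrr_index)
    {
            return QMAN_ENQUEUE_FLAG_DCA | ((u32)dqrr_index << 8);
    }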
 /* Frame queue API */
 static const char *mcr_result_str(u8 result)
 {
@@ -1223,6 +1457,7 @@ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
                pr_info("Find empty table entry failed\n");
                return -ENOMEM;
        }
+       fq->qman_fq_lookup_table = qman_fq_lookup_table;
 #endif
        if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
                return 0;
@@ -1366,7 +1601,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 
                mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-               mcc->initfq.fqd.context_b = fq->key;
+               mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
 #else
                mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
 #endif
@@ -1527,7 +1762,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
                         */
                        struct qm_mr_entry msg;
 
-                       msg.verb = QM_MR_VERB_FQRNI;
+                       msg.ern.verb = QM_MR_VERB_FQRNI;
                        msg.fq.fqs = mcr->alterfq.fqs;
                        msg.fq.fqid = fq->fqid;
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
@@ -1864,13 +2099,13 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
        return 0;
 }
 
-int qman_set_vdq(struct qman_fq *fq, u16 num)
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 {
        struct qman_portal *p = get_affine_portal();
        uint32_t vdqcr;
        int ret = -EBUSY;
 
-       vdqcr = QM_VDQCR_EXACT;
+       vdqcr = vdqcr_flags;
        vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
 
        if ((fq->state != qman_fq_state_parked) &&
@@ -2024,8 +2259,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
 }
 
 int qman_enqueue_multi(struct qman_fq *fq,
-                      const struct qm_fd *fd,
-                      int frames_to_send)
+                      const struct qm_fd *fd, u32 *flags,
+                      int frames_to_send)
 {
        struct qman_portal *p = get_affine_portal();
        struct qm_portal *portal = &p->p;
@@ -2033,7 +2268,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
        register struct qm_eqcr *eqcr = &portal->eqcr;
        struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
 
-       u8 i, diff, old_ci, sent = 0;
+       u8 i = 0, diff, old_ci, sent = 0;
 
        /* Update the available entries if no entry is free */
        if (!eqcr->available) {
@@ -2048,15 +2283,85 @@ int qman_enqueue_multi(struct qman_fq *fq,
        /* try to send as many frames as possible */
        while (eqcr->available && frames_to_send--) {
                eq->fqid = fq->fqid_le;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-               eq->tag = cpu_to_be32(fq->key);
-#else
-               eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-#endif
                eq->fd.opaque_addr = fd->opaque_addr;
                eq->fd.addr = cpu_to_be40(fd->addr);
                eq->fd.status = cpu_to_be32(fd->status);
                eq->fd.opaque = cpu_to_be32(fd->opaque);
+               if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+                       eq->dca = QM_EQCR_DCA_ENABLE |
+                               ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+               }
+               i++;
+               eq = (void *)((unsigned long)(eq + 1) &
+                       (~(unsigned long)(QM_EQCR_SIZE << 6)));
+               eqcr->available--;
+               sent++;
+               fd++;
+       }
+       lwsync();
+
+       /* The verb is written for all recorded entries first and the cache
+        * lines are flushed afterwards, so the flushes complete faster.
+        */
+       eq = eqcr->cursor;
+       for (i = 0; i < sent; i++) {
+               eq->__dont_write_directly__verb =
+                       QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+               prev_eq = eq;
+               eq = (void *)((unsigned long)(eq + 1) &
+                       (~(unsigned long)(QM_EQCR_SIZE << 6)));
+               if (unlikely((prev_eq + 1) != eq))
+                       eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+       }
+
+       /* We need to flush all the lines, but without load/store operations
+        * between them.
+        */
+       eq = eqcr->cursor;
+       for (i = 0; i < sent; i++) {
+               dcbf(eq);
+               eq = (void *)((unsigned long)(eq + 1) &
+                       (~(unsigned long)(QM_EQCR_SIZE << 6)));
+       }
+       /* Update cursor for the next call */
+       eqcr->cursor = eq;
+       return sent;
+}
+
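
A hypothetical transmit loop over this API; the EQCR can fill, so the unsent
tail is retried (tx_fq, fds and nb are illustrative):

    int sent = 0;
    while (sent < nb) {
            int n = qman_enqueue_multi(tx_fq, &fds[sent], NULL, nb - sent);
            sent += n;   /* n == 0 means the EQCR was full; just retry */
    }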
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+                     u32 *flags, int frames_to_send)
+{
+       struct qman_portal *p = get_affine_portal();
+       struct qm_portal *portal = &p->p;
+
+       register struct qm_eqcr *eqcr = &portal->eqcr;
+       struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+       u8 i = 0, diff, old_ci, sent = 0;
+
+       /* Update the available entries if no entry is free */
+       if (!eqcr->available) {
+               old_ci = eqcr->ci;
+               eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+               diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+               eqcr->available += diff;
+               if (!diff)
+                       return 0;
+       }
+
+       /* try to send as many frames as possible */
+       while (eqcr->available && frames_to_send--) {
+               eq->fqid = fq[sent]->fqid_le;
+               eq->fd.opaque_addr = fd->opaque_addr;
+               eq->fd.addr = cpu_to_be40(fd->addr);
+               eq->fd.status = cpu_to_be32(fd->status);
+               eq->fd.opaque = cpu_to_be32(fd->opaque);
+               if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+                       eq->dca = QM_EQCR_DCA_ENABLE |
+                               ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+               }
+               i++;
 
                eq = (void *)((unsigned long)(eq + 1) &
                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
@@ -2434,7 +2739,7 @@ int qman_shutdown_fq(u32 fqid)
                                qm_mr_pvb_update(low_p);
                                msg = qm_mr_current(low_p);
                                while (msg) {
-                                       if ((msg->verb &
+                                       if ((msg->ern.verb &
                                             QM_MR_VERB_TYPE_MASK)
                                            == QM_MR_VERB_FQRN)
                                                found_fqrn = 1;
@@ -2502,7 +2807,7 @@ int qman_shutdown_fq(u32 fqid)
                        qm_mr_pvb_update(low_p);
                        msg = qm_mr_current(low_p);
                        while (msg) {
-                               if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+                               if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
                                    QM_MR_VERB_FQRL)
                                        orl_empty = 1;
                                qm_mr_next(low_p);