bus/dpaa: make vdqcr configurable
[dpdk.git] / drivers / bus / dpaa / base / qbman / qman.c
index 609bc76..f5fe5ef 100644 (file)
@@ -314,9 +314,9 @@ loop:
                if (!msg)
                        return 0;
        }
-       if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+       if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
                /* We aren't draining anything but FQRNIs */
-               pr_err("Found verb 0x%x in MR\n", msg->verb);
+               pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
                return -1;
        }
        qm_mr_next(p);
@@ -483,7 +483,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
        /* when accessing 'verb', use __raw_readb() to ensure that compiler
         * inlining doesn't try to optimise out "excess reads".
         */
-       if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+       if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
                mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
                if (!mr->pi)
                        mr->vbit ^= QM_MR_VERB_VBIT;
@@ -625,7 +625,7 @@ fail_eqcr:
 
 #define MAX_GLOBAL_PORTALS 8
 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
-static int global_portals_used[MAX_GLOBAL_PORTALS];
+rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
 
 static struct qman_portal *
 qman_alloc_global_portal(void)
@@ -633,10 +633,8 @@ qman_alloc_global_portal(void)
        unsigned int i;
 
        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
-               if (global_portals_used[i] == 0) {
-                       global_portals_used[i] = 1;
+               if (rte_atomic16_test_and_set(&global_portals_used[i]))
                        return &global_portals[i];
-               }
        }
        pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
 
@@ -650,7 +648,7 @@ qman_free_global_portal(struct qman_portal *portal)
 
        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
                if (&global_portals[i] == portal) {
-                       global_portals_used[i] = 0;
+                       rte_atomic16_clear(&global_portals_used[i]);
                        return 0;
                }
        }
@@ -834,7 +832,7 @@ mr_loop:
                        goto mr_done;
                swapped_msg = *msg;
                hw_fd_to_cpu(&swapped_msg.ern.fd);
-               verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+               verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
                /* The message is a software ERN iff the 0x20 bit is set */
                if (verb & 0x20) {
                        switch (verb) {
@@ -1057,64 +1055,63 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
                                 void **bufs,
                                 struct qman_portal *p)
 {
-       const struct qm_dqrr_entry *dq;
+       struct qm_portal *portal = &p->p;
+       register struct qm_dqrr *dqrr = &portal->dqrr;
+       struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
        struct qman_fq *fq;
-       enum qman_cb_dqrr_result res;
-       unsigned int limit = 0;
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-       struct qm_dqrr_entry *shadow;
-#endif
-       unsigned int rx_number = 0;
+       unsigned int limit = 0, rx_number = 0;
+       uint32_t consume = 0;
 
        do {
                qm_dqrr_pvb_update(&p->p);
-               dq = qm_dqrr_current(&p->p);
-               if (unlikely(!dq))
+               if (!dqrr->fill)
                        break;
+
+               dq[rx_number] = dqrr->cursor;
+               dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+               /* Prefetch the next DQRR entry */
+               rte_prefetch0(dqrr->cursor);
+
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-       /* If running on an LE system the fields of the
-        * dequeue entry must be swapper.  Because the
-        * QMan HW will ignore writes the DQRR entry is
-        * copied and the index stored within the copy
-        */
-               shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
-               *shadow = *dq;
-               dq = shadow;
-               shadow->fqid = be32_to_cpu(shadow->fqid);
-               shadow->contextB = be32_to_cpu(shadow->contextB);
-               shadow->seqnum = be16_to_cpu(shadow->seqnum);
-               hw_fd_to_cpu(&shadow->fd);
+               /* If running on an LE system the fields of the
+                * dequeue entry must be swapped.  Because the
+                * QMan HW will ignore writes the DQRR entry is
+                * copied and the index stored within the copy
+                */
+               shadow[rx_number] =
+                       &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+               shadow[rx_number]->fd.opaque_addr =
+                       dq[rx_number]->fd.opaque_addr;
+               shadow[rx_number]->fd.addr =
+                       be40_to_cpu(dq[rx_number]->fd.addr);
+               shadow[rx_number]->fd.opaque =
+                       be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+               shadow[rx_number] = dq[rx_number];
 #endif
 
                /* SDQCR: context_b points to the FQ */
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-               fq = get_fq_table_entry(dq->contextB);
+               fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
 #else
-               fq = (void *)(uintptr_t)dq->contextB;
+               fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
 #endif
-               /* Now let the callback do its stuff */
-               res = fq->cb.dqrr_dpdk_cb(NULL, p, fq, dq, &bufs[rx_number]);
+               if (fq->cb.dqrr_prepare)
+                       fq->cb.dqrr_prepare(shadow[rx_number],
+                                           &bufs[rx_number]);
+
+               consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
                rx_number++;
-               /* Interpret 'dq' from a driver perspective. */
-               /*
-                * Parking isn't possible unless HELDACTIVE was set. NB,
-                * FORCEELIGIBLE implies HELDACTIVE, so we only need to
-                * check for HELDACTIVE to cover both.
-                */
-               DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
-                           (res != qman_cb_dqrr_park));
-               qm_dqrr_cdc_consume_1ptr(&p->p, dq, res == qman_cb_dqrr_park);
-               /* Move forward */
-               qm_dqrr_next(&p->p);
-               /*
-                * Entry processed and consumed, increment our counter.  The
-                * callback can request that we exit after consuming the
-                * entry, and we also exit if we reach our processing limit,
-                * so loop back only if neither of these conditions is met.
-                */
-       } while (likely(++limit < poll_limit));
+               --dqrr->fill;
+       } while (++limit < poll_limit);
 
-       return limit;
+       if (rx_number)
+               fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+       /* Consume all the DQRR entries together */
+       qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+       return rx_number;
 }
 
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
@@ -1668,7 +1665,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
                         */
                        struct qm_mr_entry msg;
 
-                       msg.verb = QM_MR_VERB_FQRNI;
+                       msg.ern.verb = QM_MR_VERB_FQRNI;
                        msg.fq.fqs = mcr->alterfq.fqs;
                        msg.fq.fqid = fq->fqid;
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
@@ -2005,13 +2002,13 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
        return 0;
 }
 
-int qman_set_vdq(struct qman_fq *fq, u16 num)
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 {
        struct qman_portal *p = get_affine_portal();
        uint32_t vdqcr;
        int ret = -EBUSY;
 
-       vdqcr = QM_VDQCR_EXACT;
+       vdqcr = vdqcr_flags;
        vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
 
        if ((fq->state != qman_fq_state_parked) &&
@@ -2198,7 +2195,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
                eq->fd.addr = cpu_to_be40(fd->addr);
                eq->fd.status = cpu_to_be32(fd->status);
                eq->fd.opaque = cpu_to_be32(fd->opaque);
-               if (flags[i] & QMAN_ENQUEUE_FLAG_DCA) {
+               if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
                        eq->dca = QM_EQCR_DCA_ENABLE |
                                ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
                }
@@ -2645,7 +2642,7 @@ int qman_shutdown_fq(u32 fqid)
                                qm_mr_pvb_update(low_p);
                                msg = qm_mr_current(low_p);
                                while (msg) {
-                                       if ((msg->verb &
+                                       if ((msg->ern.verb &
                                             QM_MR_VERB_TYPE_MASK)
                                            == QM_MR_VERB_FQRN)
                                                found_fqrn = 1;
@@ -2713,7 +2710,7 @@ int qman_shutdown_fq(u32 fqid)
                        qm_mr_pvb_update(low_p);
                        msg = qm_mr_current(low_p);
                        while (msg) {
-                               if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+                               if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
                                    QM_MR_VERB_FQRL)
                                        orl_empty = 1;
                                qm_mr_next(low_p);