+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs,
+ struct qman_portal *p)
+{
+ struct qm_portal *portal = &p->p;
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+ struct qman_fq *fq;
+ unsigned int limit = 0, rx_number = 0;
+ uint32_t consume = 0;
+
+ do {
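+		/* Refresh the producer index; dqrr->fill counts the
+		 * not-yet-consumed entries in the ring.
+		 */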
+ qm_dqrr_pvb_update(&p->p);
+ if (!dqrr->fill)
+ break;
+
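+		/* Grab the current entry and advance the cursor, wrapping
+		 * at the end of the ring.
+		 */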
+ dq[rx_number] = dqrr->cursor;
+ dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+ /* Prefetch the next DQRR entry */
+ rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		/* If running on a little-endian system, the fields of the
+		 * dequeue entry must be byte-swapped. Since the QMan HW
+		 * ignores writes to the DQRR, the entry is copied into a
+		 * shadow ring at the same index and swapped there.
+		 */
+ shadow[rx_number] =
+ &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+ shadow[rx_number]->fd.opaque_addr =
+ dq[rx_number]->fd.opaque_addr;
+ shadow[rx_number]->fd.addr =
+ be40_to_cpu(dq[rx_number]->fd.addr);
+ shadow[rx_number]->fd.opaque =
+ be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+ shadow[rx_number] = dq[rx_number];
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = qman_fq_lookup_table[dq[rx_number]->contextB];
+#else
+		fq = (void *)(uintptr_t)dq[rx_number]->contextB;
+#endif
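+		/* Optional per-FQ hook to stage this frame into bufs[]
+		 * before the bulk pull callback below.
+		 */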
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
+
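+		/* Record this entry in the consume bitmask: DCAP bit 31
+		 * corresponds to DQRR index 0.
+		 */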
+		consume |= (1U << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+ rx_number++;
+ --dqrr->fill;
+ } while (++limit < poll_limit);
+
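+	/* Hand the whole burst to the pull-mode callback in one call */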
+ if (rx_number)
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+	/* Consume all the DQRR entries together with one DCAP write */
+ qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+ return rx_number;
+}
+
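+/*
+ * Clear the data-availability bits and all enabled interrupt sources,
+ * except the congestion-state ones, on the calling thread's affine
+ * portal.
+ */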
+void qman_clear_irq(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ qm_isr_status_clear(&p->p, clear);
+}
+
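+/*
+ * Event-mode dequeue: process up to @poll_limit DQRR entries on the
+ * affine portal, letting each FQ's DPDK callback fill ev[] and bufs[],
+ * and return the number of entries processed.
+ */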
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+ struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct qm_dqrr_entry *shadow;
+#endif
+ unsigned int rx_number = 0;
+
+ do {
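+		/* Refresh the producer index and peek at the next valid
+		 * DQRR entry, if any.
+		 */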
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		/*
+		 * If running on a little-endian system, the fields of the
+		 * dequeue entry must be byte-swapped. Since the QMan HW
+		 * ignores writes to the DQRR, the entry is copied into a
+		 * shadow ring at the same index and swapped there.
+		 */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+ dq, &bufs[rx_number]);
+ rx_number++;
+		/*
+		 * Interpret 'dq' from a driver perspective: parking isn't
+		 * possible unless HELDACTIVE was set. NB, FORCEELIGIBLE
+		 * implies HELDACTIVE, so we only need to check for
+		 * HELDACTIVE to cover both.
+		 */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
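+		/* Unless the callback deferred the entry, consume it now,
+		 * parking the FQ if requested.
+		 */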
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+		/*
+		 * Entry processed and consumed; loop back unless we have
+		 * reached the poll limit.
+		 */
+ } while (++limit < poll_limit);
+
+ return limit;
+}
+