bus/dpaa: support event dequeue and consumption
author Sunil Kumar Kori <sunil.kori@nxp.com>
Tue, 16 Jan 2018 20:43:55 +0000 (02:13 +0530)
committer Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 19 Jan 2018 15:09:56 +0000 (16:09 +0100)
To receive events from a given event port, add a function that
dequeues events from the corresponding portal. Also add a function
to consume received events based on their DQRR entry index.
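
A minimal usage sketch (illustrative only, not part of the patch) of the
new dequeue path, assuming the caller runs on an lcore with an affine
portal already initialised; deliver_event() is a hypothetical consumer:

    struct rte_event ev[DPAA_PORTAL_DEQUEUE_DEPTH];
    void *bufs[DPAA_PORTAL_DEQUEUE_DEPTH];
    u32 i, num;

    /* Pull up to DPAA_PORTAL_DEQUEUE_DEPTH events from the affine
     * portal; each FQ's dqrr_dpdk_cb fills in ev[] and bufs[].
     */
    num = qman_portal_dequeue(ev, DPAA_PORTAL_DEQUEUE_DEPTH, bufs);
    for (i = 0; i < num; i++)
            deliver_event(&ev[i], bufs[i]); /* hypothetical consumer */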

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
drivers/bus/dpaa/base/qbman/qman.c
drivers/bus/dpaa/dpaa_bus.c
drivers/bus/dpaa/include/fsl_qman.h
drivers/bus/dpaa/rte_bus_dpaa_version.map
drivers/bus/dpaa/rte_dpaa_bus.h
drivers/net/dpaa/dpaa_rxtx.c

index e171356..609bc76 100644 (file)
@@ -8,6 +8,8 @@
 #include "qman.h"
 #include <rte_branch_prediction.h>
 #include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
 
 /* Compilation constants */
 #define DQRR_MAXFILL   15
@@ -1115,6 +1117,74 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
        return limit;
 }
 
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+                       void **bufs)
+{
+       const struct qm_dqrr_entry *dq;
+       struct qman_fq *fq;
+       enum qman_cb_dqrr_result res;
+       unsigned int limit = 0;
+       struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+       struct qm_dqrr_entry *shadow;
+#endif
+       unsigned int rx_number = 0;
+
+       do {
+               qm_dqrr_pvb_update(&p->p);
+               dq = qm_dqrr_current(&p->p);
+               if (!dq)
+                       break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+               /*
+                * If running on an LE system, the fields of the
+                * dequeue entry must be swapped.  Because the
+                * QMan HW will ignore writes, the DQRR entry is
+                * copied and the index stored within the copy.
+                */
+               shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+               *shadow = *dq;
+               dq = shadow;
+               shadow->fqid = be32_to_cpu(shadow->fqid);
+               shadow->contextB = be32_to_cpu(shadow->contextB);
+               shadow->seqnum = be16_to_cpu(shadow->seqnum);
+               hw_fd_to_cpu(&shadow->fd);
+#endif
+
+               /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+               fq = get_fq_table_entry(dq->contextB);
+#else
+               fq = (void *)(uintptr_t)dq->contextB;
+#endif
+               /* Now let the callback do its stuff */
+               res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+                                        dq, &bufs[rx_number]);
+               rx_number++;
+               /* Interpret 'dq' from a driver perspective. */
+               /*
+                * Parking isn't possible unless HELDACTIVE was set. NB,
+                * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+                * check for HELDACTIVE to cover both.
+                */
+               DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+                           (res != qman_cb_dqrr_park));
+               if (res != qman_cb_dqrr_defer)
+                       qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+                                                res == qman_cb_dqrr_park);
+               /* Move forward */
+               qm_dqrr_next(&p->p);
+               /*
+                * Entry processed and consumed, increment our counter.
+                * The poll limit is the only exit condition checked
+                * here: loop back only while we remain below it; any
+                * deferred entries are acknowledged later via DCA.
+                */
+       } while (++limit < poll_limit);
+
+       return limit;
+}
+
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
 {
        struct qman_portal *p = get_affine_portal();
@@ -1233,13 +1303,20 @@ u32 qman_static_dequeue_get(struct qman_portal *qp)
        return p->sdqcr;
 }
 
-void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
 {
        struct qman_portal *p = get_affine_portal();
 
        qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
 }
 
+void qman_dca_index(u8 index, int park_request)
+{
+       struct qman_portal *p = get_affine_portal();
+
+       qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
 /* Frame queue API */
 static const char *mcr_result_str(u8 result)
 {
@@ -2088,8 +2165,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
 }
 
 int qman_enqueue_multi(struct qman_fq *fq,
-                      const struct qm_fd *fd,
-                      int frames_to_send)
+                      const struct qm_fd *fd, u32 *flags,
+                      int frames_to_send)
 {
        struct qman_portal *p = get_affine_portal();
        struct qm_portal *portal = &p->p;
@@ -2097,7 +2174,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
        register struct qm_eqcr *eqcr = &portal->eqcr;
        struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
 
-       u8 i, diff, old_ci, sent = 0;
+       u8 i = 0, diff, old_ci, sent = 0;
 
        /* Update the available entries if no entry is free */
        if (!eqcr->available) {
@@ -2121,7 +2198,11 @@ int qman_enqueue_multi(struct qman_fq *fq,
                eq->fd.addr = cpu_to_be40(fd->addr);
                eq->fd.status = cpu_to_be32(fd->status);
                eq->fd.opaque = cpu_to_be32(fd->opaque);
-
+               if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+                       eq->dca = QM_EQCR_DCA_ENABLE |
+                               ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+               }
+               i++;
                eq = (void *)((unsigned long)(eq + 1) &
                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
                eqcr->available--;
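
A hedged sketch of how a caller might build the new per-frame flags for
qman_enqueue_multi(): QMAN_ENQUEUE_FLAG_DCA enables discrete consumption
acknowledgement, and bits 8 and up carry the DQRR index that the portal
masks with QM_EQCR_DCA_IDXMASK; callers not using DCA pass NULL, as
dpaa_rxtx.c does below. dqrr_index[] and count are illustrative:

    u32 flags[DPAA_PORTAL_DEQUEUE_DEPTH];
    int i, sent;

    /* One flag word per frame: enable DCA and stash the DQRR index so
     * the hardware acknowledges the entry once the enqueue completes.
     */
    for (i = 0; i < count; i++)
            flags[i] = QMAN_ENQUEUE_FLAG_DCA | (dqrr_index[i] << 8);

    sent = qman_enqueue_multi(fq, fd_arr, flags, count);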
index 329a125..78d60be 100644 (file)
@@ -54,6 +54,7 @@ pthread_key_t dpaa_portal_key;
 unsigned int dpaa_svr_family;
 
 RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
 
 static inline void
 dpaa_add_to_device_list(struct rte_dpaa_device *dev)
index 927efb1..99e46e1 100644 (file)
@@ -12,6 +12,7 @@ extern "C" {
 #endif
 
 #include <dpaa_rbtree.h>
+#include <rte_eventdev.h>
 
 /* FQ lookups (turn this on for 64bit user-space) */
 #if (__WORDSIZE == 64)
@@ -1208,6 +1209,7 @@ struct qman_fq {
        /* DPDK Interface */
        void *dpaa_intf;
 
+       struct rte_event ev;
        /* affined portal in case of static queue */
        struct qman_portal *qp;
 
@@ -1298,6 +1300,9 @@ struct qman_cgr {
  */
 int qman_get_portal_index(void);
 
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+                       void **bufs);
+
 /**
 * qman_affine_channel - return the channel ID of a portal
  * @cpu: the cpu whose affine portal is the subject of the query
@@ -1431,7 +1436,21 @@ u32 qman_static_dequeue_get(struct qman_portal *qp);
  * function must be called from the same CPU as that which processed the DQRR
  * entry in the first place.
  */
-void qman_dca(struct qm_dqrr_entry *dq, int park_request);
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_dca_index - Perform a Discrete Consumption Acknowledgment
+ * @index: the DQRR index to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca_index(u8 index, int park_request);
 
 /**
  * qman_eqcr_is_empty - Determine if portal's EQCR is empty
@@ -1699,9 +1718,8 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
  */
 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
 
-int qman_enqueue_multi(struct qman_fq *fq,
-                      const struct qm_fd *fd,
-               int frames_to_send);
+int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
+                      int frames_to_send);
 
 /**
  * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
index 64068de..f5c291f 100644 (file)
@@ -69,17 +69,22 @@ DPDK_18.02 {
        global:
 
        dpaa_svr_family;
+       per_lcore_held_bufs;
+       qm_channel_pool1;
        qman_alloc_cgrid_range;
        qman_alloc_pool_range;
        qman_create_cgr;
+       qman_dca_index;
        qman_delete_cgr;
        qman_enqueue_multi_fq;
        qman_modify_cgr;
        qman_oos_fq;
+       qman_portal_dequeue;
        qman_portal_poll_rx;
        qman_query_fq_frm_cnt;
        qman_release_cgrid_range;
        qman_retire_fq;
+       qman_static_dequeue_add;
        rte_dpaa_portal_fq_close;
        rte_dpaa_portal_fq_init;
 
index d9ade83..6fa0c3d 100644 (file)
@@ -155,6 +155,20 @@ static void dpaainitfn_ ##nm(void) \
 } \
 RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
 
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH      16
+struct dpaa_portal_dqrr {
+       void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+       uint64_t dqrr_held;
+       uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+#define DPAA_PER_LCORE_DQRR_SIZE       RTE_PER_LCORE(held_bufs).dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD       RTE_PER_LCORE(held_bufs).dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i)    RTE_PER_LCORE(held_bufs).mbuf[i]
+
 #ifdef __cplusplus
 }
 #endif
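
A sketch of how the per-lcore bookkeeping above pairs with
qman_dca_index() for deferred consumption; idx (the DQRR index of a
deferred entry) and mbuf are hypothetical:

    /* After a dqrr_dpdk_cb returned qman_cb_dqrr_defer for the entry
     * at index 'idx', park the buffer and mark the index as held.
     */
    DPAA_PER_LCORE_DQRR_MBUF(idx) = mbuf;
    DPAA_PER_LCORE_DQRR_HELD |= 1ULL << idx;
    DPAA_PER_LCORE_DQRR_SIZE++;

    /* Later, from the same lcore, acknowledge the held entry via DCA
     * (no parking requested) and release the slot.
     */
    if (DPAA_PER_LCORE_DQRR_HELD & (1ULL << idx)) {
            qman_dca_index(idx, 0);
            DPAA_PER_LCORE_DQRR_HELD &= ~(1ULL << idx);
            DPAA_PER_LCORE_DQRR_SIZE--;
    }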
index 0413932..3e3719d 100644 (file)
@@ -774,6 +774,7 @@ send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi(q, &fd_arr[loop],
+                                                  NULL,
                                        frames_to_send - loop);
                }
                nb_bufs -= frames_to_send;