From: Nipun Gupta
Date: Wed, 17 Jan 2018 11:39:14 +0000 (+0530)
Subject: net/dpaa2: support atomic queues
X-Git-Tag: spdx-start~148
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=2d3788631862607468ac2f927333257b2dfdf7f3;p=dpdk.git

net/dpaa2: support atomic queues

Signed-off-by: Nipun Gupta
Acked-by: Hemant Agrawal
---

diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 068e9d362f..5a93a0b9b3 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -25,6 +25,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
 CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
 CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
 
 # versioning export map
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 821c8622ed..5f24759492 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1670,6 +1670,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 
 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
+		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
 	else
 		return -EINVAL;
 
@@ -1679,6 +1681,11 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 	cfg.destination.id = dpcon_id;
 	cfg.destination.priority = queue_conf->ev.priority;
 
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
+		cfg.destination.hold_active = 1;
+	}
+
 	options |= DPNI_QUEUE_OPT_USER_CTX;
 	cfg.user_context = (uint64_t)(dpaa2_ethq);
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 8fa1872603..ba0856f3ec 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -118,6 +118,11 @@ void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
 				      const struct qbman_result *dq,
 				      struct dpaa2_queue *rxq,
 				      struct rte_event *ev);
+void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
+				    const struct qbman_fd *fd,
+				    const struct qbman_result *dq,
+				    struct dpaa2_queue *rxq,
+				    struct rte_event *ev);
 uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
 uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
 #endif /* _DPAA2_ETHDEV_H */
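Note on the attach path: applications do not call dpaa2_eth_eventq_attach() directly; the dpaa2 event adapter invokes it when an ethdev Rx queue is bound to a DPCON channel. A minimal sketch of such a caller requesting atomic scheduling follows. Only the first parameter of dpaa2_eth_eventq_attach() is visible in this patch, so the Rx queue index and DPCON id parameters below are assumptions for illustration, not the exact driver API:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

#include "dpaa2_ethdev.h"

/* Hedged sketch: bind Rx queue 0 of 'dev' to DPCON 'dpcon_id' with atomic
 * scheduling; the parameter layout is assumed, not taken from this patch. */
static int
attach_rxq_atomic(struct rte_eth_dev *dev, uint16_t dpcon_id)
{
	struct rte_event_eth_rx_adapter_queue_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;	/* selects the new branch */
	conf.ev.queue_id = 0;
	conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return dpaa2_eth_eventq_attach(dev, 0 /* rx queue */, dpcon_id, &conf);
}

With RTE_SCHED_TYPE_ATOMIC the driver now also sets DPNI_QUEUE_OPT_HOLD_ACTIVE and cfg.destination.hold_active = 1: once a portal dequeues from the frame queue, the hardware holds that queue on the portal until the corresponding DQRR entry is consumed, so frames of one flow are never processed on two cores at once. RTE_SCHED_TYPE_PARALLEL queues keep the previous behaviour.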
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c43e3a16a7..e88b034e68 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -15,11 +15,13 @@
 #include
 #include
+#include
 #include
 
 #include
 #include
 #include
 #include
+#include
 
 #include "dpaa2_ethdev.h"
 #include "base/dpaa2_hw_dpni_annot.h"
@@ -641,6 +643,30 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
 	qbman_swp_dqrr_consume(swp, dq);
 }
 
+void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+				    const struct qbman_fd *fd,
+				    const struct qbman_result *dq,
+				    struct dpaa2_queue *rxq,
+				    struct rte_event *ev)
+{
+	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+	ev->mbuf = eth_fd_to_mbuf(fd);
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+
+	ev->mbuf->seqn = dqrr_index + 1;
+	DPAA2_PER_LCORE_DQRR_SIZE++;
+	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
+}
+
 /*
  * Callback to handle sending packets through WRIOP based interface
  */
@@ -661,6 +687,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint16_t bpid;
 	struct rte_eth_dev *dev = dpaa2_q->dev;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -679,7 +706,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
 	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
 			     dpaa2_q->flow_id, dpaa2_q->tc_index);
 
-	/*Clear the unused FD fields before sending*/
 	while (nb_pkts) {
 		/*Check if the queue is congested*/
@@ -694,6 +720,16 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
+			if ((*bufs)->seqn) {
+				uint8_t dqrr_index = (*bufs)->seqn - 1;
+
+				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
+						dqrr_index;
+				DPAA2_PER_LCORE_DQRR_SIZE--;
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
+			}
+
 			fd_arr[loop].simple.frc = 0;
 			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
 			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
@@ -761,7 +797,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		loop = 0;
 		while (loop < frames_to_send) {
 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
-					&fd_arr[loop], NULL,
+					&fd_arr[loop], &flags[loop],
 					frames_to_send - loop);
 		}
 
@@ -778,7 +814,8 @@ send_n_return:
 
 	while (i < loop) {
 		i += qbman_swp_enqueue_multiple(swp, &eqdesc,
-				&fd_arr[i], NULL,
+				&fd_arr[i],
+				&flags[loop],
 				loop - i);
 	}
 	num_tx += loop;
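Taken together, the Rx and Tx halves give lock-free atomic semantics. Unlike dpaa2_dev_process_parallel_event(), the atomic callback does not call qbman_swp_dqrr_consume(): it parks the DQRR entry, stores dqrr_index + 1 in mbuf->seqn (so zero means "no held entry") and tracks the entry in the per-lcore DQRR_SIZE/DQRR_HELD state. On transmit, a non-zero seqn makes dpaa2_dev_tx() enqueue that frame with QBMAN_ENQUEUE_FLAG_DCA | dqrr_index, so the hardware consumes the held DQRR entry, and thereby releases the flow to other cores, only as part of the Tx enqueue (the flags array is sized MAX_TX_RING_SLOTS to match the per-burst frames_to_send cap). A hedged sketch of the resulting worker-side flow; the event device, event port and ethernet port ids are placeholders:

#include <rte_ethdev.h>
#include <rte_eventdev.h>

/* Illustrative worker loop; ids are placeholders, error handling omitted. */
static void
atomic_worker(uint8_t evdev_id, uint8_t ev_port, uint16_t eth_port)
{
	struct rte_event ev;

	for (;;) {
		if (rte_event_dequeue_burst(evdev_id, ev_port, &ev, 1, 0) == 0)
			continue;

		/* ev.mbuf->seqn carries DQRR index + 1, set by
		 * dpaa2_dev_process_atomic_event(); the flow is still held,
		 * so no other core sees frames of this flow yet. */

		/* ... process ev.mbuf ... */

		/* dpaa2_dev_tx() notices the non-zero seqn, tags the enqueue
		 * with QBMAN_ENQUEUE_FLAG_DCA | dqrr_index, and the hardware
		 * drops the held DQRR entry when the enqueue completes. */
		rte_eth_tx_burst(eth_port, 0, &ev.mbuf, 1);
	}
}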