From ea2780632f6563f100d0b324b6d027b3b51d2b78 Mon Sep 17 00:00:00 2001
From: David Marchand
Date: Wed, 28 Oct 2020 13:20:10 +0100
Subject: [PATCH] bus/fslmc: switch sequence number to dynamic mbuf field

The dpaa2 drivers have been hacking the deprecated field seqn for
internal features.
It is moved to a dynamic mbuf field in order to allow removal of seqn.

Signed-off-by: David Marchand
---
 drivers/bus/fslmc/fslmc_bus.c                 | 17 +++++++++++
 drivers/bus/fslmc/rte_fslmc.h                 | 23 +++++++++++++++
 drivers/bus/fslmc/version.map                 |  1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   | 18 ++++++------
 drivers/event/dpaa2/dpaa2_eventdev.c          |  9 +++---
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c |  4 ++-
 drivers/mempool/dpaa2/dpaa2_hw_mempool.h      |  2 --
 drivers/net/dpaa2/dpaa2_rxtx.c                | 28 +++++++++----------
 8 files changed, 73 insertions(+), 29 deletions(-)

diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index beb3dd008f..db93669628 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <rte_mbuf_dyn.h>
 
 #include
 #include
@@ -27,6 +28,9 @@
 struct rte_fslmc_bus rte_fslmc_bus;
 uint8_t dpaa2_virt_mode;
 
+#define DPAA2_SEQN_DYNFIELD_NAME "dpaa2_seqn_dynfield"
+int dpaa2_seqn_dynfield_offset = -1;
+
 uint32_t
 rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type)
 {
@@ -374,9 +378,22 @@ rte_fslmc_probe(void)
 	struct rte_dpaa2_device *dev;
 	struct rte_dpaa2_driver *drv;
 
+	static const struct rte_mbuf_dynfield dpaa2_seqn_dynfield_desc = {
+		.name = DPAA2_SEQN_DYNFIELD_NAME,
+		.size = sizeof(dpaa2_seqn_t),
+		.align = __alignof__(dpaa2_seqn_t),
+	};
+
 	if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
 		return 0;
 
+	dpaa2_seqn_dynfield_offset =
+		rte_mbuf_dynfield_register(&dpaa2_seqn_dynfield_desc);
+	if (dpaa2_seqn_dynfield_offset < 0) {
+		DPAA2_BUS_ERR("Failed to register mbuf field for dpaa sequence number");
+		return 0;
+	}
+
 	ret = fslmc_vfio_setup_group();
 	if (ret) {
 		DPAA2_BUS_ERR("Unable to setup VFIO %d", ret);
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 80873fffc9..37d45dffe5 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -32,11 +32,34 @@ extern "C" {
 #include
 #include
 #include
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 
 #include
 
 #define FSLMC_OBJECT_MAX_LEN 32   /**< Length of each device on bus */
 
+#define DPAA2_INVALID_MBUF_SEQN 0
+
+typedef uint32_t dpaa2_seqn_t;
+extern int dpaa2_seqn_dynfield_offset;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Read dpaa2 sequence number from mbuf.
+ *
+ * @param mbuf Structure to read from.
+ * @return pointer to dpaa2 sequence number.
+ */
+__rte_experimental
+static inline dpaa2_seqn_t *
+dpaa2_seqn(struct rte_mbuf *mbuf)
+{
+	return RTE_MBUF_DYNFIELD(mbuf, dpaa2_seqn_dynfield_offset,
+		dpaa2_seqn_t *);
+}
 
 /** Device driver supports link state interrupt */
 #define RTE_DPAA2_DRV_INTR_LSC 0x0008
diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map
index b169f5228a..f44c1a7988 100644
--- a/drivers/bus/fslmc/version.map
+++ b/drivers/bus/fslmc/version.map
@@ -19,6 +19,7 @@ INTERNAL {
 	dpaa2_free_eq_descriptors;
 	dpaa2_get_mcp_ptr;
 	dpaa2_io_portal;
+	dpaa2_seqn_dynfield_offset;
 	dpaa2_svr_family;
 	dpaa2_virt_mode;
 	dpbp_disable;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index afcd6bd063..ce1d50ce77 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1472,13 +1472,15 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			dpaa2_eqcr_size : nb_ops;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
-			if ((*ops)->sym->m_src->seqn) {
-				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
-
-				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+			if (*dpaa2_seqn((*ops)->sym->m_src)) {
+				uint8_t dqrr_index =
+					*dpaa2_seqn((*ops)->sym->m_src) - 1;
+
+				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+				DPAA2_PER_LCORE_DQRR_SIZE--;
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+				*dpaa2_seqn((*ops)->sym->m_src) =
+					DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			/*Clear the unused FD fields before sending*/
@@ -3714,7 +3716,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 	ev->event_ptr = sec_fd_to_mbuf(fd);
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	crypto_op->sym->m_src->seqn = dqrr_index + 1;
+	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 95f03c8b9e..eeb2494bd0 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -131,8 +131,9 @@ skip_linking:
 		qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
 
 		if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
-			&& event->mbuf->seqn) {
-			uint8_t dqrr_index = event->mbuf->seqn - 1;
+			&& *dpaa2_seqn(event->mbuf)) {
+			uint8_t dqrr_index =
+				*dpaa2_seqn(event->mbuf) - 1;
 
 			qbman_eq_desc_set_dca(&eqdesc[loop], 1,
 					      dqrr_index, 0);
@@ -249,7 +250,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
 	rte_free(ev_temp);
 
-	ev->mbuf->seqn = dqrr_index + 1;
+	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
@@ -314,7 +315,7 @@ skip_linking:
 			if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
 				qbman_swp_dqrr_idx_consume(swp, i);
 				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+				*dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
 					DPAA2_INVALID_MBUF_SEQN;
 			}
 			i++;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
index 5447db8a8a..cd7311a94d 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <rte_fslmc.h>
 
 #include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h" @@ -274,7 +275,8 @@ check_excess_events(uint8_t port) valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); RTE_TEST_ASSERT_SUCCESS(valid_event, - "Unexpected valid event=%d", ev.mbuf->seqn); + "Unexpected valid event=%d", + *dpaa2_seqn(ev.mbuf)); } return 0; } diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h index 53fa1552d1..7c493b28e7 100644 --- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h @@ -10,8 +10,6 @@ #define DPAA2_MAX_BUF_POOLS 8 -#define DPAA2_INVALID_MBUF_SEQN 0 - struct buf_pool_cfg { void *addr; /**< The address from where DPAA2 will carve out the buffers */ diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 4dd1d5f578..6201de4606 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -710,7 +710,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused, ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); dqrr_index = qbman_get_dqrr_idx(dq); - ev->mbuf->seqn = dqrr_index + 1; + *dpaa2_seqn(ev->mbuf) = dqrr_index + 1; DPAA2_PER_LCORE_DQRR_SIZE++; DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; @@ -736,9 +736,9 @@ dpaa2_dev_process_ordered_event(struct qbman_swp *swp, ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); - ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP; - ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT; - ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT; + *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP; + *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT; + *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT; qbman_swp_dqrr_consume(swp, dq); } @@ -1063,14 +1063,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) dpaa2_eqcr_size : nb_pkts; for (loop = 0; loop < frames_to_send; loop++) { - if ((*bufs)->seqn) { - uint8_t dqrr_index = (*bufs)->seqn - 1; + if (*dpaa2_seqn(*bufs)) { + uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1; flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; DPAA2_PER_LCORE_DQRR_SIZE--; DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); - (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; + *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN; } if (likely(RTE_MBUF_DIRECT(*bufs))) { @@ -1230,10 +1230,10 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid); - if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) { - orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >> + if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) { + orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >> DPAA2_EQCR_OPRID_SHIFT; - seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >> + seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >> DPAA2_EQCR_SEQNUM_SHIFT; if (!priv->en_loose_ordered) { @@ -1255,12 +1255,12 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); } } else { - dq_idx = m->seqn - 1; + dq_idx = *dpaa2_seqn(m) - 1; qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); DPAA2_PER_LCORE_DQRR_SIZE--; DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); } - m->seqn = DPAA2_INVALID_MBUF_SEQN; + *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN; } /* Callback to handle sending ordered packets through WRIOP based interface */ @@ -1314,7 +1314,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) dpaa2_eqcr_size : nb_pkts; if 
-			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
 				num_free_eq_desc = dpaa2_free_eq_descriptors();
 				if (num_free_eq_desc < frames_to_send)
 					frames_to_send = num_free_eq_desc;
@@ -1325,7 +1325,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			/*Prepare enqueue descriptor*/
 			qbman_eq_desc_clear(&eqdesc[loop]);
 
-			if ((*bufs)->seqn) {
+			if (*dpaa2_seqn(*bufs)) {
 				/* Use only queue 0 for Tx in case of atomic/
 				 * ordered packets as packets can get unordered
 				 * when being tranmitted out from the interface
--
2.20.1
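
For readers unfamiliar with dynamic mbuf fields, the sketch below reduces the register-then-access pattern used above to a standalone program. It is an illustration outside the patch, not part of it: the field name "demo_seqn_dynfield", the demo_seqn() helper, the pool name and sizes are made up for the example; only rte_mbuf_dynfield_register(), RTE_MBUF_DYNFIELD() and the pktmbuf calls are the real DPDK APIs being demonstrated.

/* Standalone sketch of the dynamic mbuf field pattern (hypothetical names). */
#include <stdio.h>
#include <stdint.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

typedef uint32_t demo_seqn_t;
static int demo_seqn_offset = -1;

/* Accessor mirroring the dpaa2_seqn() helper added by the patch. */
static inline demo_seqn_t *
demo_seqn(struct rte_mbuf *m)
{
	return RTE_MBUF_DYNFIELD(m, demo_seqn_offset, demo_seqn_t *);
}

int
main(int argc, char **argv)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "demo_seqn_dynfield",
		.size = sizeof(demo_seqn_t),
		.align = __alignof__(demo_seqn_t),
	};
	struct rte_mempool *mp;
	struct rte_mbuf *m;

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	/* Registration reserves space in every rte_mbuf; do it once, early,
	 * as the patch does in rte_fslmc_probe(). */
	demo_seqn_offset = rte_mbuf_dynfield_register(&desc);
	if (demo_seqn_offset < 0) {
		printf("dynfield registration failed\n");
		return 1;
	}

	mp = rte_pktmbuf_pool_create("demo_pool", 512, 0, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	m = mp != NULL ? rte_pktmbuf_alloc(mp) : NULL;
	if (m != NULL) {
		*demo_seqn(m) = 42;	/* write through the accessor */
		printf("seqn = %u\n", *demo_seqn(m));
		rte_pktmbuf_free(m);
	}

	rte_eal_cleanup();
	return 0;
}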