From 99f7941037fbcb4a66890e4fa13044ccc6742961 Mon Sep 17 00:00:00 2001
From: Pavan Nikhilesh
Date: Sat, 19 Feb 2022 17:43:38 +0530
Subject: [PATCH] event/cnxk: update SQB FC check for Tx adapter

Update the SQB limit to include the CPT queue size when Security
offload is enabled, so that Tx flow control also accounts for packets
still in flight to the CPT.

Signed-off-by: Pavan Nikhilesh
---
 drivers/event/cnxk/cn10k_eventdev.c      | 30 +++++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        | 18 +++++------
 drivers/event/cnxk/cn9k_eventdev.c       | 28 ++++++++---------
 drivers/event/cnxk/cn9k_worker.h         |  5 ++--
 drivers/event/cnxk/cnxk_eventdev.h       |  1 -
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 38 +++++++++++++++++++-----
 6 files changed, 86 insertions(+), 34 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 249688b6d7..9b4d2895ec 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -717,6 +717,35 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
 	return 0;
 }
 
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+	struct cn10k_eth_txq *txq;
+	struct roc_nix_sq *sq;
+	int i;
+
+	if (tx_queue_id < 0) {
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+			cn10k_sso_txq_fc_update(eth_dev, i);
+	} else {
+		uint16_t sqes_per_sqb;
+
+		sq = &cnxk_eth_dev->sqs[tx_queue_id];
+		txq = eth_dev->data->tx_queues[tx_queue_id];
+		sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+		sq->nb_sqb_bufs_adj =
+			sq->nb_sqb_bufs -
+			RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+				sqes_per_sqb;
+		if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+						(sqes_per_sqb - 1));
+		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+	}
+}
+
 static int
 cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
 			       const struct rte_eth_dev *eth_dev,
@@ -746,6 +775,7 @@ cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
 	}
 
 	dev->tx_offloads |= tx_offloads;
+	cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
 	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
 	if (rc < 0)
 		return rc;
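The cn10k_sso_txq_fc_update() above packs the whole flow-control budget
into a few statements. As a standalone sketch of the same arithmetic
(plain C; sqb_fc_threshold() and the example values are illustrative,
not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative rendering of the nb_sqb_bufs_adj computation above. */
static uint16_t
sqb_fc_threshold(uint16_t nb_sqb_bufs, uint16_t sqes_per_sqb,
		 uint16_t outb_nb_desc, int security_ena)
{
	/* Each SQB donates one SQE to chain to the next SQB, which adds
	 * up to ceil(nb_sqb_bufs / sqes_per_sqb) whole SQBs of lost
	 * capacity; RTE_ALIGN_MUL_CEIL(n, s) / s is just ceil(n / s).
	 */
	uint16_t adj = nb_sqb_bufs -
		       (nb_sqb_bufs + sqes_per_sqb - 1) / sqes_per_sqb;

	/* With Security offload, packets queued to the CPT still hold
	 * SQEs, so reserve their worst-case SQB usage as well.
	 */
	if (security_ena)
		adj -= outb_nb_desc / (sqes_per_sqb - 1);

	/* Trip flow control at ~70% so the SQ never truly fills. */
	return (70 * adj) / 100;
}

int
main(void)
{
	/* e.g. 512 SQBs of 32 SQEs, 1024 outbound CPT descriptors */
	printf("%u\n", sqb_fc_threshold(512, 32, 1024, 1));
	return 0;
}

With those example numbers the threshold comes out at 324 SQBs:
512 - 16 for chaining, - 33 for the CPT reservation, scaled to 70%.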
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index cfe729cef9..bb32ef75ef 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -470,6 +470,14 @@ cn10k_sso_hws_xtract_meta(struct rte_mbuf *m, const uint64_t *txq_data)
 		    (BIT_ULL(48) - 1));
 }
 
+static __rte_always_inline void
+cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
+{
+	while ((uint64_t)txq->nb_sqb_bufs_adj <=
+	       __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+		;
+}
+
 static __rte_always_inline void
 cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 		 uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
@@ -517,6 +525,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 	if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
 		ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
 
+	cn10k_sso_txq_fc_wait(txq);
 	roc_lmt_submit_steorl(lmt_id, pa);
 }
 
@@ -577,7 +586,6 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	struct cn10k_eth_txq *txq;
 	struct rte_mbuf *m;
 	uintptr_t lmt_addr;
-	uint16_t ref_cnt;
 	uint16_t lmt_id;
 
 	lmt_addr = ws->lmt_base;
@@ -607,17 +615,9 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	}
 
 	m = ev->mbuf;
-	ref_cnt = m->refcnt;
 	cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
 			 flags);
 
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		if (ref_cnt > 1)
-			return 1;
-	}
-
-	cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
-				 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
-
 	return 1;
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index ccd98c27fb..4bba477dd1 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -952,8 +952,7 @@ cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static void
-cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
-		       bool ena)
+cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
 	struct cn9k_eth_txq *txq;
@@ -962,20 +961,21 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
 
 	if (tx_queue_id < 0) {
 		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-			cn9k_sso_txq_fc_update(eth_dev, i, ena);
+			cn9k_sso_txq_fc_update(eth_dev, i);
 	} else {
-		uint16_t sq_limit;
+		uint16_t sqes_per_sqb;
 
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
 		txq = eth_dev->data->tx_queues[tx_queue_id];
-		sq_limit =
-			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
-			      sq->nb_sqb_bufs;
-		txq->nb_sqb_bufs_adj =
-			sq_limit -
-			RTE_ALIGN_MUL_CEIL(sq_limit,
-					   (1ULL << txq->sqes_per_sqb_log2)) /
-				(1ULL << txq->sqes_per_sqb_log2);
+		sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+		sq->nb_sqb_bufs_adj =
+			sq->nb_sqb_bufs -
+			RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+				sqes_per_sqb;
+		if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+						(sqes_per_sqb - 1));
+		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
@@ -1009,7 +1009,7 @@ cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
 	}
 
 	dev->tx_offloads |= tx_offloads;
-	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
+	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
 	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
 	if (rc < 0)
 		return rc;
@@ -1030,7 +1030,7 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
 	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
 	if (rc < 0)
 		return rc;
-	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
+	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
 
 	return cn9k_sso_updt_tx_adptr_data(event_dev);
 }
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0905d744cc..79b2b3809f 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -613,9 +613,8 @@ NIX_RX_FASTPATH_MODES
 static __rte_always_inline void
 cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
 {
-	while (!((txq->nb_sqb_bufs_adj -
-		  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
-		 << (txq)->sqes_per_sqb_log2))
+	while ((uint64_t)txq->nb_sqb_bufs_adj <=
+	       __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
 		;
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index e3b5ffa7eb..b157fef096 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,7 +38,6 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
-#define CNXK_SSO_SQB_LIMIT     (0x180)
 
 #define CNXK_TT_FROM_TAG(x)	(((x) >> 32) & SSO_TT_EMPTY)
 #define CNXK_TT_FROM_EVENT(x)	(((x) >> 38) & SSO_TT_EMPTY)
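The reworked cn9k_sso_txq_fc_wait() (shared in spirit with the new
cn10k helper above) is the behavioural core of the patch: fc_mem counts
SQBs consumed by hardware, and, as I read the old code, the
shifted-difference test only spun while that count exactly equalled the
limit, so a count that had already overshot the limit fell straight
through. A toy model of the two predicates (plain C; busy_old(),
busy_new() and the values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Old predicate: spin only while the shifted difference is zero. */
static int
busy_old(uint64_t adj, uint64_t fc, unsigned int log2)
{
	return !((adj - fc) << log2);
}

/* New predicate: spin while the consumed count has reached the limit. */
static int
busy_new(uint64_t adj, uint64_t fc)
{
	return adj <= fc;
}

int
main(void)
{
	/* Limit 64, hardware count already at 65: the old test reports
	 * "not busy" (the unsigned difference is huge and non-zero) and
	 * would let another submission through; the new test waits.
	 */
	printf("old=%d new=%d\n", busy_old(64, 65, 5), busy_new(64, 65));
	return 0;
}

Compiled, this prints old=0 new=1: the old predicate stops waiting at
exactly the point where it should keep waiting.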
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 5ebd3340e7..7b580ca98f 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -335,8 +335,18 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
 static int
 cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
 {
-	return roc_npa_aura_limit_modify(
-		sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+	int rc;
+
+	if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+		rc = roc_npa_aura_limit_modify(
+			sq->aura_handle,
+			RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+		if (rc < 0)
+			return rc;
+
+		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+	}
+	return 0;
 }
 
 static void
@@ -522,22 +532,29 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
 	struct roc_nix_sq *sq;
-	int i, ret;
+	int i, ret = 0;
 	void *txq;
 
 	if (tx_queue_id < 0) {
 		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-			cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
+			ret |= cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev,
+							     i);
 	} else {
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
 		if (ret < 0)
 			return ret;
 	}
 
+	if (ret < 0) {
+		plt_err("Failed to configure Tx adapter port=%d, q=%d",
+			eth_dev->data->port_id, tx_queue_id);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -548,12 +565,13 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 {
 	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
 	struct roc_nix_sq *sq;
-	int i, ret;
+	int i, ret = 0;
 
 	RTE_SET_USED(event_dev);
 	if (tx_queue_id < 0) {
 		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-			cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
+			ret |= cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev,
+							     i);
 	} else {
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
 		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
@@ -563,5 +581,11 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 		return ret;
 	}
 
+	if (ret < 0) {
+		plt_err("Failed to clear Tx adapter config port=%d, q=%d",
+			eth_dev->data->port_id, tx_queue_id);
+		return ret;
+	}
+
 	return 0;
 }
-- 
2.39.5
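For completeness, the reworked cnxk_sso_sqb_aura_limit_edit() above now
caches the applied SQB limit and skips the NPA mailbox call when the
requested limit is already in place. A minimal standalone model of that
guard (sq_model and npa_aura_limit_modify() are simplified stand-ins
for struct roc_nix_sq and roc_npa_aura_limit_modify(), not the real ROC
API):

#include <stdint.h>

/* Simplified stand-ins for struct roc_nix_sq and the NPA setter. */
struct sq_model {
	uint64_t aura_handle;
	uint16_t aura_sqb_bufs;	/* pool capacity */
	uint16_t nb_sqb_bufs;	/* currently applied limit */
};

static int
npa_aura_limit_modify(uint64_t handle, uint16_t limit)
{
	(void)handle;
	(void)limit;
	return 0;	/* pretend the mailbox call succeeded */
}

#define MIN_U16(a, b) ((a) < (b) ? (a) : (b))

static int
sqb_aura_limit_edit(struct sq_model *sq, uint16_t nb_sqb_bufs)
{
	uint16_t limit = MIN_U16(nb_sqb_bufs, sq->aura_sqb_bufs);
	int rc;

	/* No-op when the requested limit is already programmed. */
	if (sq->nb_sqb_bufs == nb_sqb_bufs)
		return 0;

	rc = npa_aura_limit_modify(sq->aura_handle, limit);
	if (rc < 0)
		return rc;

	/* Remember what was actually applied (clamped to capacity). */
	sq->nb_sqb_bufs = limit;
	return 0;
}

int
main(void)
{
	struct sq_model sq = { 0, 512, 512 };

	/* Same limit requested twice: only the first call would reach
	 * the mailbox.
	 */
	return sqb_aura_limit_edit(&sq, 256) | sqb_aura_limit_edit(&sq, 256);
}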