event/cnxk: update SQB FC check for Tx adapter
Author:     Pavan Nikhilesh <pbhagavatula@marvell.com>
AuthorDate: Sat, 19 Feb 2022 12:13:38 +0000 (17:43 +0530)
Commit:     Jerin Jacob <jerinj@marvell.com>
CommitDate: Tue, 22 Feb 2022 09:50:54 +0000 (10:50 +0100)
Update the SQB limit to include the CPT queue size when the security
offload is enabled.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
drivers/event/cnxk/cn10k_eventdev.c
drivers/event/cnxk/cn10k_worker.h
drivers/event/cnxk/cn9k_eventdev.c
drivers/event/cnxk/cn9k_worker.h
drivers/event/cnxk/cnxk_eventdev.h
drivers/event/cnxk/cnxk_eventdev_adptr.c
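
For context before the per-file diffs, the limit that both the cn9k and cn10k adapters now program can be sketched as a standalone helper. This is a minimal illustration under stated assumptions, not driver code: the function name, the plain-integer parameters, and the example values are hypothetical, while the arithmetic (subtracting ceil(nb_sqb_bufs / sqes_per_sqb), reserving room for CPT outbound descriptors when RTE_ETH_TX_OFFLOAD_SECURITY is set, and keeping a 70% margin) mirrors the hunks below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the nb_sqb_bufs_adj computation added in
 * cn9k/cn10k_sso_txq_fc_update(); not part of the driver API.
 */
static uint16_t
sqb_fc_limit(uint16_t nb_sqb_bufs, uint8_t sqes_per_sqb_log2,
             uint16_t outb_nb_desc, bool security_offload)
{
        uint16_t sqes_per_sqb = 1U << sqes_per_sqb_log2;
        /* Same as RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) /
         * sqes_per_sqb in the diff: subtract ceil(nb_sqb_bufs / sqes_per_sqb)
         * from the usable SQB count.
         */
        uint16_t adj = nb_sqb_bufs -
                       (nb_sqb_bufs + sqes_per_sqb - 1) / sqes_per_sqb;

        /* With the security Tx offload enabled, also reserve room for the
         * CPT (inline outbound) descriptors, as this commit adds.
         */
        if (security_offload)
                adj -= outb_nb_desc / (sqes_per_sqb - 1);

        /* Keep the same 70% safety margin the driver applies. */
        return (uint16_t)((70U * adj) / 100U);
}

int main(void)
{
        /* Example values only: 512 SQBs, 32 SQEs per SQB (log2 = 5),
         * 256 CPT descriptors, security offload on.
         */
        printf("SQB FC limit = %u\n", sqb_fc_limit(512, 5, 256, true));
        return 0;
}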

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 249688b6d7447a21ae43af666e21bc7e2ed9e8b1..9b4d2895ecbaff0ca359175b05720e76bf0e0325 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -717,6 +717,35 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
        return 0;
 }
 
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cn10k_eth_txq *txq;
+       struct roc_nix_sq *sq;
+       int i;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+                       cn10k_sso_txq_fc_update(eth_dev, i);
+       } else {
+               uint16_t sqes_per_sqb;
+
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+               sq->nb_sqb_bufs_adj =
+                       sq->nb_sqb_bufs -
+                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+                               sqes_per_sqb;
+               if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+                                               (sqes_per_sqb - 1));
+               txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+               txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+       }
+}
+
 static int
 cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
@@ -746,6 +775,7 @@ cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
        }
 
        dev->tx_offloads |= tx_offloads;
+       cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index cfe729cef9f3fab9c8c2a947b0dcbd1faf3649c4..bb32ef75ef37e6d3afdcab4fcd9cd43aae25f5ce 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -470,6 +470,14 @@ cn10k_sso_hws_xtract_meta(struct rte_mbuf *m, const uint64_t *txq_data)
                           (BIT_ULL(48) - 1));
 }
 
+static __rte_always_inline void
+cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
+{
+       while ((uint64_t)txq->nb_sqb_bufs_adj <=
+              __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+               ;
+}
+
 static __rte_always_inline void
 cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
                 uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
@@ -517,6 +525,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
        if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
                ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
 
+       cn10k_sso_txq_fc_wait(txq);
        roc_lmt_submit_steorl(lmt_id, pa);
 }
 
@@ -577,7 +586,6 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
        struct cn10k_eth_txq *txq;
        struct rte_mbuf *m;
        uintptr_t lmt_addr;
-       uint16_t ref_cnt;
        uint16_t lmt_id;
 
        lmt_addr = ws->lmt_base;
@@ -607,17 +615,9 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
        }
 
        m = ev->mbuf;
-       ref_cnt = m->refcnt;
        cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
                         flags);
 
-       if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-               if (ref_cnt > 1)
-                       return 1;
-       }
-
-       cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
-                                ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
        return 1;
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index ccd98c27fbdd0b18225535f36dc7d75d42f3176c..4bba477dd1ecc018c9b9a23de61a1a2b06abcb85 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -952,8 +952,7 @@ cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
 }
 
 static void
-cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
-                      bool ena)
+cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 {
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
@@ -962,20 +961,21 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
 
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-                       cn9k_sso_txq_fc_update(eth_dev, i, ena);
+                       cn9k_sso_txq_fc_update(eth_dev, i);
        } else {
-               uint16_t sq_limit;
+               uint16_t sqes_per_sqb;
 
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
-               sq_limit =
-                       ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
-                                   sq->nb_sqb_bufs;
-               txq->nb_sqb_bufs_adj =
-                       sq_limit -
-                       RTE_ALIGN_MUL_CEIL(sq_limit,
-                                          (1ULL << txq->sqes_per_sqb_log2)) /
-                               (1ULL << txq->sqes_per_sqb_log2);
+               sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+               sq->nb_sqb_bufs_adj =
+                       sq->nb_sqb_bufs -
+                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+                               sqes_per_sqb;
+               if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+                                               (sqes_per_sqb - 1));
+               txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
 }
@@ -1009,7 +1009,7 @@ cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
        }
 
        dev->tx_offloads |= tx_offloads;
-       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
+       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
@@ -1030,7 +1030,7 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
-       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
+       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
 }
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0905d744cc7016d3504f7fb21119008b67406f47..79b2b3809fe33b394bd3fcad39274aa4a316bc9b 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -613,9 +613,8 @@ NIX_RX_FASTPATH_MODES
 static __rte_always_inline void
 cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
 {
-       while (!((txq->nb_sqb_bufs_adj -
-                 __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
-                << (txq)->sqes_per_sqb_log2))
+       while ((uint64_t)txq->nb_sqb_bufs_adj <=
+              __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
                ;
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index e3b5ffa7eb5d2ab7cd576d10a6322f9f527afbad..b157fef096818dda7c047486dacbbc9dfc4574e4 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,7 +38,6 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
-#define CNXK_SSO_SQB_LIMIT     (0x180)
 
 #define CNXK_TT_FROM_TAG(x)        (((x) >> 32) & SSO_TT_EMPTY)
 #define CNXK_TT_FROM_EVENT(x)      (((x) >> 38) & SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 5ebd3340e7bb9f909c2e34765aa5184d9aef7ea6..7b580ca98febff277abf26e0ee26c2ac74dd0b7c 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -335,8 +335,18 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
 static int
 cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
 {
-       return roc_npa_aura_limit_modify(
-               sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+       int rc;
+
+       if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+               rc = roc_npa_aura_limit_modify(
+                       sq->aura_handle,
+                       RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+               if (rc < 0)
+                       return rc;
+
+               sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+       }
+       return 0;
 }
 
 static void
@@ -522,22 +532,29 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
 {
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_sq *sq;
-       int i, ret;
+       int i, ret = 0;
        void *txq;
 
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-                       cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
+                       ret |= cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev,
+                                                            i);
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
-               cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+               cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
                ret = cnxk_sso_updt_tx_queue_data(
                        event_dev, eth_dev->data->port_id, tx_queue_id, txq);
                if (ret < 0)
                        return ret;
        }
 
+       if (ret < 0) {
+               plt_err("Failed to configure Tx adapter port=%d, q=%d",
+                       eth_dev->data->port_id, tx_queue_id);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -548,12 +565,13 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 {
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_sq *sq;
-       int i, ret;
+       int i, ret = 0;
 
        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-                       cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
+                       ret |= cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev,
+                                                            i);
        } else {
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
@@ -563,5 +581,11 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
                        return ret;
        }
 
+       if (ret < 0) {
+               plt_err("Failed to clear Tx adapter config port=%d, q=%d",
+                       eth_dev->data->port_id, tx_queue_id);
+               return ret;
+       }
+
        return 0;
 }
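
For reference, the fast-path side of the change is the gate that consumes the adjusted limit: a transmit simply busy-waits until the hardware-maintained flow-control count drops below nb_sqb_bufs_adj. Below is a minimal sketch of that relation, assuming a hypothetical trimmed-down queue struct and C11 atomics in place of the GCC __atomic builtins used in the hunks above.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical, trimmed-down view of the Tx queue fields used by the
 * cn9k/cn10k wait loops in this commit; not the driver's actual layout.
 */
struct txq_fc {
        int64_t nb_sqb_bufs_adj;        /* limit programmed by the Tx adapter */
        const _Atomic uint64_t *fc_mem; /* SQBs in use, updated by hardware */
};

/* Spin until the in-flight SQB count drops below the adjusted limit;
 * same shape as the reworked cn9k/cn10k_sso_txq_fc_wait().
 */
static inline void
txq_fc_wait(const struct txq_fc *txq)
{
        while ((uint64_t)txq->nb_sqb_bufs_adj <=
               atomic_load_explicit(txq->fc_mem, memory_order_relaxed))
                ;
}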