net/cnxk: resize CQ for Rx security for errata
author    Nithin Dabilpuram <ndabilpuram@marvell.com>
Thu, 16 Jun 2022 09:24:18 +0000 (14:54 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Mon, 20 Jun 2022 17:23:46 +0000 (19:23 +0200)
Resize the Rx CQ when Rx security offload is enabled, to work around a HW
errata (CPT hang on X2P backpressure). The CQ is scaled up so it can hold
all buffers from the related auras, preventing CQ overflow and the
resulting CPT buffer leak.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev.h
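
For illustration, below is a minimal standalone sketch of the clamp-up
arithmetic this patch adds in nix_inl_cq_sz_clamp_up(): take the combined
limits of the related auras, grow nb_desc so the CQ holds one more than
that, and cap the result at 64K. The helper name cq_sz_clamp_up(), the MAX
macro, and the numeric aura limits are hypothetical stand-ins for the
driver's roc_npa_aura_op_limit_get() plumbing; only the arithmetic mirrors
the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_INL_CLAMP_MAX (64UL * 1024UL) /* same cap as CNXK_NIX_CQ_INL_CLAMP_MAX */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for nix_inl_cq_sz_clamp_up(): given the first
 * pass RQ's aura limit and this RQ's aura limit, grow nb_desc so the CQ
 * can hold every buffer, capped at 64K.
 */
static uint32_t
cq_sz_clamp_up(uint64_t inl_aura_limit, uint64_t rq_aura_limit,
	       int auras_differ, uint32_t nb_desc)
{
	uint64_t limit = inl_aura_limit;

	/* Also count this RQ's aura if it is a different aura */
	if (auras_differ)
		limit += rq_aura_limit;
	nb_desc = MAX(limit + 1, nb_desc);
	if (nb_desc > CQ_INL_CLAMP_MAX) {
		fprintf(stderr, "cannot accommodate all %" PRIu64 " buffers\n",
			limit);
		nb_desc = CQ_INL_CLAMP_MAX;
	}
	return nb_desc;
}

int
main(void)
{
	/* e.g. a 32K-buffer inline-dev aura plus a distinct 8K-buffer RQ aura */
	printf("%u\n", cq_sz_clamp_up(32768, 8192, 1, 4096));  /* 40961 */
	/* combined auras beyond the cap are clamped to 64K with a warning */
	printf("%u\n", cq_sz_clamp_up(60000, 20000, 1, 4096)); /* 65536 */
	return 0;
}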

drivers/net/cnxk/cnxk_ethdev.c
index 4ea1617..2418290 100644
@@ -5,6 +5,8 @@
 
 #include <rte_eventdev.h>
 
+#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
+
 static inline uint64_t
 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 {
@@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        return speed_capa;
 }
 
+static uint32_t
+nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
+                      uint32_t nb_desc)
+{
+       struct roc_nix_rq *inl_rq;
+       uint64_t limit;
+
+       if (!roc_errata_cpt_hang_on_x2p_bp())
+               return nb_desc;
+
+       /* CQ should be able to hold all buffers in the first pass
+        * RQ's aura and in this RQ's aura.
+        */
+       inl_rq = roc_nix_inl_dev_rq(nix);
+       if (!inl_rq) {
+               /* This mempool's aura is itself the inline RQ's aura */
+               limit = roc_npa_aura_op_limit_get(mp->pool_id);
+       } else {
+               limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
+               /* Also add this RQ's aura if it is different */
+               if (inl_rq->aura_handle != mp->pool_id)
+                       limit += roc_npa_aura_op_limit_get(mp->pool_id);
+       }
+       nb_desc = PLT_MAX(limit + 1, nb_desc);
+       if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
+               plt_warn("Could not set up CQ size to accommodate"
+                        " all buffers in related auras (%" PRIu64 ")",
+                        limit);
+               nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
+       }
+       return nb_desc;
+}
+
 int
 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 {
@@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 
 int
 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
-                       uint16_t nb_desc, uint16_t fp_rx_q_sz,
+                       uint32_t nb_desc, uint16_t fp_rx_q_sz,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
@@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
            dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
                roc_nix_inl_dev_xaq_realloc(mp->pool_id);
 
+       /* Increase CQ size to aura size to avoid CQ overflow and
+        * the resulting CPT buffer leak.
+        */
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+               nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
+
        /* Setup ROC CQ */
        cq = &dev->cqs[qid];
        cq->qid = qid;
drivers/net/cnxk/cnxk_ethdev.h
index a4e96f0..4cb7c9e 100644
@@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_tx_q_sz,
                            const struct rte_eth_txconf *tx_conf);
 int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
-                           uint16_t nb_desc, uint16_t fp_rx_q_sz,
+                           uint32_t nb_desc, uint16_t fp_rx_q_sz,
                            const struct rte_eth_rxconf *rx_conf,
                            struct rte_mempool *mp);
 int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
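
A note on the prototype widening above (uint16_t to uint32_t for nb_desc):
the clamp ceiling itself, 64UL * 1024UL = 65536, already exceeds
UINT16_MAX (65535), so a 16-bit descriptor count would silently wrap. A
tiny standalone illustration of that truncation (not driver code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t clamped = 64UL * 1024UL;    /* 65536, the CQ clamp ceiling */
	uint16_t narrow = (uint16_t)clamped; /* wraps modulo 65536 -> 0 */

	printf("uint32_t: %" PRIu32 ", uint16_t: %u\n", clamped,
	       (unsigned int)narrow);
	return 0;
}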