git.droids-corp.org - dpdk.git/commitdiff
common/cnxk: support enabling AURA tail drop for RQ
authorNithin Dabilpuram <ndabilpuram@marvell.com>
Tue, 22 Feb 2022 19:34:58 +0000 (01:04 +0530)
committerJerin Jacob <jerinj@marvell.com>
Wed, 23 Feb 2022 16:35:38 +0000 (17:35 +0100)
Add support to enable AURA tail drop via RQ, specifically
for the inline device RQ's packet pool. This is better than RQ
RED drop, as it can be applied to all RQs that do not have
security enabled but share the same packet pool.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
drivers/common/cnxk/roc_nix.h
drivers/common/cnxk/roc_nix_inl.c
drivers/common/cnxk/roc_nix_inl.h
drivers/common/cnxk/roc_nix_inl_dev.c
drivers/common/cnxk/roc_nix_inl_priv.h
drivers/common/cnxk/roc_nix_queue.c
drivers/common/cnxk/roc_npa.c
drivers/common/cnxk/roc_npa.h
drivers/common/cnxk/version.map

index a0f24d2711c1d00a7cd071dfeccb5cd36330dae6..0ced7fb8d0c41717293ddf095e252797863a96e3 100644 (file)
@@ -297,6 +297,10 @@ struct roc_nix_rq {
        uint8_t spb_red_drop;
        /* Average SPB aura level pass threshold for RED */
        uint8_t spb_red_pass;
+       /* LPB aura drop enable */
+       bool lpb_drop_ena;
+       /* SPB aura drop enable */
+       bool spb_drop_ena;
        /* End of Input parameters */
        struct roc_nix *roc_nix;
        bool inl_dev_ref;
index f57f1a46ecf8291540b09f581a8d60fe3b947644..ac17e95cd007c309811b84d569ced9a47148058a 100644 (file)
@@ -528,23 +528,50 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
        inl_rq->first_skip = rq->first_skip;
        inl_rq->later_skip = rq->later_skip;
        inl_rq->lpb_size = rq->lpb_size;
+       inl_rq->lpb_drop_ena = true;
+       inl_rq->spb_ena = rq->spb_ena;
+       inl_rq->spb_aura_handle = rq->spb_aura_handle;
+       inl_rq->spb_size = rq->spb_size;
+       inl_rq->spb_drop_ena = !!rq->spb_ena;
 
        if (!roc_model_is_cn9k()) {
                uint64_t aura_limit =
                        roc_npa_aura_op_limit_get(inl_rq->aura_handle);
                uint64_t aura_shift = plt_log2_u32(aura_limit);
+               uint64_t aura_drop, drop_pc;
 
                if (aura_shift < 8)
                        aura_shift = 0;
                else
                        aura_shift = aura_shift - 8;
 
-               /* Set first pass RQ to drop when half of the buffers are in
+               /* Set first pass RQ to drop after part of buffers are in
                 * use to avoid metabuf alloc failure. This is needed as long
-                * as we cannot use different
+                * as we cannot use different aura.
                 */
-               inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
-               inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
+               drop_pc = inl_dev->lpb_drop_pc;
+               aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+               roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
+       }
+
+       if (inl_rq->spb_ena) {
+               uint64_t aura_limit =
+                       roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+               uint64_t aura_shift = plt_log2_u32(aura_limit);
+               uint64_t aura_drop, drop_pc;
+
+               if (aura_shift < 8)
+                       aura_shift = 0;
+               else
+                       aura_shift = aura_shift - 8;
+
+               /* Set first pass RQ to drop after part of buffers are in
+                * use to avoid metabuf alloc failure. This is needed as long
+                * as we cannot use different aura.
+                */
+               drop_pc = inl_dev->spb_drop_pc;
+               aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+               roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
        }
 
        /* Enable IPSec */
@@ -613,6 +640,10 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
        if (rc)
                plt_err("Failed to disable inline device rq, rc=%d", rc);
 
+       roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
+       if (inl_rq->spb_ena)
+               roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
+
        /* Flush NIX LF for CN10K */
        nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
 
index 224aaba747731207c9911802b7fa2cd08eb81aca..728225baa9eb210d63c0d55d71e141c302482fb0 100644 (file)
@@ -112,6 +112,8 @@ struct roc_nix_inl_dev {
        uint16_t chan_mask;
        bool attach_cptlf;
        bool wqe_skip;
+       uint8_t spb_drop_pc;
+       uint8_t lpb_drop_pc;
        /* End of input parameters */
 
 #define ROC_NIX_INL_MEM_SZ (1280)
index 9dc0a626b0a7d4cfb55dbe95326ade2f6d392f3d..4c1d85ae6bd9a05d3ad4da9ed4608967b386992e 100644 (file)
@@ -5,6 +5,8 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
+#define NIX_AURA_DROP_PC_DFLT 40
+
 /* Default Rx Config for Inline NIX LF */
 #define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
@@ -662,6 +664,13 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
+       inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
+       inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
+
+       if (roc_inl_dev->spb_drop_pc)
+               inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
+       if (roc_inl_dev->lpb_drop_pc)
+               inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;
 
        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
index dcf752e2565342fe24a5d3af0be00d47667216db..b6d860286b4b294aa7f6653cab2f6ff55cd5e49c 100644 (file)
@@ -43,6 +43,8 @@ struct nix_inl_dev {
        struct roc_nix_rq rq;
        uint16_t rq_refs;
        bool is_nix1;
+       uint8_t spb_drop_pc;
+       uint8_t lpb_drop_pc;
 
        /* NIX/CPT data */
        void *inb_sa_base;
index a283d96a01203f810ac8998116ea1e7007f0666f..7d271854f4198cd211f74c38f351bc278cad88e1 100644 (file)
@@ -299,7 +299,9 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
        aq->rq.rq_int_ena = 0;
        /* Many to one reduction */
        aq->rq.qint_idx = rq->qid % qints;
-       aq->rq.xqe_drop_ena = 1;
+       aq->rq.xqe_drop_ena = 0;
+       aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
+       aq->rq.spb_drop_ena = rq->spb_drop_ena;
 
        /* If RED enabled, then fill enable for all cases */
        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
@@ -366,6 +368,8 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
                aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
                aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
                aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+               aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
+               aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;
 
                if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                        aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
index 75fc22442f1b0f056f585894e68f9cb1b3207725..1e60f443f037819ddb0f21498e2d7b3b3ea668c3 100644 (file)
@@ -193,6 +193,35 @@ roc_npa_pool_op_pc_reset(uint64_t aura_handle)
        }
        return 0;
 }
+
+int
+roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
+{
+       struct npa_aq_enq_req *aura_req;
+       struct npa_lf *lf;
+       int rc;
+
+       lf = idev_npa_obj_get();
+       if (lf == NULL)
+               return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+       aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
+       if (aura_req == NULL)
+               return -ENOMEM;
+       aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+       aura_req->ctype = NPA_AQ_CTYPE_AURA;
+       aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+       aura_req->aura.aura_drop_ena = ena;
+       aura_req->aura.aura_drop = limit;
+       aura_req->aura_mask.aura_drop_ena =
+               ~(aura_req->aura_mask.aura_drop_ena);
+       aura_req->aura_mask.aura_drop = ~(aura_req->aura_mask.aura_drop);
+       rc = mbox_process(lf->mbox);
+
+       return rc;
+}
+
 static inline char *
 npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
 {
@@ -299,7 +328,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
        aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
        aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
        aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
-       aura->avg_con = ROC_NPA_AVG_CONT;
+       aura->avg_con = 0;
        /* Many to one reduction */
        aura->err_qint_idx = aura_id % lf->qints;
 
@@ -316,7 +345,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
        pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
        pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
        pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
-       pool->avg_con = ROC_NPA_AVG_CONT;
+       pool->avg_con = 0;
 
        /* Many to one reduction */
        pool->err_qint_idx = pool_id % lf->qints;
index 9f5fe5a175fc9c87d6bcd411a4dc728282e86136..0339876bff6e5a209c81d9678f92a521fe3f143d 100644 (file)
@@ -731,4 +731,7 @@ int __roc_api roc_npa_dump(void);
 /* Reset operation performance counter. */
 int __roc_api roc_npa_pool_op_pc_reset(uint64_t aura_handle);
 
+int __roc_api roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit,
+                                   bool ena);
+
 #endif /* _ROC_NPA_H_ */
index 3af17296332dd5052896c70716ce1aa74aff51ff..1ae8271e6329f9fd5bb651013767b1591c1b2b08 100644 (file)
@@ -290,6 +290,7 @@ INTERNAL {
        roc_nix_vlan_mcam_entry_write;
        roc_nix_vlan_strip_vtag_ena_dis;
        roc_nix_vlan_tpid_set;
+       roc_npa_aura_drop_set;
        roc_npa_aura_limit_modify;
        roc_npa_aura_op_range_set;
        roc_npa_ctx_dump;