uint8_t spb_red_drop;
/* Average SPB aura level pass threshold for RED */
uint8_t spb_red_pass;
+ /* LPB aura drop enable */
+ bool lpb_drop_ena;
+ /* SPB aura drop enable */
+ bool spb_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
bool inl_dev_ref;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
inl_rq->lpb_size = rq->lpb_size;
+ inl_rq->lpb_drop_ena = true;
+ inl_rq->spb_ena = rq->spb_ena;
+ inl_rq->spb_aura_handle = rq->spb_aura_handle;
+ inl_rq->spb_size = rq->spb_size;
+ inl_rq->spb_drop_ena = !!rq->spb_ena;
if (!roc_model_is_cn9k()) {
uint64_t aura_limit =
roc_npa_aura_op_limit_get(inl_rq->aura_handle);
uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
if (aura_shift < 8)
aura_shift = 0;
else
aura_shift = aura_shift - 8;
- /* Set first pass RQ to drop when half of the buffers are in
+ /* Set first pass RQ to drop once a portion of the buffers are in
* use to avoid metabuf alloc failure. This is needed as long
- * as we cannot use different
+ * as we cannot use different aura.
*/
- inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
- inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
+ drop_pc = inl_dev->lpb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
+ }
+
+ if (inl_rq->spb_ena) {
+ uint64_t aura_limit =
+ roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+ uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
+
+ if (aura_shift < 8)
+ aura_shift = 0;
+ else
+ aura_shift = aura_shift - 8;
+
+ /* Set first pass RQ to drop once a portion of the buffers are in
+ * use to avoid metabuf alloc failure. This is needed as long
+ * as we cannot use different aura.
+ */
+ drop_pc = inl_dev->spb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
}
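
For reference, the drop-threshold arithmetic above, worked through for a hypothetical aura of 4096 buffers with the 40 percent default (a standalone sketch; the numbers are illustrative, not taken from the patch):

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t aura_limit = 4096; /* hypothetical buffer count */
            uint64_t drop_pc = 40;      /* NIX_AURA_DROP_PC_DFLT */
            uint64_t aura_shift = 12;   /* plt_log2_u32(4096) */
            uint64_t aura_drop;

            /* Same scaling as the hunk above: once the limit exceeds 256,
             * the threshold is expressed in units of 2^aura_shift buffers.
             */
            if (aura_shift < 8)
                    aura_shift = 0;
            else
                    aura_shift = aura_shift - 8; /* 4 */

            aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
            /* (4096 * 40 / 100) >> 4 = 1638 >> 4 = 102, i.e. drops start
             * once roughly 102 << 4 = 1632 buffers (~40%) are in use.
             */
            printf("aura_drop = %" PRIu64 "\n", aura_drop);
            return 0;
    }
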
/* Enable IPSec */
if (rc)
plt_err("Failed to disable inline device rq, rc=%d", rc);
+ roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
+ if (inl_rq->spb_ena)
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
+
/* Flush NIX LF for CN10K */
nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
uint16_t chan_mask;
bool attach_cptlf;
bool wqe_skip;
+ uint8_t spb_drop_pc;
+ uint8_t lpb_drop_pc;
/* End of input parameters */
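
A hypothetical consumer sketch for the two new knobs: the helper name, the pci_dev plumbing and the 25/15 values are illustrative assumptions; only the struct fields shown above and the usual roc_nix_inl_dev_init() entry point are assumed from the existing API.

    #include <string.h>
    #include "roc_api.h"

    /* Illustrative consumer: override the 40% defaults before init.
     * Leaving either field at 0 keeps NIX_AURA_DROP_PC_DFLT.
     */
    static struct roc_nix_inl_dev inl_dev;

    static int
    example_inl_dev_setup(struct plt_pci_device *pci_dev)
    {
            memset(&inl_dev, 0, sizeof(inl_dev));
            inl_dev.pci_dev = pci_dev;
            inl_dev.attach_cptlf = true;
            inl_dev.wqe_skip = false;
            inl_dev.lpb_drop_pc = 25; /* drop once 25% of LPB aura is in use */
            inl_dev.spb_drop_pc = 15; /* drop once 15% of SPB aura is in use */

            /* Struct must outlive the device; ROC state lives in its
             * reserved area.
             */
            return roc_nix_inl_dev_init(&inl_dev);
    }
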
#define ROC_NIX_INL_MEM_SZ (1280)
#include "roc_api.h"
#include "roc_priv.h"
+#define NIX_AURA_DROP_PC_DFLT 40
+
/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG \
(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR | \
inl_dev->chan_mask = roc_inl_dev->chan_mask;
inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
+ inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
+ inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
+
+ if (roc_inl_dev->spb_drop_pc)
+ inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
+ if (roc_inl_dev->lpb_drop_pc)
+ inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;
/* Initialize base device */
rc = dev_init(&inl_dev->dev, pci_dev);
struct roc_nix_rq rq;
uint16_t rq_refs;
bool is_nix1;
+ uint8_t spb_drop_pc;
+ uint8_t lpb_drop_pc;
/* NIX/CPT data */
void *inb_sa_base;
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
aq->rq.qint_idx = rq->qid % qints;
- aq->rq.xqe_drop_ena = 1;
+ aq->rq.xqe_drop_ena = 0;
+ aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
+ aq->rq.spb_drop_ena = rq->spb_drop_ena;
/* If RED enabled, then fill enable for all cases */
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+ aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
+ aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
}
return 0;
}
+
+int
+roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
+{
+ struct npa_aq_enq_req *aura_req;
+ struct npa_lf *lf;
+ int rc;
+
+ lf = idev_npa_obj_get();
+ if (lf == NULL)
+ return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+ aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
+ if (aura_req == NULL)
+ return -ENOMEM;
+ aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+ aura_req->aura.aura_drop_ena = ena;
+ aura_req->aura.aura_drop = limit;
+ aura_req->aura_mask.aura_drop_ena =
+ ~(aura_req->aura_mask.aura_drop_ena);
+ aura_req->aura_mask.aura_drop = ~(aura_req->aura_mask.aura_drop);
+ rc = mbox_process(lf->mbox);
+
+ return rc;
+}
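
The new helper can also be driven outside the inline-device path; a sketch assuming the aura was created through the ROC NPA pool API (so its context shift matches the scaling below) and an illustrative 40 percent threshold:

    static int
    example_aura_drop(uint64_t aura_handle)
    {
            uint64_t limit = roc_npa_aura_op_limit_get(aura_handle);
            uint64_t shift = plt_log2_u32(limit);
            uint64_t drop;
            int rc;

            shift = shift < 8 ? 0 : shift - 8;
            /* Start dropping once roughly 40% of the buffers are in use */
            drop = ((limit * 40) / 100) >> shift;

            rc = roc_npa_aura_drop_set(aura_handle, drop, true);
            if (rc)
                    return rc;

            /* ... datapath runs ... */

            /* Clear the drop threshold before the pool is torn down */
            return roc_npa_aura_drop_set(aura_handle, 0, false);
    }
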
+
static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
- aura->avg_con = ROC_NPA_AVG_CONT;
+ aura->avg_con = 0;
/* Many to one reduction */
aura->err_qint_idx = aura_id % lf->qints;
pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
- pool->avg_con = ROC_NPA_AVG_CONT;
+ pool->avg_con = 0;
/* Many to one reduction */
pool->err_qint_idx = pool_id % lf->qints;
/* Reset operation performance counter. */
int __roc_api roc_npa_pool_op_pc_reset(uint64_t aura_handle);
+int __roc_api roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit,
+ bool ena);
+
#endif /* _ROC_NPA_H_ */
roc_nix_vlan_mcam_entry_write;
roc_nix_vlan_strip_vtag_ena_dis;
roc_nix_vlan_tpid_set;
+ roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;