* Copyright(C) 2021 Marvell.
*/
+#include <math.h>
+
#include "roc_api.h"
#include "roc_priv.h"
}
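+/* Low-level RQ enable/disable taking struct dev directly, so callers
+ * that do not hold a full nix private structure can reuse it.
+ */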
int
-roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
+nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
- struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
- struct mbox *mbox = (&nix->dev)->mbox;
- int rc;
+ struct mbox *mbox = dev->mbox;
/* Pkts will be dropped silently if RQ is disabled */
if (roc_model_is_cn9k()) {
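+ /* The mask starts out zeroed, so flipping a field selects only
+ * that field for update by the context write.
+ */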
aq->rq_mask.ena = ~(aq->rq_mask.ena);
}
- rc = mbox_process(mbox);
+ return mbox_process(mbox);
+}
+
+int
+roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
+ int rc;
+
+ rc = nix_rq_ena_dis(&nix->dev, rq, enable);
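+ /* CN10K additionally needs pending vector WQEs for this RQ to be
+ * flushed after the state change.
+ */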
if (roc_model_is_cn10k())
plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
return rc;
}
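+/* CN9K RQ context fill. The qints count is passed explicitly so that
+ * callers holding only a struct dev (e.g. an inline device) can use it.
+ */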
-static int
-rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+ bool cfg, bool ena)
{
- struct mbox *mbox = (&nix->dev)->mbox;
+ struct mbox *mbox = dev->mbox;
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
- aq->rq.qint_idx = rq->qid % nix->qints;
+ aq->rq.qint_idx = rq->qid % qints;
aq->rq.xqe_drop_ena = 1;
/* If RED is enabled, fill pass/drop levels for all threshold types */
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
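+ /* RED pass/drop levels are applied against pool occupancy */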
- aq->rq.spb_aura_pass = rq->spb_red_pass;
- aq->rq.lpb_aura_pass = rq->red_pass;
+ aq->rq.spb_pool_pass = rq->spb_red_pass;
+ aq->rq.lpb_pool_pass = rq->red_pass;
- aq->rq.spb_aura_drop = rq->spb_red_drop;
- aq->rq.lpb_aura_drop = rq->red_drop;
+ aq->rq.spb_pool_drop = rq->spb_red_drop;
+ aq->rq.lpb_pool_drop = rq->red_drop;
}
if (cfg) {
aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
- aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
- aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+ aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
+ aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
- aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
- aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+ aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
+ aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
}
}
return 0;
}
-static int
-rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+ bool ena)
{
- struct mbox *mbox = (&nix->dev)->mbox;
struct nix_cn10k_aq_enq_req *aq;
+ struct mbox *mbox = dev->mbox;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
aq->qidx = rq->qid;
aq->rq.cq = rq->qid;
}
- if (rq->ipsech_ena)
+ if (rq->ipsech_ena) {
aq->rq.ipsech_ena = 1;
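+ /* Also drop packets that fail inline IPsec processing */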
+ aq->rq.ipsecd_drop_en = 1;
+ }
aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
- aq->rq.qint_idx = rq->qid % nix->qints;
+ aq->rq.qint_idx = rq->qid % qints;
aq->rq.xqe_drop_ena = 1;
/* If RED is enabled, fill pass/drop levels for all threshold types */
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
- aq->rq.spb_pool_pass = rq->red_pass;
- aq->rq.spb_aura_pass = rq->red_pass;
+ aq->rq.spb_pool_pass = rq->spb_red_pass;
aq->rq.lpb_pool_pass = rq->red_pass;
- aq->rq.lpb_aura_pass = rq->red_pass;
aq->rq.wqe_pool_pass = rq->red_pass;
aq->rq.xqe_pass = rq->red_pass;
- aq->rq.spb_pool_drop = rq->red_drop;
- aq->rq.spb_aura_drop = rq->red_drop;
+ aq->rq.spb_pool_drop = rq->spb_red_drop;
aq->rq.lpb_pool_drop = rq->red_drop;
- aq->rq.lpb_aura_drop = rq->red_drop;
aq->rq.wqe_pool_drop = rq->red_drop;
aq->rq.xqe_drop = rq->red_drop;
}
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
- aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
- aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
- aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
- aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
}
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct mbox *mbox = (&nix->dev)->mbox;
bool is_cn9k = roc_model_is_cn9k();
+ struct dev *dev = &nix->dev;
int rc;
if (roc_nix == NULL || rq == NULL)
rq->roc_nix = roc_nix;
if (is_cn9k)
- rc = rq_cn9k_cfg(nix, rq, false, ena);
+ rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
else
- rc = rq_cfg(nix, rq, false, ena);
+ rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
if (rc)
return rc;
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct mbox *mbox = (&nix->dev)->mbox;
bool is_cn9k = roc_model_is_cn9k();
+ struct dev *dev = &nix->dev;
int rc;
if (roc_nix == NULL || rq == NULL)
rq->roc_nix = roc_nix;
if (is_cn9k)
- rc = rq_cn9k_cfg(nix, rq, true, ena);
+ rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
else
- rc = rq_cfg(nix, rq, true, ena);
+ rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
if (rc)
return rc;
cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
cq->wdata = (uint64_t)cq->qid << 32;
cq->roc_nix = roc_nix;
- cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
/* CQE size is 16 words (128 B) */
desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
cq_ctx->cint_idx = cq->qid;
- cq_ctx->drop = cq->drop_thresh;
- cq_ctx->drop_ena = 1;
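+ /* CN96 A0/CN95 A0 have a CQ-full errata requiring skid room in
+ * the CQ; derive the minimum drop level from the ring size.
+ */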
+ if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
+ const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
+ uint16_t min_rx_drop;
+
+ min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
+ cq_ctx->drop = min_rx_drop;
+ cq_ctx->drop_ena = 1;
+ cq->drop_thresh = min_rx_drop;
+ } else {
+ cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
+ /* Drop processing or RED drop cannot be enabled due to
+ * packets coming in for a second pass from CPT.
+ */
+ if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
+ cq_ctx->drop = cq->drop_thresh;
+ cq_ctx->drop_ena = 1;
+ }
+ }
/* TX pause frames enable flow ctrl on RX side */
if (nix->tx_pause) {
/* Single BPID is allocated for all rx channels for now */
cq_ctx->bpid = nix->bpid[0];
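+ /* Use the SW drop threshold for the BP level, as cq_ctx->drop may
+ * be left unprogrammed when inline inbound is enabled.
+ */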
- cq_ctx->bp = cq_ctx->drop;
+ cq_ctx->bp = cq->drop_thresh;
cq_ctx->bp_ena = 1;
}
memset(&aura, 0, sizeof(aura));
aura.fc_ena = 1;
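+ /* fc_stype selects the store type NPA uses when updating the FC
+ * count at fc_addr.
+ */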
+ if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
+ aura.fc_stype = 0x0; /* STF */
+ else
+ aura.fc_stype = 0x3; /* STSTP */
aura.fc_addr = (uint64_t)sq->fc;
aura.fc_hyst_bits = 0; /* Store count on all updates */
rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
aq->sq.sso_ena = !!sq->sso_ena;
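+ /* Optionally bind SQ completions to a CQ */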
+ aq->sq.cq_ena = !!sq->cq_ena;
+ aq->sq.cq = sq->cqid;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
aq->sq.sso_ena = !!sq->sso_ena;
+ aq->sq.cq_ena = !!sq->cq_ena;
+ aq->sq.cq = sq->cqid;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
rc |= NIX_ERR_NDC_SYNC;
rc |= nix_tm_sq_flush_post(sq);
+
+ /* Restore limit to the max SQB count that the pool was
+ * created with, so that the aura drain succeeds.
+ */
+ roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
rc |= roc_npa_pool_destroy(sq->aura_handle);
plt_free(sq->fc);
plt_free(sq->sqe_mem);