X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fcnxk%2Froc_nix_queue.c;h=cba1294ef19669edd91eab47212f4813173ce131;hb=a83db6b3c14b5d210c013f8478c86ca7b7b8cf8c;hp=7e2f86eca7482e827110dddebaa55a90996fe52d;hpb=097835ecdfdb0f6591d4fe2bc381b5849581c289;p=dpdk.git

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 7e2f86eca7..cba1294ef1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -2,6 +2,8 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include <math.h>
+
 #include "roc_api.h"
 #include "roc_priv.h"
 
@@ -27,11 +29,9 @@ nix_qsize_clampup(uint32_t val)
 }
 
 int
-roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
+nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
 {
-	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
-	struct mbox *mbox = (&nix->dev)->mbox;
-	int rc;
+	struct mbox *mbox = dev->mbox;
 
 	/* Pkts will be dropped silently if RQ is disabled */
 	if (roc_model_is_cn9k()) {
@@ -56,17 +56,27 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 		aq->rq_mask.ena = ~(aq->rq_mask.ena);
 	}
 
-	rc = mbox_process(mbox);
+	return mbox_process(mbox);
+}
+
+int
+roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
+	int rc;
+
+	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
 
 	if (roc_model_is_cn10k())
 		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
 	return rc;
 }
 
-static int
-rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+		bool cfg, bool ena)
 {
-	struct mbox *mbox = (&nix->dev)->mbox;
+	struct mbox *mbox = dev->mbox;
 	struct nix_aq_enq_req *aq;
 
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
@@ -116,16 +126,16 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
-	aq->rq.qint_idx = rq->qid % nix->qints;
+	aq->rq.qint_idx = rq->qid % qints;
 	aq->rq.xqe_drop_ena = 1;
 
 	/* If RED enabled, then fill enable for all cases */
 	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
-		aq->rq.spb_aura_pass = rq->spb_red_pass;
-		aq->rq.lpb_aura_pass = rq->red_pass;
+		aq->rq.spb_pool_pass = rq->spb_red_pass;
+		aq->rq.lpb_pool_pass = rq->red_pass;
 
-		aq->rq.spb_aura_drop = rq->spb_red_drop;
-		aq->rq.lpb_aura_drop = rq->red_drop;
+		aq->rq.spb_pool_drop = rq->spb_red_drop;
+		aq->rq.lpb_pool_drop = rq->red_drop;
 	}
 
 	if (cfg) {
@@ -166,22 +176,23 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
 
 		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
-			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
-			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
+			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
 
-			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
-			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
+			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
 		}
 	}
 
 	return 0;
 }
 
-static int
-rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+	   bool ena)
 {
-	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_cn10k_aq_enq_req *aq;
+	struct mbox *mbox = dev->mbox;
 
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
 	aq->qidx = rq->qid;
@@ -218,8 +229,10 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq.cq = rq->qid;
 	}
 
-	if (rq->ipsech_ena)
+	if (rq->ipsech_ena) {
 		aq->rq.ipsech_ena = 1;
+		aq->rq.ipsecd_drop_en = 1;
+	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
 
@@ -258,22 +271,18 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
-	aq->rq.qint_idx = rq->qid % nix->qints;
+	aq->rq.qint_idx = rq->qid % qints;
 	aq->rq.xqe_drop_ena = 1;
 
 	/* If RED enabled, then fill enable for all cases */
 	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
-		aq->rq.spb_pool_pass = rq->red_pass;
-		aq->rq.spb_aura_pass = rq->red_pass;
+		aq->rq.spb_pool_pass = rq->spb_red_pass;
 		aq->rq.lpb_pool_pass = rq->red_pass;
-		aq->rq.lpb_aura_pass = rq->red_pass;
 		aq->rq.wqe_pool_pass = rq->red_pass;
 		aq->rq.xqe_pass = rq->red_pass;
 
-		aq->rq.spb_pool_drop = rq->red_drop;
-		aq->rq.spb_aura_drop = rq->red_drop;
+		aq->rq.spb_pool_drop = rq->spb_red_drop;
 		aq->rq.lpb_pool_drop = rq->red_drop;
-		aq->rq.lpb_aura_drop = rq->red_drop;
 		aq->rq.wqe_pool_drop = rq->red_drop;
 		aq->rq.xqe_drop = rq->red_drop;
 	}
@@ -333,16 +342,12 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 
 		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
 			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
-			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
 			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
-			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
 			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
 			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
 
 			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
-			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
 			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
-			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
 			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
 			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
 		}
@@ -357,6 +362,7 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct mbox *mbox = (&nix->dev)->mbox;
 	bool is_cn9k = roc_model_is_cn9k();
+	struct dev *dev = &nix->dev;
 	int rc;
 
 	if (roc_nix == NULL || rq == NULL)
@@ -368,9 +374,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
-		rc = rq_cn9k_cfg(nix, rq, false, ena);
+		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
 	else
-		rc = rq_cfg(nix, rq, false, ena);
+		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
 	if (rc)
 		return rc;
 
@@ -384,6 +390,7 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct mbox *mbox = (&nix->dev)->mbox;
 	bool is_cn9k = roc_model_is_cn9k();
+	struct dev *dev = &nix->dev;
 	int rc;
 
 	if (roc_nix == NULL || rq == NULL)
@@ -395,9 +402,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
-		rc = rq_cn9k_cfg(nix, rq, true, ena);
+		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
 	else
-		rc = rq_cfg(nix, rq, true, ena);
+		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
 	if (rc)
 		return rc;
 
@@ -435,7 +442,6 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
 	cq->wdata = (uint64_t)cq->qid << 32;
 	cq->roc_nix = roc_nix;
-	cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
 
 	/* CQE of W16 */
 	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
@@ -476,14 +482,30 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 
 	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
 	cq_ctx->cint_idx = cq->qid;
-	cq_ctx->drop = cq->drop_thresh;
-	cq_ctx->drop_ena = 1;
+	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
+		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
+		uint16_t min_rx_drop;
+
+		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
+		cq_ctx->drop = min_rx_drop;
+		cq_ctx->drop_ena = 1;
+		cq->drop_thresh = min_rx_drop;
+	} else {
+		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
+		/* Drop processing or red drop cannot be enabled due to
+		 * packets coming for second pass from CPT.
+		 */
+		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
+			cq_ctx->drop = cq->drop_thresh;
+			cq_ctx->drop_ena = 1;
+		}
+	}
 
 	/* TX pause frames enable flow ctrl on RX side */
 	if (nix->tx_pause) {
 		/* Single BPID is allocated for all rx channels for now */
 		cq_ctx->bpid = nix->bpid[0];
-		cq_ctx->bp = cq_ctx->drop;
+		cq_ctx->bp = cq->drop_thresh;
 		cq_ctx->bp_ena = 1;
 	}
 
@@ -585,6 +607,10 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	memset(&aura, 0, sizeof(aura));
 	aura.fc_ena = 1;
+	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
+		aura.fc_stype = 0x0; /* STF */
+	else
+		aura.fc_stype = 0x3; /* STSTP */
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
 				 &pool);
@@ -635,6 +661,8 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sqe_stype = NIX_STYPE_STF;
 	aq->sq.ena = 1;
 	aq->sq.sso_ena = !!sq->sso_ena;
+	aq->sq.cq_ena = !!sq->cq_ena;
+	aq->sq.cq = sq->cqid;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -733,6 +761,8 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sqe_stype = NIX_STYPE_STF;
 	aq->sq.ena = 1;
 	aq->sq.sso_ena = !!sq->sso_ena;
+	aq->sq.cq_ena = !!sq->cq_ena;
+	aq->sq.cq = sq->cqid;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -908,6 +938,11 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
 		rc |= NIX_ERR_NDC_SYNC;
 
 	rc |= nix_tm_sq_flush_post(sq);
+
+	/* Restore limit to max SQB count that the pool was created
+	 * with, for aura drain to succeed.
+	 */
+	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
 	rc |= roc_npa_pool_destroy(sq->aura_handle);
 	plt_free(sq->fc);
 	plt_free(sq->sqe_mem);