#define ROC_NIX_FC_RXCHAN_CFG 0
#define ROC_NIX_FC_CQ_CFG 1
#define ROC_NIX_FC_TM_CFG 2
+#define ROC_NIX_FC_RQ_CFG 3
uint8_t type;
union {
struct {
bool enable;
} cq_cfg;
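+ /* RQ back-pressure config: tc selects which bpid is mapped,
+ * cq_drop is the CQ drop threshold and pool is the NPA aura
+ * handle to back-pressure when the RQ is backed by SSO.
+ */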
+ struct {
+ uint32_t rq;
+ uint16_t tc;
+ uint16_t cq_drop;
+ bool enable;
+ uint64_t pool;
+ } rq_cfg;
+
struct {
uint32_t sq;
uint16_t tc;
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
-void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
- uint8_t ena, uint8_t force);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
+ uint8_t ena, uint8_t force, uint8_t tc);
/* NPC */
int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
bool ena);
int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);
+int __roc_api roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid);
int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);
int __roc_api roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq);
int __roc_api roc_nix_cq_fini(struct roc_nix_cq *cq);
return rc;
}
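+
+/* Retrieve RQ back-pressure state: read the RQ context to find its
+ * backing LPB aura, then read that aura's BP threshold and enable bit.
+ */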
+static int
+nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct mbox *mbox = get_mbox(roc_nix);
+ struct nix_aq_enq_rsp *rsp;
+ struct npa_aq_enq_req *npa_req;
+ struct npa_aq_enq_rsp *npa_rsp;
+ int rc;
+
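+ /* Read the RQ context; cn9k and cn10k use different AQ requests. */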
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ } else {
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = fc_cfg->rq_cfg.rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ }
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
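+ /* Look up the RQ's backing LPB aura and read its BP state. */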
+ npa_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!npa_req)
+ return -ENOSPC;
+
+ npa_req->aura_id = rsp->rq.lpb_aura;
+ npa_req->ctype = NPA_AQ_CTYPE_AURA;
+ npa_req->op = NPA_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&npa_rsp);
+ if (rc)
+ goto exit;
+
+ fc_cfg->rq_cfg.cq_drop = npa_rsp->aura.bp;
+ fc_cfg->rq_cfg.enable = npa_rsp->aura.bp_ena;
+ fc_cfg->type = ROC_NIX_FC_RQ_CFG;
+
+exit:
+ return rc;
+}
+
static int
nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
return mbox_process(mbox);
}
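+
+/* Configure RQ based back-pressure: mark the NPA aura for BP when the
+ * RQ is backed by SSO, then program the CQ path via
+ * nix_fc_cq_config_set(), reusing the union area shared by rq_cfg and
+ * cq_cfg. Hypothetical caller sketch (values illustrative, not from
+ * this patch):
+ *
+ *	struct roc_nix_fc_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.type = ROC_NIX_FC_RQ_CFG;
+ *	cfg.rq_cfg.rq = qid;
+ *	cfg.rq_cfg.tc = tc;
+ *	cfg.rq_cfg.cq_drop = drop_thresh;
+ *	cfg.rq_cfg.enable = true;
+ *	cfg.rq_cfg.pool = aura_handle;
+ *	rc = roc_nix_fc_config_set(roc_nix, &cfg);
+ */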
+static int
+nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
+{
+ struct roc_nix_fc_cfg tmp;
+ int sso_ena = 0;
+
+ /* Check whether RQ is connected to SSO or not */
+ sso_ena = roc_nix_rq_is_sso_enable(roc_nix, fc_cfg->rq_cfg.rq);
+ if (sso_ena < 0)
+ return -EINVAL;
+
+ if (sso_ena)
+ roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
+ fc_cfg->rq_cfg.enable, true,
+ fc_cfg->rq_cfg.tc);
+
+ /* Copy RQ config to CQ config as they occupy the same union area */
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = ROC_NIX_FC_CQ_CFG;
+ tmp.cq_cfg.rq = fc_cfg->rq_cfg.rq;
+ tmp.cq_cfg.tc = fc_cfg->rq_cfg.tc;
+ tmp.cq_cfg.cq_drop = fc_cfg->rq_cfg.cq_drop;
+ tmp.cq_cfg.enable = fc_cfg->rq_cfg.enable;
+
+ return nix_fc_cq_config_set(roc_nix, &tmp);
+}
+
int
roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_get(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
int
roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
- if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) &&
- !roc_nix_is_sdp(roc_nix))
- return 0;
-
if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
return nix_fc_cq_config_set(roc_nix, fc_cfg);
+ else if (fc_cfg->type == ROC_NIX_FC_RQ_CFG)
+ return nix_fc_rq_config_set(roc_nix, fc_cfg);
else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
}
void
-rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
- uint8_t force)
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+ uint8_t force, uint8_t tc)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct npa_lf *lf = idev_npa_obj_get();
struct npa_aq_enq_rsp *rsp;
struct mbox *mbox;
uint32_t limit;
+ uint64_t shift;
int rc;
if (roc_nix_is_sdp(roc_nix))
return;
limit = rsp->aura.limit;
+ shift = rsp->aura.shift;
+
/* BP is already enabled. */
- if (rsp->aura.bp_ena) {
+ if (rsp->aura.bp_ena && ena) {
uint16_t bpid;
bool nix1;
bpid = rsp->aura.nix0_bpid;
/* If BP ids don't match disable BP. */
- if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
+ if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc])) &&
!force) {
req = mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
+ plt_info("Disabling BP/FC on aura 0x%" PRIx64
+ " as it shared across ports or tc",
+ pool_id);
req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_WRITE;
mbox_process(mbox);
}
+
+ if ((nix1 != nix->is_nix1) || (bpid != nix->bpid[tc]))
+ plt_info("Ignoring aura 0x%" PRIx64 "->%u bpid mapping",
+ pool_id, nix->bpid[tc]);
return;
}
/* BP was previously enabled but is now disabled; skip. */
- if (rsp->aura.bp)
+ if (rsp->aura.bp && ena)
return;
req = mbox_alloc_msg_npa_aq_enq(mbox);
if (ena) {
if (nix->is_nix1) {
- req->aura.nix1_bpid = nix->bpid[0];
+ req->aura.nix1_bpid = nix->bpid[tc];
req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
} else {
- req->aura.nix0_bpid = nix->bpid[0];
+ req->aura.nix0_bpid = nix->bpid[tc];
req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
}
- req->aura.bp = NIX_RQ_AURA_THRESH(
- limit > 128 ? 256 : limit); /* 95% of size*/
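+ /* aura.bp is narrower than aura.limit, so scale the limit down
+ * by aura.shift before NIX_RQ_AURA_THRESH takes ~95% of the size.
+ */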
+ req->aura.bp = NIX_RQ_AURA_THRESH(limit >> shift);
+ req->aura_mask.bp = ~(req->aura_mask.bp);
+ } else {
+ req->aura.bp = 0;
req->aura_mask.bp = ~(req->aura_mask.bp);
}
return "Default Tree";
else if (tree == ROC_NIX_TM_RLIMIT)
return "Rate Limit Tree";
+ else if (tree == ROC_NIX_TM_PFC)
+ return "PFC Tree";
else if (tree == ROC_NIX_TM_USER)
return "User Tree";
return "???";
return rc;
}
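+
+/* Return 1 if the RQ delivers packets to SSO (event device), 0 if it
+ * does not, or a negative error if the RQ context read fails.
+ */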
+int
+roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct dev *dev = &nix->dev;
+ struct mbox *mbox = dev->mbox;
+ bool sso_enable;
+ int rc;
+
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ } else {
+ struct nix_cn10k_aq_enq_rsp *rsp;
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ sso_enable = rsp->rq.sso_ena;
+ }
+
+ return sso_enable;
+}
+
int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
bool cfg, bool ena)
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
struct nix_tm_node_list *list;
- bool is_pf_or_lbk = false;
struct nix_tm_node *node;
bool skip_bp = false;
uint32_t hw_lvl;
list = nix_tm_node_list(nix, tree);
- if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
- is_pf_or_lbk = true;
-
for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != hw_lvl)
* set per channel only for PF or lbk vf.
*/
node->bp_capa = 0;
- if (is_pf_or_lbk && !skip_bp &&
+ if (!nix->sdp_link && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
skip_bp = false;
struct nix_tm_node *sq_node;
struct nix_tm_node *parent;
struct nix_tm_node *node;
+ uint8_t parent_lvl;
uint8_t k = 0;
int rc = 0;
if (!sq_node)
return -ENOENT;
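+ /* BP-capable parent level depends on TL1 access: SCH2 with TL1
+ * access (PF), SCH1 otherwise.
+ */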
+ parent_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH2 :
+ ROC_TM_LVL_SCH1);
+
parent = sq_node->parent;
while (parent) {
- if (parent->lvl == ROC_TM_LVL_SCH2)
+ if (parent->lvl == parent_lvl)
break;
parent = parent->parent;
roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint8_t leaf_lvl, lvl, lvl_start, lvl_end;
uint32_t nonleaf_id = nix->nb_tx_queues;
struct nix_tm_node *node = NULL;
- uint8_t leaf_lvl, lvl, lvl_end;
uint32_t tl2_node_id;
uint32_t parent, i;
int rc = -ENOMEM;
parent = ROC_NIX_TM_NODE_ID_INVALID;
- lvl_end = ROC_TM_LVL_SCH3;
- leaf_lvl = ROC_TM_LVL_QUEUE;
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+ ROC_TM_LVL_SCH2);
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
/* TL1 node */
node = nix_tm_node_alloc();
parent = nonleaf_id;
nonleaf_id++;
- /* TL2 node */
- rc = -ENOMEM;
- node = nix_tm_node_alloc();
- if (!node)
- goto error;
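+ /* On a PF (which has TL1 access) the root is TL1, so create an
+ * explicit SCH1 (TL2) node and start per-SQ chains at SCH2; on a
+ * VF the root already behaves as TL2, so chains start at SCH1.
+ */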
+ lvl_start = ROC_TM_LVL_SCH1;
+ if (roc_nix_is_pf(roc_nix)) {
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
- node->id = nonleaf_id;
- node->parent_id = parent;
- node->priority = 0;
- node->weight = NIX_TM_DFLT_RR_WT;
- node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
- node->lvl = ROC_TM_LVL_SCH1;
- node->tree = ROC_NIX_TM_PFC;
- node->rel_chan = NIX_TM_CHAN_INVALID;
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
- rc = nix_tm_node_add(roc_nix, node);
- if (rc)
- goto error;
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
- tl2_node_id = nonleaf_id;
- nonleaf_id++;
+ lvl_start = ROC_TM_LVL_SCH2;
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+ } else {
+ tl2_node_id = parent;
+ }
for (i = 0; i < nix->nb_tx_queues; i++) {
parent = tl2_node_id;
- for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ for (lvl = lvl_start; lvl <= lvl_end; lvl++) {
rc = -ENOMEM;
node = nix_tm_node_alloc();
if (!node)
nonleaf_id++;
}
- lvl = ROC_TM_LVL_SCH4;
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+ ROC_TM_LVL_SCH3);
rc = -ENOMEM;
node = nix_tm_node_alloc();
roc_nix_fc_config_set;
roc_nix_fc_mode_set;
roc_nix_fc_mode_get;
- rox_nix_fc_npa_bp_cfg;
+ roc_nix_fc_npa_bp_cfg;
roc_nix_get_base_chan;
roc_nix_get_pf;
roc_nix_get_pf_func;
roc_nix_rq_ena_dis;
roc_nix_rq_fini;
roc_nix_rq_init;
+ roc_nix_rq_is_sso_enable;
roc_nix_rq_modify;
roc_nix_rss_default_setup;
roc_nix_rss_flowkey_set;
rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
false);
}
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
- rxq_sp->qconf.mp->pool_id, true,
- dev->force_ena_bp);
+
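+ /* Enable aura BP for the queue's pool only when Tx pause is
+ * requested, mapping it to the queue's traffic class.
+ */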
+ if (rxq_sp->tx_pause)
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp, rxq_sp->tc);
cnxk_eth_dev->nb_rxq_sso++;
}
rxq_sp = cnxk_eth_rxq_to_sp(
eth_dev->data->rx_queues[rx_queue_id]);
rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
- rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, false,
- dev->force_ena_bp);
+ dev->force_ena_bp, 0);
cnxk_eth_dev->nb_rxq_sso--;
/* Enable drop_re if it was disabled earlier */
struct cnxk_eth_dev *dev;
struct cnxk_eth_qconf qconf;
uint16_t qid;
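+ /* Tx pause state and traffic class for this queue; used when
+ * toggling aura back-pressure for SSO-attached Rx queues.
+ */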
+ uint8_t tx_pause;
+ uint8_t tc;
} __plt_cache_aligned;
struct cnxk_eth_txq_sp {