git.droids-corp.org - dpdk.git/commitdiff
common/cnxk: enable TM to listen on Rx pause frames
author Nithin Dabilpuram <ndabilpuram@marvell.com>
Tue, 2 Nov 2021 15:54:15 +0000 (21:24 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Wed, 3 Nov 2021 15:05:47 +0000 (16:05 +0100)
Enable TM topology to listen on backpressure received when
Rx pause frame is enabled. Only one TM node in TL3/TL2 per
channel can listen on backpressure on that channel.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
drivers/common/cnxk/roc_nix.c
drivers/common/cnxk/roc_nix.h
drivers/common/cnxk/roc_nix_fc.c
drivers/common/cnxk/roc_nix_priv.h
drivers/common/cnxk/roc_nix_tm.c
drivers/common/cnxk/roc_nix_tm_ops.c
drivers/common/cnxk/roc_nix_tm_utils.c
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev_ops.c

index fbfc550beaba7a9ecba561d3bab38fd4271d1ad0..c96b26604c90cea8ad3dbf9f4ca0188827b68d09 100644 (file)
@@ -418,6 +418,9 @@ skip_dev_init:
        if (nix->lbk_link) {
                nix->rx_pause = 1;
                nix->tx_pause = 1;
+       } else if (!roc_nix_is_vf_or_sdp(roc_nix)) {
+               /* Get the current state of flow control */
+               roc_nix_fc_mode_get(roc_nix);
        }
 
        /* Register error and ras interrupts */
index d83a9b5bce638aa5dd8864c82ef6f1fc7bd571a1..8f36ce70216f71e180fbdc41b131cda00de01c08 100644 (file)
@@ -153,7 +153,10 @@ struct roc_nix_vlan_config {
 };
 
 struct roc_nix_fc_cfg {
-       bool cq_cfg_valid;
+#define ROC_NIX_FC_RXCHAN_CFG 0
+#define ROC_NIX_FC_CQ_CFG     1
+#define ROC_NIX_FC_TM_CFG     2
+       uint8_t type;
        union {
                struct {
                        bool enable;
@@ -164,6 +167,10 @@ struct roc_nix_fc_cfg {
                        uint16_t cq_drop;
                        bool enable;
                } cq_cfg;
+
+               struct {
+                       bool enable;
+               } tm_cfg;
        };
 };
 
index ef46842bfab3e7020d4d9f2384a9d2aa43634f6a..645325813a7e321cf025bf7a1e268f870f19b045 100644 (file)
@@ -24,7 +24,7 @@ nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
        else
                fc_cfg->rxchan_cfg.enable = false;
 
-       fc_cfg->cq_cfg_valid = false;
+       fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;
 
        return 0;
 }
@@ -103,7 +103,7 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 
        fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
        fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
-       fc_cfg->cq_cfg_valid = true;
+       fc_cfg->type = ROC_NIX_FC_CQ_CFG;
 
 exit:
        return rc;
@@ -160,10 +160,14 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
        if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
                return 0;
 
-       if (fc_cfg->cq_cfg_valid)
+       if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
                return nix_fc_cq_config_get(roc_nix, fc_cfg);
-       else
+       else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
                return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
+       else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
+               return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);
+
+       return -EINVAL;
 }
 
 int
@@ -172,11 +176,15 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
        if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
                return 0;
 
-       if (fc_cfg->cq_cfg_valid)
+       if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
                return nix_fc_cq_config_set(roc_nix, fc_cfg);
-       else
+       else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
                return nix_fc_rxchan_bpid_set(roc_nix,
                                              fc_cfg->rxchan_cfg.enable);
+       else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
+               return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+
+       return -EINVAL;
 }
 
 enum roc_nix_fc_mode
index 9805d4a8a631c7df76504c49e7830fbb422d7306..60a00a3141258cfc781eed06bb5c257fc794a30e 100644 (file)
@@ -74,6 +74,7 @@ struct nix_tm_node {
        uint32_t red_algo : 2;
        uint32_t pkt_mode : 1;
        uint32_t pkt_mode_set : 1;
+       uint32_t bp_capa : 1;
 
        bool child_realloc;
        struct nix_tm_node *parent;
@@ -373,6 +374,8 @@ int nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
               bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
+int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
 
 /*
  * TM priv utils.
index 08d6e866fe43c7b9277bfa9e6e3151d982315ba8..b3d8ebd3c2fedaf661aca11a9394ca9e7b517626 100644 (file)
@@ -98,16 +98,32 @@ int
 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 {
        struct nix_tm_node_list *list;
+       bool is_pf_or_lbk = false;
        struct nix_tm_node *node;
+       bool skip_bp = false;
        uint32_t hw_lvl;
        int rc = 0;
 
        list = nix_tm_node_list(nix, tree);
 
+       if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
+               is_pf_or_lbk = true;
+
        for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
                TAILQ_FOREACH(node, list, node) {
                        if (node->hw_lvl != hw_lvl)
                                continue;
+
+                       /* Only one TL3/TL2 Link config should have BP enable
+                        * set per channel only for PF or lbk vf.
+                        */
+                       node->bp_capa = 0;
+                       if (is_pf_or_lbk && !skip_bp &&
+                           node->hw_lvl == nix->tm_link_cfg_lvl) {
+                               node->bp_capa = 1;
+                               skip_bp = true;
+                       }
+
                        rc = nix_tm_node_reg_conf(nix, node);
                        if (rc)
                                goto exit;
@@ -300,6 +316,130 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
        return 0;
 }
 
+int
+nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+{
+       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+       enum roc_nix_tm_tree tree = nix->tm_tree;
+       struct mbox *mbox = (&nix->dev)->mbox;
+       struct nix_txschq_config *req = NULL;
+       struct nix_tm_node_list *list;
+       struct nix_tm_node *node;
+       uint8_t k = 0;
+       uint16_t link;
+       int rc = 0;
+
+       list = nix_tm_node_list(nix, tree);
+       link = nix->tx_link;
+
+       TAILQ_FOREACH(node, list, node) {
+               if (node->hw_lvl != nix->tm_link_cfg_lvl)
+                       continue;
+
+               if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+                       continue;
+
+               if (!req) {
+                       req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+                       req->lvl = nix->tm_link_cfg_lvl;
+                       k = 0;
+               }
+
+               req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+               req->regval[k] = enable ? BIT_ULL(13) : 0;
+               req->regval_mask[k] = ~BIT_ULL(13);
+               k++;
+
+               if (k >= MAX_REGS_PER_MBOX_MSG) {
+                       req->num_regs = k;
+                       rc = mbox_process(mbox);
+                       if (rc)
+                               goto err;
+                       req = NULL;
+               }
+       }
+
+       if (req) {
+               req->num_regs = k;
+               rc = mbox_process(mbox);
+               if (rc)
+                       goto err;
+       }
+
+       return 0;
+err:
+       plt_err("Failed to %s bp on link %u, rc=%d(%s)",
+               enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
+       return rc;
+}
+
+int
+nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
+{
+       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+       struct nix_txschq_config *req = NULL, *rsp;
+       enum roc_nix_tm_tree tree = nix->tm_tree;
+       struct mbox *mbox = (&nix->dev)->mbox;
+       struct nix_tm_node_list *list;
+       struct nix_tm_node *node;
+       bool found = false;
+       uint8_t enable = 1;
+       uint8_t k = 0, i;
+       uint16_t link;
+       int rc = 0;
+
+       list = nix_tm_node_list(nix, tree);
+       link = nix->tx_link;
+
+       TAILQ_FOREACH(node, list, node) {
+               if (node->hw_lvl != nix->tm_link_cfg_lvl)
+                       continue;
+
+               if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+                       continue;
+
+               found = true;
+               if (!req) {
+                       req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+                       req->read = 1;
+                       req->lvl = nix->tm_link_cfg_lvl;
+                       k = 0;
+               }
+
+               req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+               k++;
+
+               if (k >= MAX_REGS_PER_MBOX_MSG) {
+                       req->num_regs = k;
+                       rc = mbox_process_msg(mbox, (void **)&rsp);
+                       if (rc || rsp->num_regs != k)
+                               goto err;
+                       req = NULL;
+
+                       /* Report it as enabled only if enabled or all */
+                       for (i = 0; i < k; i++)
+                               enable &= !!(rsp->regval[i] & BIT_ULL(13));
+               }
+       }
+
+       if (req) {
+               req->num_regs = k;
+               rc = mbox_process(mbox);
+               if (rc)
+                       goto err;
+               /* Report it as enabled only if enabled or all */
+               for (i = 0; i < k; i++)
+                       enable &= !!(rsp->regval[i] & BIT_ULL(13));
+       }
+
+       *is_enabled = found ? !!enable : false;
+       return 0;
+err:
+       plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
+               roc_error_msg_get(rc));
+       return rc;
+}
+
 int
 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
 {
@@ -461,6 +601,13 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
                }
        }
 
+       /* Disable backpressure */
+       rc = nix_tm_bp_config_set(roc_nix, false);
+       if (rc) {
+               plt_err("Failed to disable backpressure for flush, rc=%d", rc);
+               return rc;
+       }
+
        /* Disable smq xoff for case it was enabled earlier */
        rc = nix_tm_smq_xoff(nix, node->parent, false);
        if (rc) {
@@ -580,6 +727,16 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
                }
        }
 
+       if (!nix->rx_pause)
+               return 0;
+
+       /* Restore backpressure */
+       rc = nix_tm_bp_config_set(roc_nix, true);
+       if (rc) {
+               plt_err("Failed to restore backpressure, rc=%d", rc);
+               return rc;
+       }
+
        return 0;
 }
 
index eee80d5f0038265c0d4de598cdb37251e066dbf8..6a417c0c887851d68318ea1afe2cf133b66b7b0f 100644 (file)
@@ -452,6 +452,15 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
                }
        }
 
+       /* Disable backpressure, it will be enabled back if needed on
+        * hierarchy enable
+        */
+       rc = nix_tm_bp_config_set(roc_nix, false);
+       if (rc) {
+               plt_err("Failed to disable backpressure for flush, rc=%d", rc);
+               goto cleanup;
+       }
+
        /* Flush all tx queues */
        for (i = 0; i < sq_cnt; i++) {
                sq = nix->sqs[i];
index a135454eeba98951b2c1be479bb26d17e2fa82c2..543adf9e56a6c03fe16427679ed5a93044b3e78e 100644 (file)
@@ -522,6 +522,10 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
                    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
                        regval[k] = BIT_ULL(12) | relchan;
+                       /* Enable BP if node is BP capable and rx_pause is set
+                        */
+                       if (nix->rx_pause && node->bp_capa)
+                               regval[k] |= BIT_ULL(13);
                        k++;
                }
 
@@ -541,6 +545,10 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
                    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
                        regval[k] = BIT_ULL(12) | relchan;
+                       /* Enable BP if node is BP capable and rx_pause is set
+                        */
+                       if (nix->rx_pause && node->bp_capa)
+                               regval[k] |= BIT_ULL(13);
                        k++;
                }
 
index db54468dbca1f875bfe1c4b7519384e84dc05654..e9bebfe615a6ade46edc4c5c38652776c689e3ce 100644 (file)
@@ -1199,7 +1199,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        }
 
        /* Init flow control configuration */
-       fc_cfg.cq_cfg_valid = false;
+       fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
        fc_cfg.rxchan_cfg.enable = true;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc) {
index 62306b6cd6fb1285431d3293b16ccab99a254373..e0696f1ae53d5b8eb675fdb0c982acd0acc1570a 100644 (file)
@@ -228,7 +228,7 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];
-       fc_cfg.cq_cfg_valid = true;
+       fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.enable = enable;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;