net/cnxk: support flow control operations
author     Sunil Kumar Kori <skori@marvell.com>
           Wed, 23 Jun 2021 04:46:38 +0000 (10:16 +0530)
committer  Jerin Jacob <jerinj@marvell.com>
           Tue, 29 Jun 2021 22:45:54 +0000 (00:45 +0200)
Implement set and get operations for link flow control.
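
For reference, a minimal sketch of how an application exercises these
operations through the generic ethdev API (assuming "port_id" is an
already-initialized cnxk port; error handling elided):

    struct rte_eth_fc_conf fc_conf;

    /* Read back the current mode; the AF driver leaves the hardware
     * in RTE_FC_FULL by default.
     */
    memset(&fc_conf, 0, sizeof(fc_conf));
    rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

    /* Only the mode field is honoured by this PMD; high_water,
     * low_water, pause_time, mac_ctrl_frame_fwd and autoneg must
     * remain zero, otherwise the set call fails with -EINVAL.
     */
    fc_conf.mode = RTE_FC_TX_PAUSE;
    rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);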

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
doc/guides/nics/cnxk.rst
doc/guides/nics/features/cnxk.ini
doc/guides/nics/features/cnxk_vec.ini
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev.h
drivers/net/cnxk/cnxk_ethdev_ops.c

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 5652ee0..d9365ff 100644
@@ -26,6 +26,7 @@ Features of the CNXK Ethdev PMD are:
 - MAC filtering
 - Inner and Outer Checksum offload
 - Link state information
+- Link flow control
 - MTU update
 - Scatter-Gather IO support
 - Vector Poll mode driver
diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 298f167..afd0f01 100644
@@ -23,6 +23,7 @@ Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Inner RSS            = Y
+Flow control         = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 L3 checksum offload  = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini
index a673cc1..4bd11ce 100644
@@ -22,6 +22,7 @@ Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Inner RSS            = Y
+Flow control         = Y
 Jumbo frame          = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0311df3..abca49b 100644
@@ -81,6 +81,55 @@ nix_recalc_mtu(struct rte_eth_dev *eth_dev)
        return rc;
 }
 
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_conf = {0};
+       int rc;
+
+       /* Both Rx & Tx flow ctrl are enabled (RTE_FC_FULL) in HW
+        * by the AF driver; reflect that state in the PMD structure.
+        */
+       rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+       if (rc)
+               goto exit;
+
+       fc->mode = fc_conf.mode;
+       fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_RX_PAUSE);
+       fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+       return rc;
+}
+
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_cfg = {0};
+
+       if (roc_nix_is_vf_or_sdp(&dev->nix))
+               return 0;
+
+       fc_cfg.mode = fc->mode;
+
+       /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+       if (roc_model_is_cn96_ax() &&
+           (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+               fc_cfg.mode =
+                               (fc_cfg.mode == RTE_FC_FULL ||
+                               fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+                               RTE_FC_TX_PAUSE : RTE_FC_NONE;
+       }
+
+       return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
 uint64_t
 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 {
@@ -686,6 +735,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        struct rte_eth_txmode *txmode = &conf->txmode;
        char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+       struct roc_nix_fc_cfg fc_cfg = {0};
        struct roc_nix *nix = &dev->nix;
        struct rte_ether_addr *ea;
        uint8_t nb_rxq, nb_txq;
@@ -867,6 +917,21 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto cq_fini;
        }
 
+       /* Init flow control configuration */
+       fc_cfg.cq_cfg_valid = false;
+       fc_cfg.rxchan_cfg.enable = true;
+       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+       if (rc) {
+               plt_err("Failed to initialize flow control rc=%d", rc);
+               goto cq_fini;
+       }
+
+       /* Reflect the initial flow control state in the PMD */
+       rc = nix_init_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to read flow control state rc=%d", rc);
+               goto cq_fini;
+       }
        /*
         * Restore queue config when reconfigure followed by
         * reconfigure and no queue configure invoked from application case.
@@ -1066,6 +1131,13 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
                        return rc;
        }
 
+       /* Update Flow control configuration */
+       rc = nix_update_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to enable flow control. error code(%d)", rc);
+               return rc;
+       }
+
        /* Enable Rx in NPC */
        rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
        if (rc) {
@@ -1115,6 +1187,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
        .allmulticast_disable = cnxk_nix_allmulticast_disable,
        .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
        .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
+       .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
+       .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
 };
 
 static int
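
With these two entries in cnxk_eth_dev_ops, the generic ethdev layer can
route application calls into the PMD. Roughly, and only as a simplified
sketch of the rte_ethdev dispatch (not part of this patch):

    int
    rte_eth_dev_flow_ctrl_set(uint16_t port_id,
                              struct rte_eth_fc_conf *fc_conf)
    {
            struct rte_eth_dev *dev = &rte_eth_devices[port_id];

            /* Drivers that do not register the op report -ENOTSUP. */
            if (dev->dev_ops->flow_ctrl_set == NULL)
                    return -ENOTSUP;
            return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
    }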
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index aea0005..e788a42 100644
        ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
         (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
 
+struct cnxk_fc_cfg {
+       enum rte_eth_fc_mode mode;
+       uint8_t rx_pause;
+       uint8_t tx_pause;
+};
+
 struct cnxk_eth_qconf {
        union {
                struct rte_eth_txconf tx;
@@ -174,6 +180,9 @@ struct cnxk_eth_dev {
        struct cnxk_eth_qconf *tx_qconf;
        struct cnxk_eth_qconf *rx_qconf;
 
+       /* Flow control configuration */
+       struct cnxk_fc_cfg fc_cfg;
+
        /* Rx burst for cleanup(Only Primary) */
        eth_rx_burst_t rx_pkt_burst_no_offload;
 
@@ -238,6 +247,10 @@ int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
 int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
+int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+                          struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+                          struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
 int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_tx_q_sz,
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 7ae961a..97c5428 100644
@@ -198,6 +198,107 @@ done:
        return 0;
 }
 
+int
+cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+                      struct rte_eth_fc_conf *fc_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       enum rte_eth_fc_mode mode_map[] = {
+                                          RTE_FC_NONE, RTE_FC_RX_PAUSE,
+                                          RTE_FC_TX_PAUSE, RTE_FC_FULL
+                                         };
+       struct roc_nix *nix = &dev->nix;
+       int mode;
+
+       mode = roc_nix_fc_mode_get(nix);
+       if (mode < 0)
+               return mode;
+
+       memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+       fc_conf->mode = mode_map[mode];
+       return 0;
+}
+
+static int
+nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
+{
+       struct roc_nix *nix = &dev->nix;
+       struct roc_nix_fc_cfg fc_cfg;
+       struct roc_nix_cq *cq;
+
+       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+       cq = &dev->cqs[qid];
+       fc_cfg.cq_cfg_valid = true;
+       fc_cfg.cq_cfg.enable = enable;
+       fc_cfg.cq_cfg.rq = qid;
+       fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+
+       return roc_nix_fc_config_set(nix, &fc_cfg);
+}
+
+int
+cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+                      struct rte_eth_fc_conf *fc_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       enum roc_nix_fc_mode mode_map[] = {
+                                          ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+                                          ROC_NIX_FC_TX, ROC_NIX_FC_FULL
+                                         };
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct roc_nix *nix = &dev->nix;
+       uint8_t rx_pause, tx_pause;
+       int rc, i;
+
+       if (roc_nix_is_vf_or_sdp(nix)) {
+               plt_err("Flow control config is not allowed on VF and SDP ports");
+               return -ENOTSUP;
+       }
+
+       if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
+           fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
+               plt_info("Only MODE configuration is supported");
+               return -EINVAL;
+       }
+
+       if (fc_conf->mode == fc->mode)
+               return 0;
+
+       rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+                   (fc_conf->mode == RTE_FC_RX_PAUSE);
+       tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+                   (fc_conf->mode == RTE_FC_TX_PAUSE);
+
+       /* Reconfigure CQ backpressure only if the Tx pause state changes */
+       if (fc->tx_pause ^ tx_pause) {
+               if (roc_model_is_cn96_ax() && data->dev_started) {
+                       /* On Ax, CQ should be in disabled state
+                        * while setting flow control configuration.
+                        */
+                       plt_info("Stop port=%d to change flow control config",
+                                data->port_id);
+                       return 0;
+               }
+
+               for (i = 0; i < data->nb_rx_queues; i++) {
+                       rc = nix_fc_cq_config_set(dev, i, tx_pause);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
+       if (rc)
+               return rc;
+
+       fc->rx_pause = rx_pause;
+       fc->tx_pause = tx_pause;
+       fc->mode = fc_conf->mode;
+
+       return rc;
+}
+
 int
 cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
 {
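
The Ax-specific handling above means that flipping the Tx-pause direction
on a cn96 Ax chip only takes effect while the port is stopped, since the
per-CQ backpressure update requires disabled CQs. A hedged usage sketch
(port_id assumed valid; error handling abbreviated):

    struct rte_eth_fc_conf fc_conf = { .mode = RTE_FC_TX_PAUSE };

    /* Stop the port first so nix_fc_cq_config_set() can reprogram
     * the CQs, then restart with the new mode applied.
     */
    rte_eth_dev_stop(port_id);
    ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    if (ret == 0)
            ret = rte_eth_dev_start(port_id);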