Patch implements the set and get operations for link flow control
(Rx/Tx pause-frame configuration) in the cnxk PMD.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
- MAC filtering
- Inner and Outer Checksum offload
- Link state information
+- Link flow control
- MTU update
- Scatter-Gather IO support
- Vector Poll mode driver
Unicast MAC filter = Y
RSS hash = Y
Inner RSS = Y
+Flow control = Y
Jumbo frame = Y
Scattered Rx = Y
L3 checksum offload = Y
Unicast MAC filter = Y
RSS hash = Y
Inner RSS = Y
+Flow control = Y
Jumbo frame = Y
L3 checksum offload = Y
L4 checksum offload = Y
return rc;
}
+/* Sync the PMD's cached flow control state (dev->fc_cfg) with what is
+ * currently programmed in hardware. Called once at configure time.
+ *
+ * Returns 0 on success, or the negative error code propagated from
+ * cnxk_nix_flow_ctrl_get().
+ */
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_conf = {0};
+ int rc;
+
+ /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ * by AF driver, update those info in PMD structure.
+ */
+ rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+ if (rc)
+ goto exit;
+
+ /* Derive the per-direction pause flags from the composite mode */
+ fc->mode = fc_conf.mode;
+ fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+ return rc;
+}
+
+/* Re-apply the cached flow control mode to hardware at device start.
+ * No-op (returns 0) on VF/SDP devices, where flow control is not
+ * configurable from this PMD.
+ */
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_cfg = {0};
+
+ if (roc_nix_is_vf_or_sdp(&dev->nix))
+ return 0;
+
+ fc_cfg.mode = fc->mode;
+
+ /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+ /* The ternary downgrades the mode on cn96 Ax silicon:
+ * RTE_FC_FULL -> RTE_FC_TX_PAUSE, RTE_FC_RX_PAUSE -> RTE_FC_NONE
+ * (the RTE_FC_TX_PAUSE arm of the ternary is unreachable because the
+ * outer condition only admits FULL or RX_PAUSE). NOTE(review): the
+ * net effect is stripping the RX_PAUSE component — confirm this
+ * matches the "disable Tx FC" intent of the erratum workaround.
+ */
+ if (roc_model_is_cn96_ax() &&
+ (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ fc_cfg.mode =
+ (fc_cfg.mode == RTE_FC_FULL ||
+ fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+ RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ }
+
+ return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
struct rte_eth_rxmode *rxmode = &conf->rxmode;
struct rte_eth_txmode *txmode = &conf->txmode;
char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+ struct roc_nix_fc_cfg fc_cfg = {0};
struct roc_nix *nix = &dev->nix;
struct rte_ether_addr *ea;
uint8_t nb_rxq, nb_txq;
goto cq_fini;
}
+ /* Init flow control configuration */
+ fc_cfg.cq_cfg_valid = false;
+ fc_cfg.rxchan_cfg.enable = true;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc) {
+ plt_err("Failed to initialize flow control rc=%d", rc);
+ goto cq_fini;
+ }
+
+ /* Update flow control configuration to PMD */
+ rc = nix_init_flow_ctrl_config(eth_dev);
+ if (rc) {
+ plt_err("Failed to initialize flow control rc=%d", rc);
+ goto cq_fini;
+ }
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
return rc;
}
+ /* Update Flow control configuration */
+ rc = nix_update_flow_ctrl_config(eth_dev);
+ if (rc) {
+ plt_err("Failed to enable flow control. error code(%d)", rc);
+ return rc;
+ }
+
/* Enable Rx in NPC */
rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
if (rc) {
.allmulticast_disable = cnxk_nix_allmulticast_disable,
.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
+ .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
+ .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
};
static int
((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
(1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
+/* PMD-side cache of the link flow control configuration */
+struct cnxk_fc_cfg {
+ enum rte_eth_fc_mode mode; /* last mode applied/read from HW */
+ uint8_t rx_pause; /* set when mode is RTE_FC_FULL or RTE_FC_RX_PAUSE */
+ uint8_t tx_pause; /* set when mode is RTE_FC_FULL or RTE_FC_TX_PAUSE */
+};
+
struct cnxk_eth_qconf {
union {
struct rte_eth_txconf tx;
struct cnxk_eth_qconf *tx_qconf;
struct cnxk_eth_qconf *rx_qconf;
+ /* Flow control configuration */
+ struct cnxk_fc_cfg fc_cfg;
+
/* Rx burst for cleanup(Only Primary) */
eth_rx_burst_t rx_pkt_burst_no_offload;
struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
struct rte_eth_burst_mode *mode);
+int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_tx_q_sz,
return 0;
}
+/* .flow_ctrl_get ethdev op: report the flow control mode currently
+ * programmed in hardware. Only the mode field of fc_conf is filled;
+ * all other fields are zeroed.
+ *
+ * Returns 0 on success or the negative value from roc_nix_fc_mode_get().
+ */
+int
+cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ /* Translates the ROC mode index into the RTE enum; assumes
+ * roc_nix_fc_mode_get() returns 0..3 in NONE/RX/TX/FULL order —
+ * TODO confirm against roc_nix definitions.
+ */
+ enum rte_eth_fc_mode mode_map[] = {
+ RTE_FC_NONE, RTE_FC_RX_PAUSE,
+ RTE_FC_TX_PAUSE, RTE_FC_FULL
+ };
+ struct roc_nix *nix = &dev->nix;
+ int mode;
+
+ mode = roc_nix_fc_mode_get(nix);
+ if (mode < 0)
+ return mode;
+
+ memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ fc_conf->mode = mode_map[mode];
+ return 0;
+}
+
+/* Enable/disable CQ backpressure for one Rx queue (qid), using the
+ * CQ's drop threshold as the backpressure level. Helper for
+ * cnxk_nix_flow_ctrl_set(); assumes dev->cqs[qid] is initialized.
+ */
+static int
+nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
+{
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_fc_cfg fc_cfg;
+ struct roc_nix_cq *cq;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ cq = &dev->cqs[qid];
+ fc_cfg.cq_cfg_valid = true;
+ fc_cfg.cq_cfg.enable = enable;
+ fc_cfg.cq_cfg.rq = qid;
+ fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+
+ return roc_nix_fc_config_set(nix, &fc_cfg);
+}
+
+/* .flow_ctrl_set ethdev op: program the requested flow control mode.
+ * Only fc_conf->mode is supported; any other non-zero field is
+ * rejected with -EINVAL. Not supported on VF/SDP (-ENOTSUP).
+ * On success the PMD cache (dev->fc_cfg) is updated to match HW.
+ */
+int
+cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ /* RTE mode -> ROC mode; assumes RTE_FC_* enumerate 0..3 in
+ * NONE/RX_PAUSE/TX_PAUSE/FULL order, mirroring the map in
+ * cnxk_nix_flow_ctrl_get().
+ */
+ enum roc_nix_fc_mode mode_map[] = {
+ ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+ ROC_NIX_FC_TX, ROC_NIX_FC_FULL
+ };
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct roc_nix *nix = &dev->nix;
+ uint8_t rx_pause, tx_pause;
+ int rc, i;
+
+ if (roc_nix_is_vf_or_sdp(nix)) {
+ plt_err("Flow control configuration is not allowed on VFs");
+ return -ENOTSUP;
+ }
+
+ if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
+ fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
+ plt_info("Only MODE configuration is supported");
+ return -EINVAL;
+ }
+
+ /* Nothing to do if the requested mode is already active */
+ if (fc_conf->mode == fc->mode)
+ return 0;
+
+ rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_TX_PAUSE);
+
+ /* Check if TX pause frame is already enabled or not */
+ if (fc->tx_pause ^ tx_pause) {
+ if (roc_model_is_cn96_ax() && data->dev_started) {
+ /* On Ax, CQ should be in disabled state
+ * while setting flow control configuration.
+ */
+ /* NOTE(review): returns 0 (success) without applying
+ * the new mode — the caller only sees the log message.
+ * Confirm an error code isn't more appropriate here.
+ */
+ plt_info("Stop the port=%d for setting flow control",
+ data->port_id);
+ return 0;
+ }
+
+ /* Toggle CQ backpressure on every Rx queue to match the new
+ * Tx-pause state.
+ */
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ rc = nix_fc_cq_config_set(dev, i, tx_pause);
+ if (rc)
+ return rc;
+ }
+ }
+
+ rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
+ if (rc)
+ return rc;
+
+ /* HW updated; refresh the PMD cache */
+ fc->rx_pause = rx_pause;
+ fc->tx_pause = tx_pause;
+ fc->mode = fc_conf->mode;
+
+ return rc;
+}
+
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{