net/cnxk: support queue start and stop
authorNithin Dabilpuram <ndabilpuram@marvell.com>
Wed, 23 Jun 2021 04:46:18 +0000 (10:16 +0530)
committerJerin Jacob <jerinj@marvell.com>
Tue, 29 Jun 2021 20:18:36 +0000 (22:18 +0200)
Add Rx/Tx queue start and stop callbacks for
CN9K and CN10K.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
doc/guides/nics/features/cnxk.ini
doc/guides/nics/features/cnxk_vec.ini
doc/guides/nics/features/cnxk_vf.ini
drivers/net/cnxk/cn10k_ethdev.c
drivers/net/cnxk/cn9k_ethdev.c
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev.h

index 503582c..712f8d5 100644 (file)
@@ -12,6 +12,7 @@ Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
 Packet type parsing  = Y
index 9ad225a..82f2af0 100644 (file)
@@ -12,6 +12,7 @@ Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
 Packet type parsing  = Y
index 8c93ba7..61fed11 100644 (file)
@@ -11,6 +11,7 @@ Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
 Packet type parsing  = Y
index f79d03c..d70ab00 100644 (file)
@@ -137,6 +137,25 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        return 0;
 }
 
+/* CN10K Tx queue stop: run the common cnxk stop sequence first, then
+ * clear the queue's cached flow-control packet count so that worker
+ * cores observe the stop (see inline comment below).
+ */
+static int
+cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+       int rc;
+
+       /* Common stop: disables SQB aura fc and marks the queue stopped */
+       rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
+       if (rc)
+               return rc;
+
+       /* Clear fc cache pkts to trigger worker stop */
+       txq->fc_cache_pkts = 0;
+       return 0;
+}
+
 static int
 cn10k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -169,6 +184,7 @@ nix_eth_dev_ops_override(void)
        cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
        cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
        cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
+       cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
        cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
 }
 
index 19b3727..806e95f 100644 (file)
@@ -135,6 +135,25 @@ cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        return 0;
 }
 
+/* CN9K Tx queue stop: run the common cnxk stop sequence first, then
+ * clear the queue's cached flow-control packet count so that worker
+ * cores observe the stop (see inline comment below).
+ */
+static int
+cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+       int rc;
+
+       /* Common stop: disables SQB aura fc and marks the queue stopped */
+       rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
+       if (rc)
+               return rc;
+
+       /* Clear fc cache pkts to trigger worker stop */
+       txq->fc_cache_pkts = 0;
+       return 0;
+}
+
 static int
 cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -178,6 +193,7 @@ nix_eth_dev_ops_override(void)
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
        cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
        cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
+       cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
        cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
 }
 
index b1ed046..6c20098 100644 (file)
@@ -866,12 +866,114 @@ fail_configure:
        return rc;
 }
 
+/* Enable Tx on queue @qid: enable SQB aura flow control on the SQ and
+ * mark the queue started; returns 0 if the queue is already started
+ * (idempotent).  NOTE(review): the -EINVAL initializer is dead code. */
+static int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc = -EINVAL;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, true);
+       if (rc) {
+               plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
+/* Disable Tx on queue @qid: disable SQB aura flow control and mark the
+ * queue stopped; returns 0 if already stopped (idempotent).  Non-static:
+ * the cn9k/cn10k wrappers call this before clearing their per-queue
+ * fc cache. */
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, false);
+       if (rc) {
+               plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+                       rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
+/* Enable Rx on queue @qid via roc_nix_rq_ena_dis(rq, true); idempotent. */
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, true);
+       if (rc) {
+               plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
+/* Disable Rx on queue @qid via roc_nix_rq_ena_dis(rq, false); idempotent. */
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, false);
+       if (rc) {
+               plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
 /* CNXK platform independent eth dev ops */
 struct eth_dev_ops cnxk_eth_dev_ops = {
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
        .tx_queue_release = cnxk_nix_tx_queue_release,
        .rx_queue_release = cnxk_nix_rx_queue_release,
+       /* tx_queue_stop is installed by the SoC-specific ops override */
+       .tx_queue_start = cnxk_nix_tx_queue_start,
+       .rx_queue_start = cnxk_nix_rx_queue_start,
+       .rx_queue_stop = cnxk_nix_rx_queue_stop,
        .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
 };
 
index b23df4a..5a52489 100644 (file)
@@ -214,6 +214,7 @@ int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_rx_q_sz,
                            const struct rte_eth_rxconf *rx_conf,
                            struct rte_mempool *mp);
+int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
 
 uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);