net/failsafe: add Tx queue start and stop functions
Author:     Ian Dolzhansky <ian.dolzhansky@oktetlabs.ru>
AuthorDate: Thu, 20 Sep 2018 13:55:52 +0000 (14:55 +0100)
Commit:     Ferruh Yigit <ferruh.yigit@intel.com>
CommitDate: Thu, 27 Sep 2018 23:41:02 +0000 (01:41 +0200)
Support Tx queue deferred start.
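
For context, a minimal application-side sketch of how Tx queue deferred
start is used on any ethdev port, including failsafe. The names port_id,
QID and NB_TXD are placeholders, not part of this patch:

    #include <rte_ethdev.h>

    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    txconf = dev_info.default_txconf;
    /* Ask ethdev not to start this queue from rte_eth_dev_start() */
    txconf.tx_deferred_start = 1;

    if (rte_eth_tx_queue_setup(port_id, QID, NB_TXD,
                               rte_eth_dev_socket_id(port_id), &txconf) < 0)
        /* handle setup failure */;

    rte_eth_dev_start(port_id);   /* Tx queue QID stays stopped */
    /* ... */
    rte_eth_dev_tx_queue_start(port_id, QID); /* start it explicitly */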

Signed-off-by: Ian Dolzhansky <ian.dolzhansky@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Gaetan Rivet <gaetan.rivet@6wind.com>
doc/guides/nics/features/failsafe.ini
doc/guides/rel_notes/release_18_11.rst
drivers/net/failsafe/failsafe_ether.c
drivers/net/failsafe/failsafe_ops.c

diff --git a/doc/guides/nics/features/failsafe.ini b/doc/guides/nics/features/failsafe.ini
index 712c0b7..74eae4a 100644
--- a/doc/guides/nics/features/failsafe.ini
+++ b/doc/guides/nics/features/failsafe.ini
@@ -7,7 +7,7 @@
 Link status          = Y
 Link status event    = Y
 Rx interrupt         = Y
-Queue start/stop     = P
+Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Promiscuous mode     = Y
diff --git a/doc/guides/rel_notes/release_18_11.rst b/doc/guides/rel_notes/release_18_11.rst
index 485caf6..d4ef2ef 100644
--- a/doc/guides/rel_notes/release_18_11.rst
+++ b/doc/guides/rel_notes/release_18_11.rst
@@ -76,8 +76,8 @@ New Features
 
   Updated the failsafe driver including the following changes:
 
-  * Support for Rx queues start and stop.
-  * Support for Rx queues deferred start.
+  * Support for Rx and Tx queues start and stop.
+  * Support for Rx and Tx queues deferred start.
 
 * **Added ability to switch queue deferred start flag on testpmd app.**
 
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 305deed..191f95f 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -407,6 +407,47 @@ failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+       struct txq *txq;
+       int ret;
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+
+               if (txq->info.conf.tx_deferred_start &&
+                   dev->data->tx_queue_state[i] ==
+                                               RTE_ETH_QUEUE_STATE_STARTED) {
+                       /*
+                        * The subdevice Tx queue is not started on device
+                        * start when its deferred start flag is set. It has
+                        * to be started manually if the corresponding
+                        * failsafe Tx queue has already been started.
+                        */
+                       ret = dev->dev_ops->tx_queue_start(dev, i);
+                       if (ret) {
+                               ERROR("Could not synchronize Tx queue %d", i);
+                               return ret;
+                       }
+               } else if (dev->data->tx_queue_state[i] ==
+                                               RTE_ETH_QUEUE_STATE_STOPPED) {
+                       /*
+                        * The subdevice Tx queue has to be stopped manually
+                        * if the corresponding failsafe Tx queue has already
+                        * been stopped.
+                        */
+                       ret = dev->dev_ops->tx_queue_stop(dev, i);
+                       if (ret) {
+                               ERROR("Could not synchronize Tx queue %d", i);
+                               return ret;
+                       }
+               }
+       }
+       return 0;
+}
+
 int
 failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 {
@@ -466,6 +507,9 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
        if (ret)
                goto err_remove;
        ret = failsafe_eth_dev_rx_queues_sync(dev);
+       if (ret)
+               goto err_remove;
+       ret = failsafe_eth_dev_tx_queues_sync(dev);
        if (ret)
                goto err_remove;
        return 0;
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index b3bfacb..84f4b7a 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -172,6 +172,7 @@ static void
 fs_set_queues_state_start(struct rte_eth_dev *dev)
 {
        struct rxq *rxq;
+       struct txq *txq;
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -180,6 +181,12 @@ fs_set_queues_state_start(struct rte_eth_dev *dev)
                        dev->data->rx_queue_state[i] =
                                                RTE_ETH_QUEUE_STATE_STARTED;
        }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (!txq->info.conf.tx_deferred_start)
+                       dev->data->tx_queue_state[i] =
+                                               RTE_ETH_QUEUE_STATE_STARTED;
+       }
 }
 
 static int
@@ -232,6 +239,8 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 static void
@@ -371,6 +380,59 @@ fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        return 0;
 }
 
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct sub_device *sdev;
+       uint8_t i;
+       int ret;
+       int err = 0;
+       bool failure = true;
+
+       fs_lock(dev, 0);
+       FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+               uint16_t port_id = ETH(sdev)->data->port_id;
+
+               ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+               ret = fs_err(sdev, ret);
+               if (ret) {
+                       ERROR("Tx queue stop failed for subdevice %d", i);
+                       err = ret;
+               } else {
+                       failure = false;
+               }
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+       fs_unlock(dev, 0);
+       /* Return 0 if the queue was stopped on at least one subdevice */
+       return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct sub_device *sdev;
+       uint8_t i;
+       int ret;
+
+       fs_lock(dev, 0);
+       FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+               uint16_t port_id = ETH(sdev)->data->port_id;
+
+               ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+               ret = fs_err(sdev, ret);
+               if (ret) {
+                       ERROR("Tx queue start failed for subdevice %d", i);
+                       fs_tx_queue_stop(dev, tx_queue_id);
+                       fs_unlock(dev, 0);
+                       return ret;
+               }
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+       fs_unlock(dev, 0);
+       return 0;
+}
+
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -592,12 +654,17 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
        uint8_t i;
        int ret;
 
+       fs_lock(dev, 0);
        if (tx_conf->tx_deferred_start) {
-               ERROR("Tx queue deferred start is not supported");
-               return -EINVAL;
+               FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+                       if (SUBOPS(sdev, tx_queue_start) == NULL) {
+                               ERROR("Tx queue deferred start is not "
+                                       "supported for subdevice %d", i);
+                               fs_unlock(dev, 0);
+                               return -EINVAL;
+                       }
+               }
        }
-
-       fs_lock(dev, 0);
        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
@@ -1127,6 +1194,8 @@ const struct eth_dev_ops failsafe_ops = {
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_start = fs_rx_queue_start,
        .rx_queue_stop = fs_rx_queue_stop,
+       .tx_queue_start = fs_tx_queue_start,
+       .tx_queue_stop = fs_tx_queue_stop,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
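
Usage note (a sketch, not part of the patch): with these ops wired up, an
application can stop and restart a failsafe Tx queue at runtime through the
regular ethdev calls. As the code above shows, fs_tx_queue_stop() returns 0
when at least one active subdevice stopped the queue, while
fs_tx_queue_start() rolls the queue back to the stopped state if any
subdevice fails to start it:

    /* port_id is the failsafe port; qid is a previously set up Tx queue */
    if (rte_eth_dev_tx_queue_stop(port_id, qid) == 0) {
        /* the queue is stopped on at least one active subdevice */
    }
    if (rte_eth_dev_tx_queue_start(port_id, qid) != 0) {
        /* start failed; failsafe rolled the queue back to stopped */
    }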