net/ngbe: support Tx queue start/stop
author    Jiawen Wu <jiawenwu@trustnetic.com>
Thu, 8 Jul 2021 09:32:35 +0000 (17:32 +0800)
committer Andrew Rybchenko <Andrew.Rybchenko@oktetlabs.ru>
Mon, 12 Jul 2021 15:55:22 +0000 (17:55 +0200)
Initialize the transmit unit, and support starting and stopping the
transmit unit for specified queues.
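
For reference, below is a minimal, hypothetical sketch (not part of this
patch) of how an application exercises the new tx_queue_start/tx_queue_stop
ops through the generic ethdev API. The port ID, queue ID and ring size are
illustrative only, and the device is assumed to have been configured with
rte_eth_dev_configure() beforehand.

    #include <rte_common.h>
    #include <rte_ethdev.h>

    static int
    example_deferred_tx_start(uint16_t port_id)
    {
            /* Defer queue start: rte_eth_dev_start() will not enable it. */
            struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
            int ret;

            ret = rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY,
                                         &txconf);
            if (ret != 0)
                    return ret;

            ret = rte_eth_dev_start(port_id);
            if (ret != 0)
                    return ret;

            /* These calls land in the PMD's .tx_queue_start and
             * .tx_queue_stop ops added by this patch. */
            ret = rte_eth_dev_tx_queue_start(port_id, 0);
            if (ret != 0)
                    return ret;

            return rte_eth_dev_tx_queue_stop(port_id, 0);
    }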

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
doc/guides/nics/features/ngbe.ini
drivers/net/ngbe/base/ngbe_type.h
drivers/net/ngbe/ngbe_ethdev.c
drivers/net/ngbe/ngbe_ethdev.h
drivers/net/ngbe/ngbe_rxtx.c
drivers/net/ngbe/ngbe_rxtx.h

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 291a542..08d5f1b 100644
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Multiprocess aware   = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 3f6698b..2846a6a 100644
@@ -190,6 +190,7 @@ struct ngbe_hw {
        u16 nb_rx_queues;
        u16 nb_tx_queues;
 
+       u32 q_tx_regs[8 * 4];
        bool is_pf;
 };
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 31c1ebd..a70a860 100644
@@ -598,6 +598,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                ETH_LINK_SPEED_10M;
 
        /* Driver-preferred Rx/Tx parameters */
+       dev_info->default_txportconf.burst_size = 32;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;
@@ -1085,6 +1086,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
        .dev_start                  = ngbe_dev_start,
        .dev_stop                   = ngbe_dev_stop,
        .link_update                = ngbe_dev_link_update,
+       .tx_queue_start             = ngbe_dev_tx_queue_start,
+       .tx_queue_stop              = ngbe_dev_tx_queue_stop,
        .rx_queue_setup             = ngbe_dev_rx_queue_setup,
        .rx_queue_release           = ngbe_dev_rx_queue_release,
        .tx_queue_setup             = ngbe_dev_tx_queue_setup,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 9e086ee..5c2aea8 100644
@@ -86,6 +86,13 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 6cb0465..63f0647 100644
@@ -528,7 +528,32 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
 void
 ngbe_dev_tx_init(struct rte_eth_dev *dev)
 {
-       RTE_SET_USED(dev);
+       struct ngbe_hw     *hw;
+       struct ngbe_tx_queue *txq;
+       uint64_t bus_addr;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = ngbe_dev_hw(dev);
+
+       wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
+       wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+
+               bus_addr = txq->tx_ring_phys_addr;
+               wr32(hw, NGBE_TXBAL(txq->reg_idx),
+                               (uint32_t)(bus_addr & BIT_MASK32));
+               wr32(hw, NGBE_TXBAH(txq->reg_idx),
+                               (uint32_t)(bus_addr >> 32));
+               wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
+                       NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+               /* Setup the HW Tx Head and TX Tail descriptor pointers */
+               wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
+               wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
+       }
 }
 
 /*
@@ -537,7 +562,140 @@ ngbe_dev_tx_init(struct rte_eth_dev *dev)
 int
 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
-       RTE_SET_USED(dev);
+       struct ngbe_hw     *hw;
+       struct ngbe_tx_queue *txq;
+       uint32_t dmatxctl;
+       uint16_t i;
+       int ret = 0;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = ngbe_dev_hw(dev);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               /* Setup Transmit Threshold Registers */
+               wr32m(hw, NGBE_TXCFG(txq->reg_idx),
+                     NGBE_TXCFG_HTHRESH_MASK |
+                     NGBE_TXCFG_WTHRESH_MASK,
+                     NGBE_TXCFG_HTHRESH(txq->hthresh) |
+                     NGBE_TXCFG_WTHRESH(txq->wthresh));
+       }
+
+       dmatxctl = rd32(hw, NGBE_DMATXCTRL);
+       dmatxctl |= NGBE_DMATXCTRL_ENA;
+       wr32(hw, NGBE_DMATXCTRL, dmatxctl);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq->tx_deferred_start == 0) {
+                       ret = ngbe_dev_tx_queue_start(dev, i);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
 
        return -EINVAL;
 }
+
+void
+ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+       u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+       *(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
+       *(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
+       *(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
+}
+
+void
+ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+       u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+       wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
+       wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
+       wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_tx_queue *txq;
+       uint32_t txdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[tx_queue_id];
+       wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
+
+       /* Wait until Tx Enable ready */
+       poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_ms(1);
+               txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+       } while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
+       if (poll_ms == 0)
+               PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+                            tx_queue_id);
+
+       rte_wmb();
+       wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+       return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_tx_queue *txq;
+       uint32_t txdctl;
+       uint32_t txtdh, txtdt;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[tx_queue_id];
+
+       /* Wait until Tx queue is empty */
+       poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_us(RTE_NGBE_WAIT_100_US);
+               txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
+               txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
+       } while (--poll_ms && (txtdh != txtdt));
+       if (poll_ms == 0)
+               PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
+                            tx_queue_id);
+
+       ngbe_dev_save_tx_queue(hw, txq->reg_idx);
+       wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
+
+       /* Wait until Tx Enable bit clear */
+       poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_ms(1);
+               txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+       } while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
+       if (poll_ms == 0)
+               PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+                            tx_queue_id);
+
+       rte_delay_us(RTE_NGBE_WAIT_100_US);
+       ngbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+       if (txq->ops != NULL) {
+               txq->ops->release_mbufs(txq);
+               txq->ops->reset(txq);
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 4b824d4..03e9828 100644
@@ -73,6 +73,9 @@ struct ngbe_tx_desc {
 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
                    sizeof(struct ngbe_rx_desc))
 
+#define RTE_NGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_NGBE_WAIT_100_US               100
+
 #define NGBE_TX_MAX_SEG                    40
 
 #ifndef DEFAULT_TX_FREE_THRESH