From: Jiawen Wu
Date: Mon, 19 Oct 2020 08:53:39 +0000 (+0800)
Subject: net/txgbe: add Rx and Tx start and stop
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=b4cfffaa85ecfac2035eee26fa48167fdcefd678;p=dpdk.git

net/txgbe: add Rx and Tx start and stop

Add receive and transmit units start and stop for specified queue.

Signed-off-by: Jiawen Wu
Reviewed-by: Ferruh Yigit
---

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 707f641318..e76e9af460 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 Unicast MAC filter   = Y
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 747ada0f91..5237200d45 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -469,6 +469,9 @@ struct txgbe_hw {
 		TXGBE_SW_RESET,
 		TXGBE_GLOBAL_RESET
 	} reset_type;
+
+	u32 q_rx_regs[128 * 4];
+	u32 q_tx_regs[128 * 4];
 };

 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index feaf030823..ed0cf9381e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -566,6 +566,8 @@ txgbe_dev_close(struct rte_eth_dev *dev)

 	PMD_INIT_FUNC_TRACE();

+	txgbe_dev_free_queues(dev);
+
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);

@@ -1320,6 +1322,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_infos_get              = txgbe_dev_info_get,
 	.dev_set_link_up            = txgbe_dev_set_link_up,
 	.dev_set_link_down          = txgbe_dev_set_link_down,
+	.rx_queue_start             = txgbe_dev_rx_queue_start,
+	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
+	.tx_queue_start             = txgbe_dev_tx_queue_start,
+	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
 	.rx_queue_release           = txgbe_dev_rx_queue_release,
 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 6636b6e9a6..1a29281a89 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -80,6 +80,8 @@ struct txgbe_adapter {
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_free_queues(struct rte_eth_dev *dev);
+
 void txgbe_dev_rx_queue_release(void *rxq);

 void txgbe_dev_tx_queue_release(void *txq);
@@ -97,6 +99,19 @@ int txgbe_dev_rx_init(struct rte_eth_dev *dev);

 void txgbe_dev_tx_init(struct rte_eth_dev *dev);

+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			uint8_t queue, uint8_t msix_vector);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 707d5b2e4b..d6ba1545cb 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -10,6 +10,9 @@
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -622,12 +625,64 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }

+void
+txgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+	unsigned int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 }

+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+	struct txgbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned int i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile struct txgbe_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned int)rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		rxd = &rxq->rx_ring[i];
+		TXGBE_RXD_HDRADDR(rxd, 0);
+		TXGBE_RXD_PKTADDR(rxd, dma_addr);
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
 /**
  * txgbe_get_rscctl_maxdesc
  *
@@ -958,3 +1013,204 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 	}
 }

+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	/* Allocate buffers for descriptor rings */
+	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+			     rx_queue_id);
+		return -1;
+	}
+	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	rxdctl |= TXGBE_RXCFG_ENA;
+	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+	/* Wait until RX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+	rte_wmb();
+	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+	/* Wait until RX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+	txgbe_rx_queue_release_mbufs(rxq);
+	txgbe_reset_rx_queue(adapter, rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+	/* Wait until TX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until TX queue is empty */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_TXGBE_WAIT_100_US);
+		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR,
+			     "Tx Queue %d is not empty when stopping.",
+			     tx_queue_id);
+
+	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+	/* Wait until TX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index be165dd196..5b991e3040 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -42,6 +42,14 @@ struct txgbe_rx_desc {
 	} qw1; /* also as r.hdr_addr */
 };

+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
 /**
  * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
  **/
@@ -59,6 +67,9 @@ struct txgbe_tx_desc {

 #define TXGBE_PTID_MASK			0xFF

+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_TXGBE_WAIT_100_US               100
+
 #define TXGBE_TX_MAX_SEG                    40

 /**
@@ -140,6 +151,7 @@ struct txgbe_tx_queue {
 	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
 	volatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */
 	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t            tx_tail;       /**< current value of TDT reg. */
 	/**< Start freeing TX buffers if there are less free descriptors than
 	 * this value. */
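
Note: the dev_ops added above are dispatched through the generic ethdev API, so
once this patch is applied an application can stop and restart an individual
queue at runtime without disturbing the rest of the port. A minimal sketch of
that flow (illustrative only, not part of the patch; the function name and the
port/queue ids are placeholders):

	#include <rte_ethdev.h>

	/* Stop and re-arm one Rx queue. The stop call reaches
	 * txgbe_dev_rx_queue_stop(), which clears TXGBE_RXCFG_ENA, saves the
	 * queue registers and frees the ring mbufs; the start call reaches
	 * txgbe_dev_rx_queue_start(), which reallocates ring mbufs, sets
	 * TXGBE_RXCFG_ENA and polls until hardware reports the queue enabled.
	 */
	static int
	restart_rx_queue(uint16_t port_id, uint16_t queue_id)
	{
		int ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);

		if (ret != 0)
			return ret;
		return rte_eth_dev_rx_queue_start(port_id, queue_id);
	}

Tx queues follow the same pattern through rte_eth_dev_tx_queue_stop() and
rte_eth_dev_tx_queue_start().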
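
Per-queue start/stop is also the mechanism behind deferred start: a queue whose
rte_eth_rxconf (or rte_eth_txconf) sets the deferred-start flag is meant to stay
stopped across rte_eth_dev_start() and be started explicitly later. A
hypothetical setup sketch, assuming the driver's start path honors the flag
(not shown in this patch):

	#include <rte_ethdev.h>

	/* Configure an Rx queue for deferred start; it is armed later with
	 * rte_eth_dev_rx_queue_start(). `mp` is the mbuf pool backing the
	 * ring; the function name and all ids are placeholders.
	 */
	static int
	setup_deferred_rx_queue(uint16_t port_id, uint16_t queue_id,
				uint16_t nb_desc, struct rte_mempool *mp)
	{
		struct rte_eth_dev_info info;
		struct rte_eth_rxconf rxconf;
		int ret = rte_eth_dev_info_get(port_id, &info);

		if (ret != 0)
			return ret;
		rxconf = info.default_rxconf;
		rxconf.rx_deferred_start = 1;
		return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
					      rte_eth_dev_socket_id(port_id),
					      &rxconf, mp);
	}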