+ hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
+ }
+ }
+}
+
+static int
+hns3_start_rx_queues(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_queue *rxq;
+ int i, j;
+ int ret;
+
+ /* Initialize RSS for queues */
+ ret = hns3_config_rss(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to configure rss %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ ret = hns3_dev_rx_queue_start(hns, i);
+ if (ret) {
+ hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+ ret);
+ goto out;
+ }
+ }
+
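+ /* Also start the fake rx queues; applications never use these directly. */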
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ hns3_fake_rx_queue_start(hns, i);
+ }
+ return 0;
+
+out:
+ for (j = 0; j < i; j++) {
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
+ hns3_rx_queue_release_mbufs(rxq);
+ }
+
+ return ret;
+}
+
+static void
+hns3_start_tx_queues(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_dev_tx_queue_start(hns, i);
+ }
+
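+ /* Also start the fake tx queues; applications never use these directly. */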
+ for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_fake_tx_queue_start(hns, i);
+ }
+
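+ /* Program each tx ring's TC register now that all tx queues are started. */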
+ hns3_init_tx_ring_tc(hns);
+}
+
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not enable
+ * rx & tx on them.
+ */
+int
+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
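+ /* Optionally reset all queues to a clean state before starting them. */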
+ if (reset_queue) {
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues %d", ret);
+ return ret;
+ }
+ }
+
+ ret = hns3_start_rx_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to start rx queues: %d", ret);
+ return ret;
+ }
+
+ hns3_start_tx_queues(hns);
+
+ return 0;
+}
+
+int
+hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
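+ /* Disable rx & tx on all queues before the optional queue reset. */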
+ hns3_enable_all_queues(hw, false);
+ if (reset_queue) {
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void *
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *rx_mz;
+ struct hns3_rx_queue *rxq;
+ unsigned int rx_desc;
+
+ rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate rx ring hardware descriptors. */
+ rxq->queue_id = q_info->idx;
+ rxq->nb_rx_desc = q_info->nb_desc;
+ rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
+ rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ rx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (rx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ q_info->idx);
+ hns3_rx_queue_release(rxq);
+ return NULL;
+ }
+ rxq->mz = rx_mz;
+ rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+ rxq->rx_ring_phys_addr = rx_mz->iova;
+
+ hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ rxq->rx_ring_phys_addr);
+
+ return rxq;
+}
+
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_rx_queue *rxq;
+ uint16_t nb_rx_q;
+
+ if (hw->fkq_data.rx_queues[idx] != NULL) {
+ hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+ hw->fkq_data.rx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake RX queue";
+ q_info.ring_name = "rx_fake_ring";
+ rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring because upper applications do not use it. */
+ rxq->sw_ring = NULL;
+
+ rxq->hns = hns;
+ rxq->rx_deferred_start = false;
+ rxq->port_id = dev->data->port_id;
+ rxq->configured = true;
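+ /*
+ * Fake queue register regions sit after the real rx queues' regions in
+ * the TQP register space, hence the (nb_rx_q + idx) offset below.
+ */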
+ nb_rx_q = dev->data->nb_rx_queues;
+ rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+ rxq->rx_buf_len = hw->rx_buf_len;
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.rx_queues[idx] = rxq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static void *
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *tx_mz;
+ struct hns3_tx_queue *txq;
+ struct hns3_desc *desc;
+ unsigned int tx_desc;
+ int i;
+
+ txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate tx ring hardware descriptors. */
+ txq->queue_id = q_info->idx;
+ txq->nb_tx_desc = q_info->nb_desc;
+ tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
+ tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ tx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (tx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ q_info->idx);
+ hns3_tx_queue_release(txq);
+ return NULL;
+ }
+ txq->mz = tx_mz;
+ txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
+ txq->tx_ring_phys_addr = tx_mz->iova;
+
+ hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ txq->tx_ring_phys_addr);
+
+ /* Clear each tx BD's tp_fe_sc_vld_ra_ri field, which holds the valid bit */
+ desc = txq->tx_ring;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ desc->tx.tp_fe_sc_vld_ra_ri = 0;
+ desc++;
+ }
+
+ return txq;
+}
+
+static int
+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_tx_queue *txq;
+ uint16_t nb_tx_q;
+
+ if (hw->fkq_data.tx_queues[idx] != NULL) {
+ hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+ hw->fkq_data.tx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake TX queue";
+ q_info.ring_name = "tx_fake_ring";
+ txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring because upper applications do not use it. */
+ txq->sw_ring = NULL;
+
+ txq->hns = hns;
+ txq->tx_deferred_start = false;
+ txq->port_id = dev->data->port_id;
+ txq->configured = true;
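+ /*
+ * As with the fake rx queues, the register region follows the real tx
+ * queues' regions, hence the (nb_tx_q + idx) offset below.
+ */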
+ nb_tx_q = dev->data->nb_tx_queues;
+ txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.tx_queues[idx] = txq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
+ void **rxq;
+ uint16_t i;
+
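+ /* Three cases: first-time allocation, resize, and teardown. */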
+ if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
+ /* first-time configuration */
+ uint32_t size;
+ size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
+ hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.rx_queues == NULL) {
+ hw->fkq_data.nb_fake_rx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
+ }
+
+ hw->fkq_data.rx_queues = rxq;
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
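+ /* Teardown: release every fake rx queue and free the pointer array. */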
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rte_free(hw->fkq_data.rx_queues);
+ hw->fkq_data.rx_queues = NULL;
+ }
+
+ hw->fkq_data.nb_fake_rx_queues = nb_queues;
+
+ return 0;
+}
+
+static int
+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
+ void **txq;
+ uint16_t i;
+
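+ /* The three cases mirror hns3_fake_rx_queue_config() above. */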
+ if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
+ /* first-time configuration */
+ uint32_t size;
+ size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
+ hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.tx_queues == NULL) {
+ hw->fkq_data.nb_fake_tx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+ txq = hw->fkq_data.tx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_tx_queue_release(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);