+
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
+ hns3_init_tx_queue(txq);
+}
+
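+/* Initialize the fake TX queue at index @idx in hw->fkq_data. */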
+static void
+hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+
+ txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
+ hns3_init_tx_queue(txq);
+}
+
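+/*
+ * Walk the TC-to-TQP mapping in hw->tc_queue and program the traffic
+ * class of every in-use TX ring.
+ */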
+static void
+hns3_init_tx_ring_tc(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+ int i, num;
+
+ for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
+ int j;
+
+ if (!tc_queue->enable)
+ continue;
+
+ for (j = 0; j < tc_queue->tqp_count; j++) {
+ num = tc_queue->tqp_offset + j;
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
+ if (txq == NULL)
+ continue;
+
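+ /* Bind this ring to its TC via the per-ring TC register. */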
+ hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
+ }
+ }
+}
+
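+/*
+ * Configure RSS, then start every normal and fake RX queue that is not
+ * marked for deferred start.
+ */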
+static int
+hns3_start_rx_queues(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_rx_queue *rxq;
+ int i, j;
+ int ret;
+
+ /* Initialize RSS for queues */
+ ret = hns3_config_rss(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to configure rss %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ ret = hns3_dev_rx_queue_start(hns, i);
+ if (ret) {
+ hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+ ret);
+ goto out;
+ }
+ }
+
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ hns3_fake_rx_queue_start(hns, i);
+ }
+ return 0;
+
+out:
+ /* Roll back the queues already started, freeing their posted mbufs. */
+ for (j = 0; j < i; j++) {
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
+ if (rxq == NULL)
+ continue;
+ hns3_rx_queue_release_mbufs(rxq);
+ }
+
+ return ret;
+}
+
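+/*
+ * Start every normal and fake TX queue that is not marked for deferred
+ * start, then program the TX ring to TC mapping.
+ */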
+static void
+hns3_start_tx_queues(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_dev_tx_queue_start(hns, i);
+ }
+
+ for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_fake_tx_queue_start(hns, i);
+ }
+
+ hns3_init_tx_ring_tc(hns);
+}
+
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not
+ * enable RX & TX on them.
+ */
+int
+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (reset_queue) {
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues %d", ret);
+ return ret;
+ }
+ }
+
+ ret = hns3_start_rx_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to start rx queues: %d", ret);
+ return ret;
+ }
+
+ hns3_start_tx_queues(hns);
+
+ return 0;
+}
+
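+/* Disable RX & TX on all queues and, if requested, reset them. */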
+int
+hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ hns3_enable_all_queues(hw, false);
+ if (reset_queue) {
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
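+/*
+ * Allocate the RX queue structure and reserve the DMA memzone backing
+ * its descriptor ring. Returns NULL on failure.
+ */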
+static void *
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *rx_mz;
+ struct hns3_rx_queue *rxq;
+ unsigned int rx_desc;
+
+ rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate rx ring hardware descriptors. */
+ rxq->queue_id = q_info->idx;
+ rxq->nb_rx_desc = q_info->nb_desc;
+ rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
+ rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ rx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (rx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ q_info->idx);
+ hns3_rx_queue_release(rxq);
+ return NULL;
+ }
+ rxq->mz = rx_mz;
+ rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+ rxq->rx_ring_phys_addr = rx_mz->iova;
+
+ hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ rxq->rx_ring_phys_addr);
+
+ return rxq;
+}
+
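+/*
+ * Set up a fake RX queue backing a hardware TQP that has no
+ * application-visible counterpart; it is never handed to applications.
+ */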
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_rx_queue *rxq;
+ uint16_t nb_rx_q;
+
+ if (hw->fkq_data.rx_queues[idx]) {
+ hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+ hw->fkq_data.rx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake RX queue";
+ q_info.ring_name = "rx_fake_ring";
+ rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring: upper-layer applications never use a fake queue. */
+ rxq->sw_ring = NULL;
+
+ rxq->hns = hns;
+ rxq->rx_deferred_start = false;
+ rxq->port_id = dev->data->port_id;
+ rxq->configured = true;
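+ /* Fake queue registers follow the normal RX queues in the TQP BAR. */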
+ nb_rx_q = dev->data->nb_rx_queues;
+ rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+ rxq->rx_buf_len = hw->rx_buf_len;
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.rx_queues[idx] = rxq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
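+/*
+ * Allocate the TX queue structure and reserve the DMA memzone backing
+ * its descriptor ring. Returns NULL on failure.
+ */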
+static void *
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *tx_mz;
+ struct hns3_tx_queue *txq;