+
+void
+hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hns3_rx_queue *rx_queue = dev->data->rx_queues[queue_id];
+
+ /* Software-visible queue setup. */
+ qinfo->mp = rx_queue->mb_pool;
+ qinfo->nb_desc = rx_queue->nb_rx_desc;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ /* Expose the HW Rx buffer length to the application. */
+ qinfo->rx_buf_size = rx_queue->rx_buf_len;
+
+ qinfo->conf.rx_free_thresh = rx_queue->rx_free_thresh;
+ qinfo->conf.rx_deferred_start = rx_queue->rx_deferred_start;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ /*
+  * On the hns3 network engine, incoming packets are always dropped by
+  * hardware when no Rx buffer descriptors are available, so drop-enable
+  * is reported as permanently on.
+  */
+ qinfo->conf.rx_drop_en = 1;
+}
+
+void
+hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hns3_tx_queue *tx_queue = dev->data->tx_queues[queue_id];
+
+ /* Report the queue's software configuration to the application. */
+ qinfo->nb_desc = tx_queue->nb_tx_desc;
+ qinfo->conf.tx_rs_thresh = tx_queue->tx_rs_thresh;
+ qinfo->conf.tx_free_thresh = tx_queue->tx_free_thresh;
+ qinfo->conf.tx_deferred_start = tx_queue->tx_deferred_start;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+}
+
+/*
+ * Start a single Rx queue: reset the HW ring, reinitialize the queue,
+ * then enable it and mark it started. Returns 0 on success, -ENOTSUP
+ * when the device cannot start/stop queues independently, or a negative
+ * error from the reset/init steps.
+ */
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ /* Per-queue start/stop needs independent Tx/Rx queue support. */
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ /* Reset the hardware queue before (re)initializing it. */
+ ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ ret = hns3_init_rxq(hns, rx_queue_id);
+ if (ret) {
+ hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_enable_rxq(rxq, true);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ /* 'ret' is necessarily zero here; return success explicitly. */
+ return 0;
+}
+
+/* Return the Rx queue's software state to its post-setup condition. */
+static void
+hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
+{
+ /* Wipe the descriptor ring and rewind every software index. */
+ memset(rxq->rx_ring, 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
+ rxq->next_to_use = 0;
+ rxq->rx_free_hold = 0;
+ rxq->rx_rearm_start = 0;
+ rxq->rx_rearm_nb = 0;
+ /* Drop any partially assembled multi-segment packet. */
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ hns3_rxq_vec_setup(rxq);
+}
+
+/*
+ * Stop a single Rx queue: disable it in hardware, free the mbufs held in
+ * the software ring, reset the software state and mark the queue stopped.
+ * Returns 0 on success or -ENOTSUP when the device cannot start/stop
+ * queues independently.
+ */
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ /* Disable the queue in hardware before touching software state. */
+ hns3_enable_rxq(rxq, false);
+
+ hns3_rx_queue_release_mbufs(rxq);
+
+ /* Clear the ring and indexes so a later restart begins clean. */
+ hns3_reset_sw_rxq(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/*
+ * Start a single Tx queue: reset the HW ring, reinitialize the queue's
+ * software state, then enable it and mark it started. Returns 0 on
+ * success, -ENOTSUP when the device cannot start/stop queues
+ * independently, or a negative error from the reset step.
+ */
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+ int ret;
+
+ /* Per-queue start/stop needs independent Tx/Rx queue support. */
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ /* Reset the hardware queue before reinitializing it. */
+ ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+ tx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_init_txq(txq);
+ hns3_enable_txq(txq, true);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ /* 'ret' is necessarily zero here; return success explicitly. */
+ return 0;
+}
+
+/*
+ * Stop a single Tx queue: disable it in hardware, release the mbufs held
+ * in the software ring, reinitialize the queue and mark it stopped.
+ * Returns 0 on success or -ENOTSUP when the device cannot start/stop
+ * queues independently.
+ */
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ /* Disable the queue in hardware before touching software state. */
+ hns3_enable_txq(txq, false);
+ hns3_tx_queue_release_mbufs(txq);
+ /*
+ * All the mbufs in sw_ring are released and all the pointers in sw_ring
+ * are set to NULL. If this queue is still called by upper layer,
+ * residual SW status of this txq may cause these pointers in sw_ring
+ * which have been set to NULL to be released again. To avoid it,
+ * reinit the txq.
+ */
+ hns3_init_txq(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/*
+ * Return the number of used descriptors on an Rx queue: the HW-reported
+ * full-BD count minus the BDs the driver has consumed but not yet handed
+ * back to hardware.
+ */
+uint32_t
+hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ /*
+ * Number of BDs that have been processed by the driver
+ * but have not been notified to the hardware.
+ */
+ uint32_t driver_hold_bd_num;
+ struct hns3_rx_queue *rxq;
+ uint32_t fbd_num;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
+ /*
+ * Vector bursts track pending BDs in rx_rearm_nb; all other bursts
+ * use rx_free_hold. NOTE(review): only the plain vec and sve bursts
+ * are matched here — confirm no other vector Rx burst function needs
+ * the rx_rearm_nb accounting.
+ */
+ if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
+ driver_hold_bd_num = rxq->rx_rearm_nb;
+ else
+ driver_hold_bd_num = rxq->rx_free_hold;
+
+ /* Clamp at zero: the register snapshot may lag the SW counters. */
+ if (fbd_num <= driver_hold_bd_num)
+ return 0;
+ else
+ return fbd_num - driver_hold_bd_num;
+}