+
+void
+hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ /* Report the HW Rx buffer length to the user */
+ qinfo->rx_buf_size = rxq->rx_buf_len;
+
+ /*
+ * On the hns3 network engine, hardware always drops incoming packets
+ * when no Rx buffer descriptors are available, so rx_drop_en is
+ * effectively always enabled.
+ */
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
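+
+/*
+ * Illustrative usage (not part of this patch): applications reach the two
+ * callbacks above through the generic ethdev API; port_id and queue_id are
+ * placeholder values.
+ *
+ *	struct rte_eth_rxq_info rx_info;
+ *	struct rte_eth_txq_info tx_info;
+ *
+ *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
+ *		printf("Rx ring: %u BDs, HW Rx buf len: %u\n",
+ *		       rx_info.nb_desc, rx_info.rx_buf_size);
+ *	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_info) == 0)
+ *		printf("Tx ring: %u BDs\n", tx_info.nb_desc);
+ */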
+
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ ret = hns3_init_rxq(hns, rx_queue_id);
+ if (ret) {
+ hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ hns3_enable_rxq(rxq, true);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static void
+hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
+{
+ rxq->next_to_use = 0;
+ rxq->rx_rearm_start = 0;
+ rxq->rx_free_hold = 0;
+ rxq->rx_rearm_nb = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
+ hns3_rxq_vec_setup(rxq);
+}
+
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ hns3_enable_rxq(rxq, false);
+
+ hns3_rx_queue_release_mbufs(rxq);
+
+ hns3_reset_sw_rxq(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
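+
+/*
+ * Illustrative usage (not part of this patch): with independent Tx/Rx queue
+ * support, a single Rx queue can be stopped and restarted at runtime while
+ * the port stays up; port_id and queue_id are placeholder values.
+ *
+ *	if (rte_eth_dev_rx_queue_stop(port_id, queue_id) == 0 &&
+ *	    rte_eth_dev_rx_queue_start(port_id, queue_id) == 0)
+ *		printf("Rx queue %u restarted\n", queue_id);
+ *
+ * On hardware without independent Tx/Rx queue support, the callbacks above
+ * return -ENOTSUP.
+ */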
+
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+ tx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ hns3_init_txq(txq);
+ hns3_enable_txq(txq, true);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ rte_spinlock_lock(&hw->lock);
+ hns3_enable_txq(txq, false);
+ hns3_tx_queue_release_mbufs(txq);
+ /*
+ * All the mbufs in sw_ring have been released and every pointer in
+ * sw_ring is now NULL. If the upper layer keeps operating on this
+ * queue, the residual software state of the txq could cause those
+ * NULL pointers to be freed again. To avoid this, reinitialize the
+ * txq.
+ */
+ hns3_init_txq(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
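+
+/*
+ * Illustrative usage (not part of this patch): the Tx queue start callback
+ * is typically exercised through the deferred-start flow. A minimal sketch,
+ * assuming txconf was obtained from rte_eth_dev_info_get() and port_id,
+ * queue_id, nb_desc and socket_id are placeholder values:
+ *
+ *	txconf.tx_deferred_start = 1;
+ *	rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, socket_id, &txconf);
+ *	rte_eth_dev_start(port_id);	(the queue stays stopped)
+ *	rte_eth_dev_tx_queue_start(port_id, queue_id);
+ */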
+
+static int
+hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
+{
+ uint16_t next_to_clean = txq->next_to_clean;
+ uint16_t next_to_use = txq->next_to_use;
+ uint16_t tx_bd_ready = txq->tx_bd_ready;
+ struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
+ struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
+ uint32_t idx;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ for (idx = 0; idx < free_cnt; idx++) {
+ if (next_to_clean == next_to_use)
+ break;
+
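+ /* Stop at the first BD hardware has not sent yet (VLD bit still set). */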
+ if (desc->tx.tp_fe_sc_vld_ra_ri &
+ rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ break;
+
+ if (tx_pkt->mbuf != NULL) {
+ rte_pktmbuf_free_seg(tx_pkt->mbuf);
+ tx_pkt->mbuf = NULL;
+ }
+
+ next_to_clean++;
+ tx_bd_ready++;
+ tx_pkt++;
+ desc++;
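+ /* Wrap to the head of the ring once the tail is reached. */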
+ if (next_to_clean == txq->nb_tx_desc) {
+ tx_pkt = txq->sw_ring;
+ desc = txq->tx_ring;
+ next_to_clean = 0;
+ }
+ }
+
+ if (idx > 0) {
+ txq->next_to_clean = next_to_clean;
+ txq->tx_bd_ready = tx_bd_ready;
+ }
+
+ return (int)idx;
+}
+
+int
+hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+
+ if (dev->tx_pkt_burst == hns3_xmit_pkts)
+ return hns3_tx_done_cleanup_full(q, free_cnt);
+ else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
+ return 0;
+ else
+ return -ENOTSUP;
+}
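+
+/*
+ * Illustrative usage (not part of this patch): applications free already
+ * transmitted mbufs on demand via the generic ethdev API, which lands in
+ * the callback above; port_id and queue_id are placeholder values.
+ *
+ *	int nb_freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
+ *
+ * A free_cnt of 0 requests cleaning of the whole ring; the return value is
+ * the number of freed BDs, or -ENOTSUP for burst modes without cleanup
+ * support.
+ */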
+
+int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *rxdp;
+ struct hns3_rx_queue *rxq;
+ struct rte_eth_dev *dev;
+ uint32_t bd_base_info;
+ uint16_t desc_id;
+
+ rxq = (struct hns3_rx_queue *)rx_queue;
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+ rxdp = &rxq->rx_ring[desc_id];
+ bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+ dev = &rte_eth_devices[rxq->port_id];
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
+ dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return RTE_ETH_RX_DESC_AVAIL;
+ else
+ return RTE_ETH_RX_DESC_DONE;
+}
+
+int
+hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *txdp;
+ struct hns3_tx_queue *txq;
+ struct rte_eth_dev *dev;
+ uint16_t desc_id;
+
+ txq = (struct hns3_tx_queue *)tx_queue;
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[txq->port_id];
+ if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
+ dev->tx_pkt_burst != hns3_xmit_pkts &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
+ dev->tx_pkt_burst != hns3_xmit_pkts_vec)
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
+ txdp = &txq->tx_ring[desc_id];
+ if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
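+
+/*
+ * Illustrative usage (not part of this patch): the two callbacks above back
+ * rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(). A
+ * minimal sketch probing the BD "offset" entries past the current Rx
+ * position (port_id, queue_id and offset are placeholder values):
+ *
+ *	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
+ *
+ *	if (status == RTE_ETH_RX_DESC_DONE)
+ *		;	(hardware has filled this BD with a packet)
+ *	else if (status == RTE_ETH_RX_DESC_AVAIL)
+ *		;	(the BD is still owned by hardware)
+ */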
+
+uint32_t
+hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ /*
+ * Number of BDs that have been processed by the driver
+ * but have not been notified to the hardware.
+ */
+ uint32_t driver_hold_bd_num;
+ struct hns3_rx_queue *rxq;
+ uint32_t fbd_num;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
+ if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
+ driver_hold_bd_num = rxq->rx_rearm_nb;
+ else
+ driver_hold_bd_num = rxq->rx_free_hold;
+
+ if (fbd_num <= driver_hold_bd_num)
+ return 0;
+ else
+ return fbd_num - driver_hold_bd_num;
+}
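+
+/*
+ * Illustrative usage (not part of this patch): the callback above serves
+ * rte_eth_rx_queue_count(), e.g. to gauge the Rx backlog before deciding
+ * how aggressively to poll; port_id and queue_id are placeholder values.
+ *
+ *	int used = rte_eth_rx_queue_count(port_id, queue_id);
+ *
+ * A positive return value is the number of used BDs on the Rx ring.
+ */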
+
+void
+hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
+{
+ /*
+ * If the hardware supports the RXD advanced layout, the driver
+ * enables it by default.
+ */
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
+}