uint16_t rx_rearm_start; /* index of BD that driver re-arming from */
uint16_t rx_rearm_nb; /* number of remaining BDs to be re-armed */
- /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
+ /* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
uint8_t crc_len;
/*
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
* In this mode, HW can not do checksum for special UDP port like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
- * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, PMD need
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
unsigned int socket_id;
};
-#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_OUTER_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_L4_MASK)
+#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_L4_MASK)
enum hns3_cksum_status {
HNS3_CKSUM_NONE = 0,
BIT(HNS3_RXD_OL4E_B))
if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
- rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
rxq->dfx_stats.l3_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
rxq->dfx_stats.l4_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
rxq->dfx_stats.ol3_csum_errors++;
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
- rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
rxq->dfx_stats.ol4_csum_errors++;
}
}
rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
}
-void hns3_dev_rx_queue_release(void *queue);
-void hns3_dev_tx_queue_release(void *queue);
+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket, const struct rte_eth_txconf *conf);
-uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t hns3_rx_queue_count(void *rx_queue);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
void hns3_tx_push_init(struct rte_eth_dev *dev);
+void hns3_stop_tx_datapath(struct rte_eth_dev *dev);
+void hns3_start_tx_datapath(struct rte_eth_dev *dev);
#endif /* _HNS3_RXTX_H_ */