uint64_t rx_ring_phys_addr; /* RX ring DMA address */
const struct rte_memzone *mz;
struct hns3_entry *sw_ring;
-
struct rte_mbuf *pkt_first_seg;
struct rte_mbuf *pkt_last_seg;
uint16_t rx_free_hold; /* number of BDs waiting to be passed to hardware */
uint16_t rx_rearm_start; /* index of the BD the driver re-arms from */
uint16_t rx_rearm_nb; /* number of remaining BDs to be re-armed */
- /*
- * port based vlan configuration state.
- * value range: HNS3_PORT_BASE_VLAN_DISABLE / HNS3_PORT_BASE_VLAN_ENABLE
- */
- uint16_t pvid_state;
/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
uint8_t crc_len;
bool rx_deferred_start; /* don't start this queue in dev start */
bool configured; /* indicate if rx queue has been configured */
+ /*
+ * Indicate whether to ignore the outer VLAN field in the Rx BD reported
+ * by the hardware. On hardware network engines whose VLAN mode is
+ * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer VLAN
+ * field carries the PVID when a PVID is set, and this VLAN must not be
+ * passed to the upper-layer application. On hardware network engines
+ * whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng
+ * 930, the PVID is never reported in the BD, so the PMD does not need
+ * to perform any PVID-related operation in Rx; in that case
+ * pvid_sw_discard_en will be false.
+ */
+ bool pvid_sw_discard_en;
+ bool enabled; /* indicate if Rx queue has been enabled */
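To make the Rx-side behavior concrete, here is a minimal sketch (an illustration, not the driver's actual datapath) of how a receive routine might consume pvid_sw_discard_en; the outer VLAN value parsed from the BD is passed in as a plain argument because the descriptor layout is not shown in this header:

static inline void
sketch_rx_report_vlan(struct hns3_rx_queue *rxq, struct rte_mbuf *m,
		      uint16_t outer_vlan)
{
	if (rxq->pvid_sw_discard_en) {
		/* The outer VLAN is the port-based VLAN (PVID): drop it. */
		m->ol_flags &= ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
		return;
	}
	/* Otherwise report the stripped VLAN tag to the application. */
	m->vlan_tci = outer_vlan;
	m->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}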
uint64_t l2_errors;
uint64_t pkt_len_errors;
struct rte_mbuf **free;
/*
- * port based vlan configuration state.
- * value range: HNS3_PORT_BASE_VLAN_DISABLE / HNS3_PORT_BASE_VLAN_ENABLE
+ * TSO mode.
+ * value range:
+ *     HNS3_TSO_SW_CAL_PSEUDO_H_CSUM / HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *
+ * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
+ *   In this mode, because of a hardware constraint, the network driver
+ *   software must erase the L4 length value in the TCP pseudo header
+ *   and recalculate the TCP pseudo header checksum of packets that
+ *   need TSO.
+ *
+ * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *   In this mode, the hardware can recalculate the TCP pseudo header
+ *   checksum of packets that need TSO, so the network driver software
+ *   does not need to recalculate it.
*/
- uint16_t pvid_state;
-
+ uint8_t tso_mode;
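As an illustration of the HNS3_TSO_SW_CAL_PSEUDO_H_CSUM case, the sketch below (an assumption, not the driver's code) recomputes the TCP pseudo header checksum with the L4 length erased; rte_ipv4_phdr_cksum() omits the length when PKT_TX_TCP_SEG is passed. IPv4/TCP only, single segment, with the usual rte_mbuf.h/rte_ip.h/rte_tcp.h includes assumed:

static inline void
sketch_sw_tso_pseudo_cksum(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *iph;
	struct rte_tcp_hdr *th;

	iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
	th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
				     m->l2_len + m->l3_len);
	/* Pseudo header checksum with the L4 length erased (TSO case). */
	th->cksum = rte_ipv4_phdr_cksum(iph, PKT_TX_TCP_SEG);
}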
/*
* The minimum length of the packet supported by hardware in the Tx
* direction.
*/
uint32_t min_tx_pkt_len;
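A hedged sketch of how a Tx path might honor min_tx_pkt_len by zero-padding runt packets, assuming a single-segment mbuf with sufficient tailroom (string.h, errno.h, and rte_mbuf.h includes assumed):

static inline int
sketch_pad_short_pkt(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	uint16_t pad;
	char *tail;

	if (rte_pktmbuf_pkt_len(m) >= txq->min_tx_pkt_len)
		return 0;
	pad = txq->min_tx_pkt_len - rte_pktmbuf_pkt_len(m);
	tail = rte_pktmbuf_append(m, pad);
	if (tail == NULL)
		return -ENOSPC; /* not enough tailroom to pad */
	memset(tail, 0, pad);
	return 0;
}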
+ uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
bool tx_deferred_start; /* don't start this queue in dev start */
bool configured; /* indicate if tx queue has been configured */
+ /*
+ * Indicate whether to write the vlan_tci of the mbuf into the inner
+ * VLAN field of the Tx BD. On hardware network engines whose VLAN mode
+ * is HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer
+ * VLAN field of the Tx BD is always overwritten by the PVID when a
+ * PVID is set, so the driver must shift the VLAN into the inner field
+ * in software. On hardware network engines whose VLAN mode is
+ * HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, the hardware
+ * shifts the VLAN field automatically when a PVID is set, so the PMD
+ * does not need to perform any PVID-related operation in Tx; in that
+ * case pvid_sw_shift_en will be false.
+ */
+ bool pvid_sw_shift_en;
+ bool enabled; /* indicate if Tx queue has been enabled */
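For illustration, a sketch of the corresponding Tx-side decision; the hns3_desc field names used here (tx.vlan_tag, tx.outer_vlan_tag) are assumptions made for the example, not definitions from this header:

static inline void
sketch_tx_vlan_fill(struct hns3_tx_queue *txq, struct hns3_desc *bd,
		    struct rte_mbuf *m)
{
	if ((m->ol_flags & PKT_TX_VLAN_PKT) == 0)
		return;
	if (txq->pvid_sw_shift_en)
		/* PVID would overwrite the outer field: use the inner one. */
		bd->tx.vlan_tag = rte_cpu_to_le_16(m->vlan_tci);
	else
		bd->tx.outer_vlan_tag = rte_cpu_to_le_16(m->vlan_tci);
}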
/*
* The following items are used for the abnormal error statistics in
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
-int hns3_reset_all_queues(struct hns3_adapter *hns);
+int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
-int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
-int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
+int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
+void hns3_start_tqps(struct hns3_hw *hw);
+void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket, const struct rte_eth_txconf *conf);
+int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
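These four entry points back the generic ethdev per-queue start/stop operations; a minimal application-side usage sketch (ethdev dispatches to hns3_dev_rx_queue_start()/stop() underneath):

#include <rte_ethdev.h>

static int
sketch_restart_rxq(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* Stop, then restart, one Rx queue at runtime. */
	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}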
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
-void hns3_update_all_queues_pvid_state(struct hns3_hw *hw);
+void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
+int hns3_start_all_txqs(struct rte_eth_dev *dev);
+int hns3_start_all_rxqs(struct rte_eth_dev *dev);
+void hns3_stop_all_txqs(struct rte_eth_dev *dev);
+
#endif /* _HNS3_RXTX_H_ */