#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_
+#include <stdint.h>
+#include <rte_mbuf_core.h>
+
#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
* point, the pvid_sw_discard_en will be false.
*/
bool pvid_sw_discard_en;
+ bool enabled; /* indicate if Rx queue has been enabled */
uint64_t l2_errors;
uint64_t pkt_len_errors;
* this point.
*/
bool pvid_sw_shift_en;
+ bool enabled; /* indicate if Tx queue has been enabled */
/*
* The following items are used for the abnormal errors statistics in
};
/*
 * NOTE(review): this span is a unified-diff fragment (lines prefixed with
 * '+'/'-'), not compilable C. The hunk narrows the Tx checksum-offload mask:
 * the old mask (OUTER_IPV4/IPV6, IPV4/IPV6, TUNNEL_MASK) is removed and
 * PKT_TX_TCP_SEG is added alongside the IP/outer-IP checksum and L4 bits.
 * The authoritative post-patch definition appears to be:
 *   OUTER_IP_CKSUM | IP_CKSUM | TCP_SEG | L4_MASK
 * -- confirm against the applied DPDK hns3 patch before editing further.
 */
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TUNNEL_MASK)
+ PKT_TX_TCP_SEG | \
+ PKT_TX_L4_MASK)
enum hns3_cksum_status {
HNS3_CKSUM_NONE = 0,
/*
 * NOTE(review): diff fragment, not compilable C; the function's return-type
 * line (above) and the 'if' matching the 'else' at the bottom were elided
 * from this view -- do not edit without the full file.
 *
 * Computes the mbuf packet-type value for a received descriptor by decoding
 * the hardware l234_info/ol_info words into table indices and OR-ing the
 * corresponding entries of rxq->ptype_tbl together.
 *
 * The hunk changes the decode scheme:
 *  - adds an outer-L2 index (ol2id from HNS3_RXD_OVLAN) and looks it up in a
 *    new ol2table for the tunneled (first-return) path;
 *  - l2id now comes from HNS3_RXD_VLAN instead of HNS3_RXD_STRP_TAGP;
 *  - the non-tunneled path switches from separate l2table/l3table lookups to
 *    a combined two-dimensional l2l3table[l2id][l3id].
 * Presumably the elided 'if' tests whether the packet is tunneled (ol4id
 * valid) -- verify against the surrounding code.
 */
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
const uint32_t ol_info)
{
- const struct hns3_ptype_table *const ptype_tbl = rxq->ptype_tbl;
+ const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
uint32_t l2id, l3id, l4id;
- uint32_t ol3id, ol4id;
+ uint32_t ol3id, ol4id, ol2id;
/* Extract outer (tunnel) and inner header-type fields from the
 * descriptor words via the HNS3_RXD_* mask/shift pairs. */
ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
- l2id = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S);
+ ol2id = hns3_get_field(ol_info, HNS3_RXD_OVLAN_M, HNS3_RXD_OVLAN_S);
+ l2id = hns3_get_field(l234_info, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S);
l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
/* Tunneled path: combine inner l2/l3/l4 with outer l2/l3/l4 entries. */
return ptype_tbl->inner_l2table[l2id] |
ptype_tbl->inner_l3table[l3id] |
ptype_tbl->inner_l4table[l4id] |
- ptype_tbl->ol3table[ol3id] | ptype_tbl->ol4table[ol4id];
+ ptype_tbl->ol3table[ol3id] |
+ ptype_tbl->ol4table[ol4id] | ptype_tbl->ol2table[ol2id];
else
/* Non-tunneled path: combined l2+l3 lookup, plus l4. */
- return ptype_tbl->l2table[l2id] | ptype_tbl->l3table[l3id] |
+ return ptype_tbl->l2l3table[l2id][l3id] |
ptype_tbl->l4table[l4id];
}
/*
 * Exported queue-management and burst-function prototypes of the hns3 PMD.
 * NOTE(review): diff fragment -- '+'/'-' lines show an API reshuffle:
 * hns3_reset_all_queues -> hns3_reset_all_tqps, and the combined
 * hns3_start_queues/hns3_stop_queues pair is split into
 * hns3_init_queues + hns3_start_tqps/hns3_stop_tqps. Several prototype
 * continuation lines (e.g. "struct rte_mempool *mp);") are cut from this
 * view; their leading lines were elided.
 */
void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
-int hns3_reset_all_queues(struct hns3_adapter *hns);
+int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
-int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
-int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
+int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
+void hns3_start_tqps(struct hns3_hw *hw);
+void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket, const struct rte_eth_txconf *conf);
/* New per-queue start/stop entry points (rte_eth_dev_ops deferred-start
 * support, presumably -- confirm against the .c file). */
+uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
/* Rx burst variants: scalar, scattered, NEON vector, and (new) SVE vector. */
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode);
uint16_t nb_pkts);
/* Tx burst variants: vector and (new) SVE vector. */
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode);
struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
/* New helpers for per-TQP register offsets and bulk queue enable state. */
+uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
+int hns3_start_all_txqs(struct rte_eth_dev *dev);
+int hns3_start_all_rxqs(struct rte_eth_dev *dev);
+void hns3_stop_all_txqs(struct rte_eth_dev *dev);
+void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
+
#endif /* _HNS3_RXTX_H_ */