diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index d7d70f6a1b..ba24e0076a 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -1,10 +1,13 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
 #ifndef _HNS3_RXTX_H_
 #define _HNS3_RXTX_H_
 
+#include <stdint.h>
+#include <rte_mbuf_core.h>
+
 #define HNS3_MIN_RING_DESC 64
 #define HNS3_MAX_RING_DESC 32768
 #define HNS3_DEFAULT_RING_DESC 1024
@@ -17,7 +20,7 @@
 #define HNS3_DEFAULT_TX_RS_THRESH 32
 #define HNS3_TX_FAST_FREE_AHEAD 64
 
-#define HNS3_DEFAULT_RX_BURST 32
+#define HNS3_DEFAULT_RX_BURST 64
 #if (HNS3_DEFAULT_RX_BURST > 64)
 #error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
 #endif
@@ -85,6 +88,8 @@
 #define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
 #define HNS3_RXD_OL4ID_S 8
 #define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
+#define HNS3_RXD_PTYPE_S 4
+#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
 #define HNS3_RXD_FBHI_S 12
 #define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
 #define HNS3_RXD_FBLI_S 14
@@ -99,9 +104,8 @@
 #define HNS3_RXD_LUM_B 9
 #define HNS3_RXD_CRCP_B 10
 #define HNS3_RXD_L3L4P_B 11
-#define HNS3_RXD_TSIND_S 12
-#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
-#define HNS3_RXD_LKBK_B 15
+
+#define HNS3_RXD_TS_VLD_B 14
 #define HNS3_RXD_GRO_SIZE_S 16
 #define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
 
@@ -144,6 +148,7 @@
 #define HNS3_TXD_MSS_S 0
 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+#define HNS3_TXD_OL4CS_B 22
 
 #define HNS3_L2_LEN_UNIT 1UL
 #define HNS3_L3_LEN_UNIT 2UL
 #define HNS3_L4_LEN_UNIT 2UL
@@ -194,6 +199,8 @@ enum hns3_pkt_tun_type {
 struct hns3_desc {
	union {
		uint64_t addr;
+		uint64_t timestamp;
+
		struct {
			uint32_t addr0;
			uint32_t addr1;
@@ -229,7 +236,7 @@ struct hns3_desc {
				};
			};
 
-			uint32_t paylen;
+			uint32_t paylen_fd_dop_ol4cs;
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t mss;
		} tx;
@@ -263,23 +270,33 @@ struct hns3_entry {
	struct rte_mbuf *mbuf;
 };
 
+struct hns3_rx_basic_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+};
+
+struct hns3_rx_dfx_stats {
+	uint64_t l3_csum_errors;
+	uint64_t l4_csum_errors;
+	uint64_t ol3_csum_errors;
+	uint64_t ol4_csum_errors;
+};
+
+struct hns3_rx_bd_errors_stats {
+	uint64_t l2_errors;
+	uint64_t pkt_len_errors;
+};
+
 struct hns3_rx_queue {
-	void *io_base;
	volatile void *io_head_reg;
-	struct hns3_adapter *hns;
	struct hns3_ptype_table *ptype_tbl;
	struct rte_mempool *mb_pool;
	struct hns3_desc *rx_ring;
-	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
-	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;
-	struct rte_mbuf *pkt_first_seg;
-	struct rte_mbuf *pkt_last_seg;
-	uint16_t queue_id;
	uint16_t port_id;
	uint16_t nb_rx_desc;
-	uint16_t rx_buf_len;
	/*
	 * threshold for the number of BDs waited to passed to hardware. If the
	 * number exceeds the threshold, driver will pass these BDs to hardware.
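The hunk above splits the Rx counters into three groups: basic_stats for per-queue packets/bytes, dfx_stats for checksum errors that only mark the mbuf, and err_stats for BD errors that force a drop (used below in hns3_handle_bdinfo). A minimal sketch of folding the drop-causing counters together; the helper is hypothetical, only the struct fields come from this patch:

static inline uint64_t
hns3_rxq_sw_drops_sketch(const struct hns3_rx_queue *rxq)
{
	/* err_stats entries are the cases where the driver discards the BD */
	return rxq->err_stats.l2_errors + rxq->err_stats.pkt_len_errors;
}
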
@@ -293,8 +310,6 @@ struct hns3_rx_queue {
	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
	uint8_t crc_len;
 
-	bool rx_deferred_start; /* don't start this queue in dev start */
-	bool configured;        /* indicate if rx queue has been configured */
	/*
	 * Indicate whether ignore the outer VLAN field in the Rx BD reported
	 * by the Hardware. Because the outer VLAN is the PVID if the PVID is
@@ -306,35 +321,108 @@ struct hns3_rx_queue {
	 * driver does not need to perform PVID-related operation in Rx. At this
	 * point, the pvid_sw_discard_en will be false.
	 */
-	bool pvid_sw_discard_en;
+	uint8_t pvid_sw_discard_en:1;
+	uint8_t ptype_en:1; /* indicate if the ptype field is enabled */
 
-	uint64_t l2_errors;
-	uint64_t pkt_len_errors;
-	uint64_t l3_csum_errors;
-	uint64_t l4_csum_errors;
-	uint64_t ol3_csum_errors;
-	uint64_t ol4_csum_errors;
+	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
+	/* offset_table: used for vector Rx, to solve the execution re-order problem */
+	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
+
+	uint16_t bulk_mbuf_num; /* number of valid entries in bulk_mbuf */
+
+	struct hns3_rx_basic_stats basic_stats;
+
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
-	uint16_t bulk_mbuf_num;
-	/* offset_table: used for vector, to solve execute re-order problem */
-	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
-	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
+	/* DFX statistics: errors that do not require discarding the packet */
+	struct hns3_rx_dfx_stats dfx_stats;
+	/* BD error statistics: errors that require discarding the packet */
+	struct hns3_rx_bd_errors_stats err_stats;
	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t queue_id;
+	uint16_t rx_buf_len;
+
+	bool configured;        /* indicate if rx queue has been configured */
+	bool rx_deferred_start; /* don't start this queue in dev start */
+	bool enabled;           /* indicate if Rx queue has been enabled */
+};
+
+struct hns3_tx_basic_stats {
+	uint64_t packets;
+	uint64_t bytes;
+};
+
+/*
+ * The following items count the abnormal errors in the Tx datapath.
+ * When an application calls the rte_eth_tx_burst API to send multiple
+ * packets at a time on the hns3 network engine, some abnormal
+ * conditions may prevent the driver from programming the hardware to
+ * send the packets correctly.
+ * Note: when the first abnormal error is detected within a burst, the
+ * relevant error statistics item is increased by one and the send loop
+ * of the function is exited. That is to say, even if several packets
+ * in the burst could trigger abnormal errors, the relevant error
+ * statistics item is increased only by one for that burst.
+ * The Tx abnormal error statistics items are described in detail
+ * below:
+ * - over_length_pkt_cnt
+ *     Total number of packets whose length exceeds HNS3_MAX_FRAME_LEN
+ *     supported by the driver.
+ *
+ * - exceed_limit_bd_pkt_cnt
+ *     Total number of packets that need more BDs than the hardware
+ *     limit allows.
+ *
+ * - exceed_limit_bd_reassem_fail
+ *     Total number of packets that need more BDs than the hardware
+ *     limit allows and for which reassembly into fewer BDs fails.
+ *
+ * - unsupported_tunnel_pkt_cnt
+ *     Total number of unsupported tunnel packets. The unsupported
+ *     tunnel types are: vxlan_gpe, gtp, ipip and MPLSINUDP, where
+ *     MPLSINUDP is a packet with an MPLS-in-UDP (RFC 7510) header.
+ *
+ * - queue_full_cnt
+ *     Total number of times the number of BDs available in the current
+ *     BD queue was less than the number of BDs the packet needed.
+ *
+ * - pkt_padding_fail_cnt
+ *     Total number of times a packet shorter than the minimum packet
+ *     length (struct hns3_tx_queue::min_tx_pkt_len) supported by the
+ *     hardware in the Tx direction failed to be zero-padded.
+ */
+struct hns3_tx_dfx_stats {
+	uint64_t over_length_pkt_cnt;
+	uint64_t exceed_limit_bd_pkt_cnt;
+	uint64_t exceed_limit_bd_reassem_fail;
+	uint64_t unsupported_tunnel_pkt_cnt;
+	uint64_t queue_full_cnt;
+	uint64_t pkt_padding_fail_cnt;
+};
 
 struct hns3_tx_queue {
-	void *io_base;
	volatile void *io_tail_reg;
-	struct hns3_adapter *hns;
	struct hns3_desc *tx_ring;
-	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
-	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;
-	uint16_t queue_id;
-	uint16_t port_id;
	uint16_t nb_tx_desc;
	/*
	 * index of next BD whose corresponding rte_mbuf can be released by
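The once-per-burst accounting described in the comment above can be pictured as follows; the loop shape is illustrative, not the driver's actual hns3_xmit_pkts(), and HNS3_MAX_FRAME_LEN is assumed from hns3_ethdev.h:

static uint16_t
hns3_xmit_burst_sketch(struct hns3_tx_queue *txq,
		       struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];

		if (m->pkt_len > HNS3_MAX_FRAME_LEN) {
			/* count the first failure once, then abort the burst */
			txq->dfx_stats.over_length_pkt_cnt++;
			break;
		}
		/* ... fill BDs for m ... */
	}

	return nb_tx; /* number of packets actually queued */
}
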
@@ -350,21 +438,12 @@ struct hns3_tx_queue {
	uint16_t tx_free_thresh;
 
	/*
-	 * For better performance in tx datapath, releasing mbuf in batches is
-	 * required.
-	 * Only checking the VLD bit of the last descriptor in a batch of the
-	 * thresh descriptors does not mean that these descriptors are all sent
-	 * by hardware successfully. So we need to check that the VLD bits of
-	 * all descriptors are cleared. and then free all mbufs in the batch.
-	 * - tx_rs_thresh
-	 *   Number of mbufs released at a time.
-	 *
-	 * - free
-	 *   Tx mbuf free array used for preserving temporarily address of mbuf
-	 *   released back to mempool, when releasing mbuf in batches.
+	 * The minimum length of the packet supported by hardware in the Tx
+	 * direction.
	 */
-	uint16_t tx_rs_thresh;
-	struct rte_mbuf **free;
+	uint8_t min_tx_pkt_len;
+
+	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 
	/*
	 * tso mode.
@@ -382,16 +461,26 @@ struct hns3_tx_queue {
	 * checksum of packets that need TSO, so network driver software
	 * not need to recalculate it.
	 */
-	uint8_t tso_mode;
+	uint16_t tso_mode:1;
	/*
-	 * The minimum length of the packet supported by hardware in the Tx
-	 * direction.
+	 * udp checksum mode.
+	 * value range:
+	 *      HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+	 *
+	 * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
+	 *      In this mode, HW cannot do the checksum for special UDP ports
+	 *      such as 4789, 4790 and 6081 for non-tunnel UDP packets and for
+	 *      UDP tunnel packets without the PKT_TX_TUNNEL_MASK in the mbuf.
+	 *      So the PMD needs to do the checksum for these packets in
+	 *      software to avoid a checksum error.
+	 *
+	 * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
+	 *      In this mode, HW does not have the preceding problems and can
+	 *      directly calculate the checksum of these UDP packets.
	 */
-	uint32_t min_tx_pkt_len;
+	uint16_t udp_cksum_mode:1;
 
-	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
-	bool tx_deferred_start; /* don't start this queue in dev start */
-	bool configured; /* indicate if tx queue has been configured */
+	uint16_t simple_bd_enable:1;
+	uint16_t tx_push_enable:1; /* indicate if the Tx push is enabled */
	/*
	 * Indicate whether add the vlan_tci of the mbuf to the inner VLAN field
	 * of Tx BD. Because the outer VLAN will always be the PVID when the
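For HNS3_SPECIAL_PORT_SW_CKSUM_MODE above, the software fix-up amounts to computing the UDP checksum before handing the packet to hardware. A sketch for the non-TSO IPv4 case, using only generic rte_ip.h/rte_udp.h helpers (the function is hypothetical; port 4789 is shown, 4790 and 6081 would be tested the same way):

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_udp.h>
#include <rte_vxlan.h>

static void
sw_udp_cksum_sketch(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip;
	struct rte_udp_hdr *udp;

	ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
	udp = (struct rte_udp_hdr *)((char *)ip + m->l3_len);
	if (udp->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT))
		return;

	udp->dgram_cksum = 0;
	udp->dgram_cksum = rte_ipv4_udptcp_cksum(ip, udp);
	m->ol_flags &= ~PKT_TX_L4_MASK; /* checksum already done in software */
}
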
@@ -404,56 +493,44 @@ struct hns3_tx_queue {
	 * PVID-related operations in Tx. And pvid_sw_shift_en will be false at
	 * this point.
	 */
-	bool pvid_sw_shift_en;
+	uint16_t pvid_sw_shift_en:1;
 
	/*
-	 * The following items are used for the abnormal errors statistics in
-	 * the Tx datapath. When upper level application calls the
-	 * rte_eth_tx_burst API function to send multiple packets at a time with
-	 * burst mode based on hns3 network engine, there are some abnormal
-	 * conditions that cause the driver to fail to operate the hardware to
-	 * send packets correctly.
-	 * Note: When using burst mode to call the rte_eth_tx_burst API function
-	 * to send multiple packets at a time. When the first abnormal error is
-	 * detected, add one to the relevant error statistics item, and then
-	 * exit the loop of sending multiple packets of the function. That is to
-	 * say, even if there are multiple packets in which abnormal errors may
-	 * be detected in the burst, the relevant error statistics in the driver
-	 * will only be increased by one.
-	 * The detail description of the Tx abnormal errors statistic items as
-	 * below:
-	 * - over_length_pkt_cnt
-	 *     Total number of greater than HNS3_MAX_FRAME_LEN the driver
-	 *     supported.
-	 *
-	 * - exceed_limit_bd_pkt_cnt
-	 *     Total number of exceeding the hardware limited bd which process
-	 *     a packet needed bd numbers.
-	 *
-	 * - exceed_limit_bd_reassem_fail
-	 *     Total number of exceeding the hardware limited bd fail which
-	 *     process a packet needed bd numbers and reassemble fail.
-	 *
-	 * - unsupported_tunnel_pkt_cnt
-	 *     Total number of unsupported tunnel packet. The unsupported tunnel
-	 *     type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
-	 *     with MPLS-in-UDP RFC 7510 header.
-	 *
-	 * - queue_full_cnt
-	 *     Total count which the available bd numbers in current bd queue is
-	 *     less than the bd numbers with the pkt process needed.
+	 * For better performance in the tx datapath, releasing mbufs in
+	 * batches is required.
+	 * Only checking the VLD bit of the last descriptor in a batch of
+	 * tx_rs_thresh descriptors does not mean that these descriptors were
+	 * all sent by hardware successfully. So we need to check that the VLD
+	 * bits of all descriptors are cleared and only then free all mbufs in
+	 * the batch.
+	 * - tx_rs_thresh
+	 *     Number of mbufs released at a time.
	 *
-	 * - pkt_padding_fail_cnt
-	 *     Total count which the packet length is less than minimum packet
-	 *     length(struct hns3_tx_queue::min_tx_pkt_len) supported by
-	 *     hardware in Tx direction and fail to be appended with 0.
+	 * - free
+	 *     Tx mbuf free array used to temporarily preserve addresses of
+	 *     mbufs released back to the mempool when releasing mbufs in
+	 *     batches.
	 */
-	uint64_t over_length_pkt_cnt;
-	uint64_t exceed_limit_bd_pkt_cnt;
-	uint64_t exceed_limit_bd_reassem_fail;
-	uint64_t unsupported_tunnel_pkt_cnt;
-	uint64_t queue_full_cnt;
-	uint64_t pkt_padding_fail_cnt;
+	uint16_t tx_rs_thresh;
+	struct rte_mbuf **free;
+
+	struct hns3_tx_basic_stats basic_stats;
+	struct hns3_tx_dfx_stats dfx_stats;
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+
+	bool configured;        /* indicate if tx queue has been configured */
+	bool tx_deferred_start; /* don't start this queue in dev start */
+	bool enabled;           /* indicate if Tx queue has been enabled */
 };
 
 #define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
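The batched release documented above (tx_rs_thresh/free) follows this pattern. A simplified sketch: ring wrap-around is ignored, the helper and index parameter are hypothetical, and HNS3_TXD_VLD_B is assumed to be defined elsewhere in this header:

#include <rte_byteorder.h>
#include <rte_mbuf.h>

static inline void
tx_free_batch_sketch(struct hns3_tx_queue *txq, uint16_t first)
{
	const uint16_t vld = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
	struct hns3_entry *entry = &txq->sw_ring[first];
	uint16_t i;

	/* every BD of the batch must have its VLD bit cleared, not only
	 * the last one, before any mbuf of the batch may be freed */
	for (i = 0; i < txq->tx_rs_thresh; i++)
		if (txq->tx_ring[first + i].tx.tp_fe_sc_vld_ra_ri & vld)
			return; /* hardware still owns a BD in the batch */

	/* stash the batch's mbufs and release them back in one call */
	for (i = 0; i < txq->tx_rs_thresh; i++) {
		txq->free[i] = entry[i].mbuf;
		entry[i].mbuf = NULL;
	}
	rte_pktmbuf_free_bulk(txq->free, txq->tx_rs_thresh);
}
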
+ */ + void *io_base; + struct hns3_adapter *hns; + uint64_t tx_ring_phys_addr; /* TX ring DMA address */ + const struct rte_memzone *mz; + + uint16_t port_id; + uint16_t queue_id; + + bool configured; /* indicate if tx queue has been configured */ + bool tx_deferred_start; /* don't start this queue in dev start */ + bool enabled; /* indicate if Tx queue has been enabled */ }; #define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \ @@ -468,14 +545,11 @@ struct hns3_queue_info { }; #define HNS3_TX_CKSUM_OFFLOAD_MASK ( \ - PKT_TX_OUTER_IPV6 | \ - PKT_TX_OUTER_IPV4 | \ + PKT_TX_OUTER_UDP_CKSUM | \ PKT_TX_OUTER_IP_CKSUM | \ - PKT_TX_IPV6 | \ - PKT_TX_IPV4 | \ PKT_TX_IP_CKSUM | \ - PKT_TX_L4_MASK | \ - PKT_TX_TUNNEL_MASK) + PKT_TX_TCP_SEG | \ + PKT_TX_L4_MASK) enum hns3_cksum_status { HNS3_CKSUM_NONE = 0, @@ -485,19 +559,53 @@ enum hns3_cksum_status { HNS3_OUTER_L4_CKSUM_ERR = 8 }; -static inline int -hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm, - uint32_t bd_base_info, uint32_t l234_info, - uint32_t *cksum_err) +extern uint64_t hns3_timestamp_rx_dynflag; +extern int hns3_timestamp_dynfield_offset; + +static inline void +hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq, + struct rte_mbuf *rxm, + uint32_t l234_info) { -#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \ - BIT(HNS3_RXD_TRUNCATE_B)) -#define CHECKSUM_ERR_FLAG (BIT(HNS3_RXD_L3E_B) | \ +#define HNS3_RXD_CKSUM_ERR_MASK (BIT(HNS3_RXD_L3E_B) | \ BIT(HNS3_RXD_L4E_B) | \ BIT(HNS3_RXD_OL3E_B) | \ BIT(HNS3_RXD_OL4E_B)) - uint32_t tmp = 0; + if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) { + rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + return; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) { + rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; + rxq->dfx_stats.l3_csum_errors++; + } else { + rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) { + rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; + rxq->dfx_stats.l4_csum_errors++; + } else { + rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) + rxq->dfx_stats.ol3_csum_errors++; + + if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) { + rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + rxq->dfx_stats.ol4_csum_errors++; + } +} + +static inline int +hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm, + uint32_t bd_base_info, uint32_t l234_info) +{ +#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \ + BIT(HNS3_RXD_TRUNCATE_B)) /* * If packet len bigger than mtu when recv with no-scattered algorithm, @@ -510,105 +618,58 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm, if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) { if (l234_info & BIT(HNS3_RXD_L2E_B)) - rxq->l2_errors++; + rxq->err_stats.l2_errors++; else - rxq->pkt_len_errors++; + rxq->err_stats.pkt_len_errors++; return -EINVAL; } - if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) { - if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) { - *cksum_err = 0; - return 0; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) { - rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; - rxq->l3_csum_errors++; - tmp |= HNS3_L3_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) { - rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; - rxq->l4_csum_errors++; - tmp |= HNS3_L4_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) { - rxq->ol3_csum_errors++; - tmp |= HNS3_OUTER_L3_CKSUM_ERR; - } - - if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) { - rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; - 
@@ -510,105 +618,58 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
-			rxq->l2_errors++;
+			rxq->err_stats.l2_errors++;
		else
-			rxq->pkt_len_errors++;
+			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}
 
-	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
-		if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
-			*cksum_err = 0;
-			return 0;
-		}
-
-		if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
-			rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-			rxq->l3_csum_errors++;
-			tmp |= HNS3_L3_CKSUM_ERR;
-		}
-
-		if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
-			rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-			rxq->l4_csum_errors++;
-			tmp |= HNS3_L4_CKSUM_ERR;
-		}
-
-		if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
-			rxq->ol3_csum_errors++;
-			tmp |= HNS3_OUTER_L3_CKSUM_ERR;
-		}
-
-		if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
-			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
-			rxq->ol4_csum_errors++;
-			tmp |= HNS3_OUTER_L4_CKSUM_ERR;
-		}
-	}
-	*cksum_err = tmp;
+	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
+		hns3_rx_set_cksum_flag(rxq, rxm, l234_info);
 
	return 0;
 }
 
-static inline void
-hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
-		       const uint32_t cksum_err)
-{
-	if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
-		if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
-		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
-			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-		if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
-		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
-			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
-		    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
-			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
-	} else {
-		if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
-		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
-			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
-		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
-			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-	}
-}
-
 static inline uint32_t
 hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
		   const uint32_t ol_info)
 {
-	const struct hns3_ptype_table *const ptype_tbl = rxq->ptype_tbl;
-	uint32_t l2id, l3id, l4id;
-	uint32_t ol3id, ol4id;
+	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
+	uint32_t ol3id, ol4id;
+	uint32_t l3id, l4id;
+	uint32_t ptype;
+
+	if (rxq->ptype_en) {
+		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+				       HNS3_RXD_PTYPE_S);
+		return ptype_tbl->ptype[ptype];
+	}
 
	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
-	l2id = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
-			      HNS3_RXD_STRP_TAGP_S);
	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
 
	if (unlikely(ptype_tbl->ol4table[ol4id]))
-		return ptype_tbl->inner_l2table[l2id] |
-			ptype_tbl->inner_l3table[l3id] |
+		return ptype_tbl->inner_l3table[l3id] |
			ptype_tbl->inner_l4table[l4id] |
-			ptype_tbl->ol3table[ol3id] | ptype_tbl->ol4table[ol4id];
+			ptype_tbl->ol3table[ol3id] |
+			ptype_tbl->ol4table[ol4id];
	else
-		return ptype_tbl->l2table[l2id] | ptype_tbl->l3table[l3id] |
-			ptype_tbl->l4table[l4id];
+		return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
 }
 
 void hns3_dev_rx_queue_release(void *queue);
 void hns3_dev_tx_queue_release(void *queue);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
-int hns3_reset_all_queues(struct hns3_adapter *hns);
+int hns3_reset_all_tqps(struct hns3_adapter *hns);
 void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
 int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
-int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
-int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
+int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
+void hns3_start_tqps(struct hns3_hw *hw);
+void hns3_stop_tqps(struct hns3_hw *hw);
 int hns3_rxq_iterate(struct rte_eth_dev *dev,
		 int (*callback)(struct hns3_rx_queue *, void *), void *arg);
 void hns3_dev_release_mbufs(struct hns3_adapter *hns);
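hns3_get_field() used in the new ptype_en fast path is, assuming its usual hns3 definition (((origin) & (mask)) >> (shift)), equivalent to the open-coded extraction below; the wrapper function is only for illustration:

static inline uint32_t
rxd_ptype_sketch(uint32_t ol_info)
{
	/* full hardware-parsed packet type from the Rx descriptor's ol_info,
	 * same result as hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
	 * HNS3_RXD_PTYPE_S) */
	return (ol_info & HNS3_RXD_PTYPE_M) >> HNS3_RXD_PTYPE_S;
}
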
@@ -617,12 +678,19 @@ int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t nb_desc, struct rte_mempool *mp);
 int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_txconf *conf);
-uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-			uint16_t nb_pkts);
+uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+uint16_t hns3_recv_pkts_simple(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts);
 uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
 uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
+uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts);
 int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
@@ -635,12 +703,15 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
 uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
+uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
+				uint16_t nb_pkts);
 int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
 void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
 void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
@@ -661,4 +732,14 @@ void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
 void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
+uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
+int hns3_start_all_txqs(struct rte_eth_dev *dev);
+int hns3_start_all_rxqs(struct rte_eth_dev *dev);
+void hns3_stop_all_txqs(struct rte_eth_dev *dev);
+void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
+int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
+void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
+int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
 #endif /* _HNS3_RXTX_H_ */
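The new descriptor-status prototypes added at the end of the header back the generic ethdev queries; a minimal application-side usage sketch (the wrapper function name is illustrative):

#include <rte_ethdev.h>

static int
rx_desc_done_sketch(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* offset is relative to the next descriptor the driver processes */
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_RX_DESC_DONE;
}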