 * should not be passed to the upper-layer application. For hardware
* network engine whose vlan mode is HNS3_HW_SHIFT_AND_DISCARD_MODE,
* such as kunpeng 930, PVID will not be reported to the BDs. So, PMD
- * driver does not need to perform PVID-related operation in Rx. At this
+ * does not need to perform PVID-related operations in Rx. At this
* point, the pvid_sw_discard_en will be false.
*/
uint8_t pvid_sw_discard_en:1;
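/*
 * A minimal sketch of the software discard this flag gates; the helper
 * name and the rxq->pvid field are illustrative assumptions, not the
 * driver's actual code. When the BD reports a VLAN tag equal to the
 * port's PVID, hide it from the application as if HW had discarded it.
 */
static inline void
hns3_rxq_pvid_sw_discard(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm)
{
	if (rxq->pvid_sw_discard_en &&
	    (rxm->ol_flags & RTE_MBUF_F_RX_VLAN) &&
	    rxm->vlan_tci == rxq->pvid) {
		rxm->ol_flags &= ~(RTE_MBUF_F_RX_VLAN |
				   RTE_MBUF_F_RX_VLAN_STRIPPED);
		rxm->vlan_tci = 0;
	}
}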
struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
-
/*
* The following fields are not accessed in the I/O path, so they are
* placed at the end.
*/
- void *io_base;
+ void *io_base __rte_cache_aligned;
struct hns3_adapter *hns;
uint64_t rx_ring_phys_addr; /* RX ring DMA address */
const struct rte_memzone *mz;
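/*
 * Why io_base carries __rte_cache_aligned: it pushes the control-path
 * fields onto their own cache line so they can never false-share a line
 * with the per-packet fields above. An illustrative compile-time check
 * of that invariant (not part of the driver; needs offsetof from
 * <stddef.h> and RTE_CACHE_LINE_SIZE from <rte_common.h>):
 */
_Static_assert(offsetof(struct hns3_rx_queue, io_base) %
	       RTE_CACHE_LINE_SIZE == 0,
	       "cold fields must start on their own cache line");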
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
 * In this mode, HW cannot do checksum for special UDP ports like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
- * packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
+ * packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, the PMD needs to
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
* PVID will overwrite the outer VLAN field of Tx BD. For the hardware
* network engine whose vlan mode is HNS3_HW_SHIFT_AND_DISCARD_MODE,
* such as kunpeng 930, if the PVID is set, the hardware will shift the
- * VLAN field automatically. So, PMD driver does not need to do
+ * VLAN field automatically. So, PMD does not need to do
* PVID-related operations in Tx. And pvid_sw_shift_en will be false at
* this point.
*/
uint16_t pvid_sw_shift_en:1;
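/*
 * A sketch of the software shift this flag gates; the helper is an
 * illustrative assumption, not the driver's code. Because PVID
 * overwrites the outer VLAN field of the Tx BD (see above), a tag the
 * application requests via RTE_MBUF_F_TX_VLAN has to travel in the
 * inner VLAN field when the hardware cannot shift it by itself.
 */
static inline uint16_t
hns3_tx_pick_inner_vlan(struct hns3_tx_queue *txq, struct rte_mbuf *m)
{
	if (txq->pvid_sw_shift_en && (m->ol_flags & RTE_MBUF_F_TX_VLAN))
		return m->vlan_tci;
	return 0;
}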
+ /* indicate whether the mbuf fast free offload is enabled */
+ uint16_t mbuf_fast_free_en:1;
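/*
 * What mbuf_fast_free_en permits, as a sketch (illustrative helper, not
 * the driver's function): with RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE the
 * application guarantees that every transmitted mbuf is direct,
 * non-segmented, has a reference count of 1 and comes from one mempool,
 * so a completed batch can go back to the pool in a single call instead
 * of per-mbuf rte_pktmbuf_free().
 */
static inline void
hns3_tx_fast_free(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	if (nb_pkts > 0)
		rte_mempool_put_bulk(pkts[0]->pool, (void **)pkts, nb_pkts);
}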
/*
* For better performance in tx datapath, releasing mbuf in batches is
struct hns3_tx_basic_stats basic_stats;
struct hns3_tx_dfx_stats dfx_stats;
-
/*
* The following fields are not accessed in the I/O path, so they are
* placed at the end.
*/
- void *io_base;
+ void *io_base __rte_cache_aligned;
struct hns3_adapter *hns;
uint64_t tx_ring_phys_addr; /* TX ring DMA address */
const struct rte_memzone *mz;
unsigned int socket_id;
};
-#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
- PKT_TX_OUTER_UDP_CKSUM | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
- PKT_TX_L4_MASK)
+#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_SEG | \
+ RTE_MBUF_F_TX_L4_MASK)
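/*
 * Typical use of the mask (an illustrative helper, not driver code): one
 * test tells the Tx path whether a packet requests any checksum or TSO
 * work at all before the individual flags are parsed.
 */
static inline bool
hns3_pkt_needs_cksum(const struct rte_mbuf *m)
{
	return (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) != 0;
}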
enum hns3_cksum_status {
HNS3_CKSUM_NONE = 0,
BIT(HNS3_RXD_OL4E_B))
if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
- rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
rxq->dfx_stats.l3_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
rxq->dfx_stats.l4_csum_errors++;
} else {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
rxq->dfx_stats.ol3_csum_errors++;
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
- rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
rxq->dfx_stats.ol4_csum_errors++;
}
}
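/*
 * How the flags set above are consumed downstream (illustrative; the
 * *_MASK names are the standard mbuf API): each checksum status is a
 * multi-bit field, so it is compared against its mask rather than
 * tested as a single bit.
 */
static inline bool
pkt_rx_cksum_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) !=
			RTE_MBUF_F_RX_IP_CKSUM_BAD &&
	       (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) !=
			RTE_MBUF_F_RX_L4_CKSUM_BAD;
}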
/*
 * If the packet length exceeds the MTU when receiving with the non-scattered algorithm,
- * the first n bd will without FE bit, we need process this sisution.
+ * the first n BDs will be without the FE bit; we need to handle this situation.
 * Note: we don't need to add a statistics counter because the last BD,
 * which carries the FE bit, will have the HNS3_RXD_L2E_B bit set.
*/
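/*
 * An illustrative helper for the case described above (not the driver's
 * exact code): BDs lacking the FE (frame end) bit are consumed and
 * recycled until the final BD of the oversized frame arrives, carrying
 * both the FE bit and the HNS3_RXD_L2E_B error that the existing stats
 * path already counts.
 */
static inline bool
hns3_rxd_is_frame_end(uint32_t bd_base_info)
{
	return (bd_base_info & BIT(HNS3_RXD_FE_B)) != 0;
}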
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
__rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode);
-int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
eth_tx_prep_t *prep);
-uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,