* should not be transitted to the upper-layer application. For hardware
* network engine whose vlan mode is HNS3_HW_SHIFT_AND_DISCARD_MODE,
* such as kunpeng 930, PVID will not be reported to the BDs. So, PMD
- * driver does not need to perform PVID-related operation in Rx. At this
+ * does not need to perform PVID-related operations in Rx. At this
* point, the pvid_sw_discard_en will be false.
*/
uint8_t pvid_sw_discard_en:1;
struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
-
/*
* The following fields are not accessed in the I/O path, so they are
* placed at the end.
*/
- void *io_base;
+ void *io_base __rte_cache_aligned;
struct hns3_adapter *hns;
uint64_t rx_ring_phys_addr; /* RX ring DMA address */
const struct rte_memzone *mz;
* PVID will overwrite the outer VLAN field of Tx BD. For the hardware
* network engine whose vlan mode is HNS3_HW_SHIFT_AND_DISCARD_MODE,
* such as kunpeng 930, if the PVID is set, the hardware will shift the
- * VLAN field automatically. So, PMD driver does not need to do
+ * VLAN field automatically. So, PMD does not need to do
* PVID-related operations in Tx. And pvid_sw_shift_en will be false at
* this point.
*/
uint16_t pvid_sw_shift_en:1;
+ /* check whether the mbuf fast free offload is enabled */
+ uint16_t mbuf_fast_free_en:1;
/*
* For better performance in tx datapath, releasing mbuf in batches is
struct hns3_tx_basic_stats basic_stats;
struct hns3_tx_dfx_stats dfx_stats;
-
/*
* The following fields are not accessed in the I/O path, so they are
* placed at the end.
*/
- void *io_base;
+ void *io_base __rte_cache_aligned;
struct hns3_adapter *hns;
uint64_t tx_ring_phys_addr; /* TX ring DMA address */
const struct rte_memzone *mz;
/*
* If packet len bigger than mtu when recv with no-scattered algorithm,
- * the first n bd will without FE bit, we need process this sisution.
+ * the first n BDs will be without the FE bit; we need to process this situation.
* Note: we don't need add statistic counter because latest BD which
* with FE bit will mark HNS3_RXD_L2E_B bit.
*/
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
eth_tx_prep_t *prep);
-uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,