uint16_t last_id;
};
+struct iavf_tx_vec_entry {
+	struct rte_mbuf *mbuf;
+};
+
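This one-pointer entry presumably lets the vector Tx path treat the queue's software ring as a flat array of mbuf pointers. A minimal sketch of how a completion routine might recycle mbufs from such a ring; the function name and the txep/nb_free parameters are illustrative, not the driver's code:

#include <rte_mbuf.h>
#include <rte_mempool.h>

static inline void
tx_free_bulk_sketch(struct iavf_tx_vec_entry *txep, uint16_t nb_free)
{
	uint16_t i;

	for (i = 0; i < nb_free; i++) {
		/* Drop our reference; prefree returns the segment only
		 * when it can go straight back to its mempool. */
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(txep[i].mbuf);

		if (m != NULL)
			rte_mempool_put(m->pool, m);
		txep[i].mbuf = NULL;
	}
}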
/* Structure associated with each TX queue. */
struct iavf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
			const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
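The new iavf_dev_tx_done_cleanup hook backs the generic rte_eth_tx_done_cleanup() ethdev API. A hedged usage sketch from the application side; the function name and the budget of 32 are illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Ask the PMD to free up to 32 already-transmitted mbufs;
	 * a free_cnt of 0 means "free as many as possible". */
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	if (nb < 0)
		printf("Tx cleanup failed or unsupported: %d\n", nb);
}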
void iavf_dev_tx_queue_release(void *txq);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+				   uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
+					     struct rte_mbuf **rx_pkts,
+					     uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
+						      struct rte_mbuf **rx_pkts,
+						      uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+				   uint16_t nb_pkts);
+int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
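These AVX512 entry points sit alongside the existing SSE/AVX2 ones, with the burst function chosen at configure time. A hedged sketch of that selection, assuming a runtime check on the AVX512F CPU flag; the function itself and the use_flex_rxd parameter are illustrative, and the driver's real logic also gates this on build-time AVX512 support and on scattered Rx:

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_ethdev.h>

static void
select_burst_funcs_sketch(struct rte_eth_dev *dev, bool use_flex_rxd)
{
	/* Keep the scalar/AVX2 paths when AVX512F is absent. */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		return;

	dev->rx_pkt_burst = use_flex_rxd ?
		iavf_recv_pkts_vec_avx512_flex_rxd :
		iavf_recv_pkts_vec_avx512;
	dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
}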
const uint32_t *iavf_get_default_ptype_table(void);
{
	if (on) {
		/* enable flow director processing */
-		if (ad->fdir_ref_cnt++ == 0)
-			FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+		ad->fdir_ref_cnt++;
	} else {
		if (ad->fdir_ref_cnt >= 1) {
			ad->fdir_ref_cnt--;
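Since the hunk cuts off mid-branch, here is a hedged reconstruction of the full pattern this change produces: enable now re-applies the per-queue flag on every new reference (so queues created after the first filter was added still get flagged), while disable, judging from the surviving context, is assumed to clear the flag only once the last reference is gone:

static inline void
fdir_rx_proc_enable_sketch(struct iavf_adapter *ad, bool on)
{
	if (on) {
		/* Re-apply on every reference, not just the first. */
		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
		ad->fdir_ref_cnt++;
	} else if (ad->fdir_ref_cnt >= 1) {
		ad->fdir_ref_cnt--;
		/* Assumed from the truncated context: clear the
		 * per-queue flag only when the last user is gone. */
		if (ad->fdir_ref_cnt == 0)
			FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
	}
}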