Once mbufs are transmitted, they are freed by the hardware, so no mbufs
accumulate as pending on the Tx queue.
Hence the operation is a NOP for the cnxk platform.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
.rxq_info_get = cnxk_nix_rxq_info_get,
.txq_info_get = cnxk_nix_txq_info_get,
+ .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
};
static int
int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id);
int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
+int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}
+
+/* It is a NOP for cnxk as HW frees the buffer on xmit */
+int
+cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ RTE_SET_USED(txq);
+ RTE_SET_USED(free_cnt);
+
+ return 0;
+}
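
A minimal caller-side sketch (not part of this patch) of how an application might invoke the cleanup through the generic ethdev API, rte_eth_tx_done_cleanup(); the port/queue values and the helper name are illustrative only. On cnxk the call simply returns 0, since transmitted mbufs are already freed by hardware, while other drivers may free and count completed mbufs.

```c
#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: ask the driver to reclaim completed Tx mbufs.
 * free_cnt == 0 requests cleanup of all completed mbufs on the queue.
 */
static void
app_tx_cleanup(uint16_t port_id, uint16_t queue_id)
{
	int rc = rte_eth_tx_done_cleanup(port_id, queue_id, 0);

	if (rc < 0)
		/* e.g. -ENOTSUP if the driver does not implement the op */
		printf("tx_done_cleanup failed: %d\n", rc);
	else
		/* On cnxk this is always 0: HW freed the mbufs at xmit */
		printf("freed %d mbufs\n", rc);
}
```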