txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
txq->udp_cksum_mode = hw->udp_cksum_mode;
+ txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
tx_entry = &txq->sw_ring[txq->next_to_clean];
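+ /*
+ * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE means the application guarantees
+ * that all mbufs on this queue come from one mempool and hold a
+ * single reference, so the whole batch can go back with one bulk put.
+ */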
+ if (txq->mbuf_fast_free_en) {
+ rte_mempool_put_bulk(tx_entry->mbuf->pool,
+ (void **)tx_entry, txq->tx_rs_thresh);
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ tx_entry[i].mbuf = NULL;
+ goto update_field;
+ }
+
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((tx_entry + i)->mbuf);
for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
	rte_pktmbuf_free_seg(tx_entry->mbuf);
	tx_entry->mbuf = NULL;
}
+update_field:
txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
txq->tx_bd_ready += txq->tx_rs_thresh;
}
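
For context, a minimal sketch of how an application would request this
offload at configure time; port_id, nb_rxq and nb_txq are illustrative
and the capability check follows the usual ethdev pattern:

	struct rte_eth_conf port_conf = {0};
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);

The offload is only safe when every mbuf transmitted on the queue comes
from a single mempool and has a reference count of one.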
* this point.
*/
uint16_t pvid_sw_shift_en:1;
+ /* indicate whether the mbuf fast free offload is enabled */
+ uint16_t mbuf_fast_free_en:1;
/*
* For better performance in tx datapath, releasing mbuf in batches is
int i;
tx_entry = &txq->sw_ring[txq->next_to_clean];
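+ /* Fast free: one mempool and refcnt == 1 per queue, so bulk-free. */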
+ if (txq->mbuf_fast_free_en) {
+ rte_mempool_put_bulk(tx_entry->mbuf->pool, (void **)tx_entry,
+ txq->tx_rs_thresh);
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ tx_entry[i].mbuf = NULL;
+ goto update_field;
+ }
+
for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
tx_entry->mbuf = NULL;

if (m == NULL)
	continue;

if (nb_free && m->pool != free[0]->pool) {
	rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	nb_free = 0;
}
free[nb_free++] = m;
}

if (nb_free)
	rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+update_field:
/* Update the number of available descriptors, since buffers were freed */
txq->tx_bd_ready += txq->tx_rs_thresh;
txq->next_to_clean += txq->tx_rs_thresh;
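
For comparison, a self-contained sketch of why the fast-free branch can
bypass rte_pktmbuf_prefree_seg(): the generic path must cope with shared
or indirect mbufs and with mbufs from different pools, while fast free
relies on the application's guarantee. Function names are illustrative:

	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/* Generic release: handles refcnt > 1 and indirect mbufs. */
	static void
	release_generic(struct rte_mbuf *pkt)
	{
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(pkt);

		/* NULL means another reference still owns the buffer. */
		if (m != NULL)
			rte_mempool_put(m->pool, m);
	}

	/* Fast-free release: one bulk put covers the whole batch. */
	static void
	release_fast(struct rte_mbuf **pkts, unsigned int n)
	{
		rte_mempool_put_bulk(pkts[0]->pool, (void **)pkts, n);
	}

This is why the driver takes the early goto: the per-mbuf pool check and
the free[] staging array in the generic loop become unnecessary once the
fast-free guarantee holds.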