1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020-2021 HiSilicon Limited.
5 #ifndef _HNS3_RXTX_VEC_H_
6 #define _HNS3_RXTX_VEC_H_
9 #include "hns3_ethdev.h"
/*
 * Release a batch of tx_rs_thresh transmitted mbufs, starting at
 * txq->next_to_clean, back to their mempool(s), then advance the
 * clean index and replenish the count of ready Tx descriptors.
 *
 * NOTE(review): this chunk is missing source lines (the embedded
 * numbering jumps, e.g. 15 -> 20, 22 -> 24); local declarations
 * (`i`, `m`, `nb_free`) and several braces are not visible here.
 */
12 hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
14 struct rte_mbuf **free = txq->free;
15 struct hns3_entry *tx_entry;
20 tx_entry = &txq->sw_ring[txq->next_to_clean];
21 if (txq->mbuf_fast_free_en) {
/*
 * Fast-free path: return the whole sw_ring slice to one mempool in a
 * single bulk call. Presumably struct hns3_entry holds only the mbuf
 * pointer, so the entry array can be cast to void ** directly — TODO
 * confirm against the hns3_entry definition in hns3_ethdev.h.
 */
22 rte_mempool_put_bulk(tx_entry->mbuf->pool, (void **)tx_entry,
/* Clear the sw_ring slots so the mbufs are not freed twice. */
24 for (i = 0; i < txq->tx_rs_thresh; i++)
25 tx_entry[i].mbuf = NULL;
/*
 * Slow path: mbufs may come from different mempools, so gather them
 * into the local `free` array and flush per-pool.
 */
29 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
30 m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
31 tx_entry->mbuf = NULL;
/* Flush the pending batch when the pool changes mid-run. */
36 if (nb_free && m->pool != free[0]->pool) {
37 rte_mempool_put_bulk(free[0]->pool, (void **)free,
/* Flush whatever is left in the gather array. */
45 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
48 /* Update numbers of available descriptor due to buffer freed */
49 txq->tx_bd_ready += txq->tx_rs_thresh;
50 txq->next_to_clean += txq->tx_rs_thresh;
/* Wrap the clean index at the end of the descriptor ring. */
51 if (txq->next_to_clean >= txq->nb_tx_desc)
52 txq->next_to_clean = 0;
/*
 * Check whether the next batch of tx_rs_thresh descriptors has been
 * fully completed by hardware and, if so, bulk-free their mbufs via
 * hns3_tx_bulk_free_buffers().
 *
 * NOTE(review): lines are missing from this chunk (embedded numbering
 * jumps 68 -> 72); the early `return` taken when a VLD bit is still
 * set, and the closing braces, are presumably on the dropped lines.
 */
56 hns3_tx_free_buffers(struct hns3_tx_queue *txq)
58 struct hns3_desc *tx_desc;
62 * All mbufs can be released only when the VLD bits of all
63 * descriptors in a batch are cleared.
65 tx_desc = &txq->tx_ring[txq->next_to_clean];
/* Scan the batch: any descriptor still owned by hardware aborts the free. */
66 for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
67 if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
68 rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
72 hns3_tx_bulk_free_buffers(txq);
/*
 * Compact the rx_pkts array in place: each bit set in pkt_err_mask marks
 * a packet with a receive error, whose mbuf is freed; the remaining good
 * packets are shifted down to the front of the array. Returns the number
 * of good packets kept (uint16_t).
 *
 * NOTE(review): lines are missing from this chunk (embedded numbering
 * jumps 90 -> 92, 92 -> 97); the declaration/initialization of `count`,
 * the fast-path return for pkt_err_mask == 0, and the final
 * `return count;` are presumably on the dropped lines.
 */
75 static inline uint16_t
76 hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
78 uint64_t pkt_err_mask)
/* Fast path: no errors in the burst, nothing to compact. */
83 if (likely(pkt_err_mask == 0))
87 for (i = 0; i < nb_pkts; i++) {
88 mask = ((uint64_t)1u) << i;
/* Bit i set => packet i had an Rx error: drop its segment. */
89 if (pkt_err_mask & mask)
90 rte_pktmbuf_free_seg(rx_pkts[i]);
/* Otherwise keep it, packed toward the front of the array. */
92 rx_pkts[count++] = rx_pkts[i];
97 #endif /* _HNS3_RXTX_VEC_H_ */