/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#ifndef _HNS3_RXTX_VEC_H_
#define _HNS3_RXTX_VEC_H_

#include "hns3_ethdev.h"
12 hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
14 struct rte_mbuf **free = txq->free;
15 struct hns3_entry *tx_entry;
20 tx_entry = &txq->sw_ring[txq->next_to_clean];
21 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
22 m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
23 tx_entry->mbuf = NULL;
28 if (nb_free && m->pool != free[0]->pool) {
29 rte_mempool_put_bulk(free[0]->pool, (void **)free,
37 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
39 /* Update numbers of available descriptor due to buffer freed */
40 txq->tx_bd_ready += txq->tx_rs_thresh;
41 txq->next_to_clean += txq->tx_rs_thresh;
42 if (txq->next_to_clean >= txq->nb_tx_desc)
43 txq->next_to_clean = 0;
47 hns3_tx_free_buffers(struct hns3_tx_queue *txq)
49 struct hns3_desc *tx_desc;
53 * All mbufs can be released only when the VLD bits of all
54 * descriptors in a batch are cleared.
56 tx_desc = &txq->tx_ring[txq->next_to_clean];
57 for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
58 if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
59 rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
63 hns3_tx_bulk_free_buffers(txq);
66 static inline uint16_t
67 hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
69 uint64_t pkt_err_mask)
74 if (likely(pkt_err_mask == 0))
78 for (i = 0; i < nb_pkts; i++) {
79 mask = ((uint64_t)1u) << i;
80 if (pkt_err_mask & mask)
81 rte_pktmbuf_free_seg(rx_pkts[i]);
83 rx_pkts[count++] = rx_pkts[i];
88 #endif /* _HNS3_RXTX_VEC_H_ */