/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Hisilicon Limited.
 */
#ifndef _HNS3_RXTX_VEC_H_
#define _HNS3_RXTX_VEC_H_

#include "hns3_ethdev.h"
12 hns3_tx_free_buffers(struct hns3_tx_queue *txq)
14 struct rte_mbuf **free = txq->free;
15 struct hns3_entry *tx_entry;
16 struct hns3_desc *tx_desc;
22 * All mbufs can be released only when the VLD bits of all
23 * descriptors in a batch are cleared.
25 tx_desc = &txq->tx_ring[txq->next_to_clean];
26 for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
27 if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
28 rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
32 tx_entry = &txq->sw_ring[txq->next_to_clean];
33 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
34 m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
35 tx_entry->mbuf = NULL;
40 if (nb_free && m->pool != free[0]->pool) {
41 rte_mempool_put_bulk(free[0]->pool, (void **)free,
49 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
51 /* Update numbers of available descriptor due to buffer freed */
52 txq->tx_bd_ready += txq->tx_rs_thresh;
53 txq->next_to_clean += txq->tx_rs_thresh;
54 if (txq->next_to_clean >= txq->nb_tx_desc)
55 txq->next_to_clean = 0;
/*
 * Compact an Rx burst in place: free every mbuf whose bit is set in
 * pkt_err_mask and shift the good mbufs down to the front of rx_pkts.
 *
 * @param rx_pkts      array of nb_pkts received mbuf pointers (modified)
 * @param nb_pkts      number of entries in rx_pkts (bit i of the mask
 *                     corresponds to rx_pkts[i])
 * @param pkt_err_mask bitmask of packets to drop
 * @return number of valid packets remaining at the front of rx_pkts
 *
 * NOTE(review): reconstructed from a truncated dump — the local
 * declarations, the else branch (without which dropped mbufs would be
 * re-stored after being freed) and the return statement were missing;
 * matches the upstream DPDK hns3 driver, verify against the repository.
 */
static inline uint16_t
hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts,
			uint64_t pkt_err_mask)
{
	uint16_t count = 0;
	uint16_t i;
	uint64_t mask;

	for (i = 0; i < nb_pkts; i++) {
		mask = ((uint64_t)1u) << i;
		if (pkt_err_mask & mask)
			rte_pktmbuf_free_seg(rx_pkts[i]);
		else
			rx_pkts[count++] = rx_pkts[i];
	}

	return count;
}
#endif /* _HNS3_RXTX_VEC_H_ */