1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Hisilicon Limited.
5 #ifndef _HNS3_RXTX_VEC_NEON_H_
6 #define _HNS3_RXTX_VEC_NEON_H_
10 #pragma GCC diagnostic ignored "-Wcast-qual"
/*
 * Fill one hardware Tx buffer descriptor (BD) from a single mbuf using
 * two 128-bit NEON stores.
 *
 * NOTE(review): this excerpt omits several lines of the function (the
 * return type on the signature and the declarations/initializers of
 * val1 and val2, presumably uint64x2_t vectors built from the values
 * shown below — confirm against the full source). Comments describe
 * only the visible lines.
 */
13 hns3_vec_tx(volatile struct hns3_desc *desc, struct rte_mbuf *pkt)
/* DMA address of the packet payload: buffer IOVA plus headroom offset. */
16 pkt->buf_iova + pkt->data_off,
/* Data length positioned into its descriptor bit field. */
17 ((uint64_t)pkt->data_len) << HNS3_TXD_SEND_SIZE_SHIFT
/* Default VLD/FE/BD-type flags shifted into the upper 32 bits of the word. */
21 ((uint64_t)HNS3_TXD_DEFAULT_VLD_FE_BDTYPE) << HNS3_UINT32_BIT
/*
 * Write the descriptor with two 16-byte vector stores: first the
 * addr/len pair, then the words starting at tx.outer_vlan_tag.
 * The casts drop volatile, hence the -Wcast-qual pragma at file scope.
 */
23 vst1q_u64((uint64_t *)&desc->addr, val1);
24 vst1q_u64((uint64_t *)&desc->tx.outer_vlan_tag, val2);
/*
 * NEON burst transmit: post up to nb_pkts packets (one BD per packet)
 * onto the Tx ring, handling the ring wrap-around in two copy loops,
 * then kick the doorbell register.
 *
 * NOTE(review): this excerpt omits many interior lines (declarations of
 * nb_commit/nb_tx/next_to_use/n/i, the early-return on a full queue,
 * the nb_commit/next_to_use adjustment at the wrap point, and the final
 * return). Comments below cover only the visible logic — verify the
 * hedged notes against the full source.
 */
28 hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
29 struct rte_mbuf **__restrict tx_pkts,
32 struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
33 volatile struct hns3_desc *tx_desc;
34 struct hns3_entry *tx_entry;
/* Reclaim completed descriptors only once the free count drops below
 * the threshold, keeping the cleanup cost off the common fast path. */
40 if (txq->tx_bd_ready < txq->tx_free_thresh)
41 hns3_tx_free_buffers(txq);
/* Clamp the burst to the number of BDs actually available. */
43 nb_commit = RTE_MIN(txq->tx_bd_ready, nb_pkts);
/* No BDs free even after cleanup: count the full-queue event
 * (presumably followed by an early return — not visible here). */
44 if (unlikely(nb_commit == 0)) {
45 txq->queue_full_cnt++;
/* Start filling at the current producer position in both the hardware
 * ring and the shadow sw_ring that tracks mbuf ownership. */
50 next_to_use = txq->next_to_use;
51 tx_desc = &txq->tx_ring[next_to_use];
52 tx_entry = &txq->sw_ring[next_to_use];
/*
 * We need to deal with n descriptors first for better performance,
 * if nb_commit is greater than the difference between txq->nb_tx_desc
 * and next_to_use in sw_ring and tx_ring.
 */
/* n = contiguous BDs remaining before the end of the ring. */
59 n = txq->nb_tx_desc - next_to_use;
/* First pass: fill up to the end of the ring. */
61 for (i = 0; i < n; i++, tx_pkts++, tx_desc++) {
62 hns3_vec_tx(tx_desc, *tx_pkts);
63 tx_entry[i].mbuf = *tx_pkts;
/* Wrap: restart at the ring base (next_to_use presumably reset to 0
 * in the omitted lines) for the remaining descriptors. */
68 tx_desc = &txq->tx_ring[next_to_use];
69 tx_entry = &txq->sw_ring[next_to_use];
/* Second pass: fill the remaining nb_commit descriptors from the base. */
72 for (i = 0; i < nb_commit; i++, tx_pkts++, tx_desc++) {
73 hns3_vec_tx(tx_desc, *tx_pkts);
74 tx_entry[i].mbuf = *tx_pkts;
/* Advance the producer index and consume the posted BDs. */
77 next_to_use += nb_commit;
78 txq->next_to_use = next_to_use;
79 txq->tx_bd_ready -= nb_tx;
/* Doorbell: tell hardware how many new BDs were posted. */
81 hns3_write_reg_opt(txq->io_tail_reg, nb_tx);
85 #endif /* _HNS3_RXTX_VEC_NEON_H_ */