/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <rte_io.h>
#include <ethdev_driver.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_rxtx_vec.h"

#if defined RTE_ARCH_ARM64
#include "hns3_rxtx_vec_neon.h"
#endif
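
/*
 * Architecture-neutral glue around the NEON Rx/Tx burst routines:
 * capability checks that gate the vector datapath, mbuf rearming for
 * the Rx ring, and per-queue setup of the data the vector code relies on.
 */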
int
hns3_tx_check_vec_support(struct rte_eth_dev *dev)
{
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;

	/* Only RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE is supported by the vector Tx path */
	if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		return -ENOTSUP;

	/* The vector path is not supported when PTP is enabled */
	if (pf->ptp_enable)
		return -ENOTSUP;

	return 0;
}
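
/*
 * Transmit a burst by splitting it into chunks of at most tx_rs_thresh
 * packets, so each fixed-size vector burst stays within the free
 * threshold of the Tx ring; stop early once the ring cannot accept a
 * full chunk.
 */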
uint16_t
hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		uint16_t ret, new_burst;

		new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = hns3_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
						new_burst);
		nb_pkts -= ret;
		nb_tx += ret;
		/* The ring is full when fewer than new_burst were sent */
		if (ret < new_burst)
			break;
	}

	return nb_tx;
}
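
/*
 * Refill HNS3_DEFAULT_RXQ_REARM_THRESH descriptors starting at
 * rx_rearm_start: bulk-allocate mbufs from the pool, write their DMA
 * addresses into the ring four BDs per iteration while prefetching the
 * next iteration's mbufs, then notify the hardware of the new buffers.
 */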
static inline void
hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq)
{
#define REARM_LOOP_STEP_NUM	4
	struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
	struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
	uint64_t dma_addr;
	int i;

	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
					  HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
		return;
	}

	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
		rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
		/* Prefetch the mbufs of the next iteration, except on the last one */
		if (likely(i <
			   HNS3_DEFAULT_RXQ_REARM_THRESH - REARM_LOOP_STEP_NUM)) {
			rte_prefetch_non_temporal(rxep[4].mbuf);
			rte_prefetch_non_temporal(rxep[5].mbuf);
			rte_prefetch_non_temporal(rxep[6].mbuf);
			rte_prefetch_non_temporal(rxep[7].mbuf);
		}

		dma_addr = rte_mbuf_data_iova_default(rxep[0].mbuf);
		rxdp[0].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[0].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[1].mbuf);
		rxdp[1].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[1].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[2].mbuf);
		rxdp[2].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[2].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[3].mbuf);
		rxdp[3].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[3].rx.bd_base_info = 0;
	}

	rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
	if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
		rxq->rx_rearm_start = 0;

	rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;

	hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
}
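
/*
 * Receive a burst with the vector routine: round nb_pkts down to a
 * multiple of HNS3_DEFAULT_DESCS_PER_LOOP, rearm the ring once enough
 * descriptors have been consumed, and bail out early if the next BD is
 * not yet valid. Bursts larger than HNS3_DEFAULT_RX_BURST are processed
 * in fixed-size chunks.
 */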
uint16_t
hns3_recv_pkts_vec(void *__restrict rx_queue,
		   struct rte_mbuf **__restrict rx_pkts,
		   uint16_t nb_pkts)
{
	struct hns3_rx_queue *rxq = rx_queue;
	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
	uint64_t pkt_err_mask;	/* bit mask indicating which pkts are in error */
	uint16_t nb_rx;

	rte_prefetch_non_temporal(rxdp);

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_DEFAULT_DESCS_PER_LOOP);

	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
		hns3_rxq_rearm_mbuf(rxq);

	if (unlikely(!(rxdp->rx.bd_base_info &
			rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
		return 0;

	rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 0].mbuf);
	rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 1].mbuf);
	rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 2].mbuf);
	rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 3].mbuf);

	if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
		pkt_err_mask = 0;
		nb_rx = hns3_recv_burst_vec(rxq, rx_pkts, nb_pkts,
					    &pkt_err_mask);
		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
		return nb_rx;
	}

	nb_rx = 0;
	while (nb_pkts > 0) {
		uint16_t ret, n;

		n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
		pkt_err_mask = 0;
		ret = hns3_recv_burst_vec(rxq, &rx_pkts[nb_rx], n,
					  &pkt_err_mask);
		nb_pkts -= ret;
		nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
						 pkt_err_mask);
		if (ret < n)
			break;

		if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
			hns3_rxq_rearm_mbuf(rxq);
	}

	return nb_rx;
}
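
/*
 * Build the 8-byte mbuf_initializer from a template mbuf. The vector Rx
 * path stores this value over the rearm_data region of each received
 * mbuf, resetting data_off, refcnt, nb_segs and port in a single write.
 */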
static void
hns3_rxq_vec_setup_rearm_data(struct hns3_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Compile-time check that the rearmed fields fit in rearm_data's first 8 bytes */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) <
			 offsetof(struct rte_mbuf, rearm_data));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) <
			 offsetof(struct rte_mbuf, rearm_data));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) <
			 offsetof(struct rte_mbuf, rearm_data));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) <
			 offsetof(struct rte_mbuf, rearm_data));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) -
			 offsetof(struct rte_mbuf, rearm_data) > 6);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
			 offsetof(struct rte_mbuf, rearm_data) > 6);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
			 offsetof(struct rte_mbuf, rearm_data) > 6);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
			 offsetof(struct rte_mbuf, rearm_data) > 6);

	/* prevent compiler reordering: rearm_data covers the fields set above */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
}
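
/*
 * Prepare a queue for the vector path: zero the HNS3_DEFAULT_RX_BURST
 * shadow descriptors past the ring end and point their sw_ring entries
 * at a fake mbuf, so the fixed-stride vector loop can over-read beyond
 * the ring tail without touching unmapped memory.
 */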
void
hns3_rxq_vec_setup(struct hns3_rx_queue *rxq)
{
	struct hns3_entry *sw_ring = &rxq->sw_ring[rxq->nb_rx_desc];
	unsigned int i;

	memset(&rxq->rx_ring[rxq->nb_rx_desc], 0,
	       sizeof(struct hns3_desc) * HNS3_DEFAULT_RX_BURST);

	memset(&rxq->fake_mbuf, 0, sizeof(rxq->fake_mbuf));
	for (i = 0; i < HNS3_DEFAULT_RX_BURST; i++)
		sw_ring[i].mbuf = &rxq->fake_mbuf;

	hns3_rxq_vec_setup_rearm_data(rxq);

	memset(rxq->offset_table, 0, sizeof(rxq->offset_table));
}
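
/*
 * Per-queue constraint for the vector path: the ring must hold at least
 * one rearm threshold plus one full burst, and its size must be a
 * multiple of the rearm threshold so rx_rearm_start wraps exactly at
 * the ring end.
 */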
static int
hns3_rxq_vec_check(struct hns3_rx_queue *rxq, void *arg)
{
	uint32_t min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
			       HNS3_DEFAULT_RX_BURST;

	if (rxq->nb_rx_desc < min_vec_bds)
		return -ENOTSUP;

	if (rxq->nb_rx_desc % HNS3_DEFAULT_RXQ_REARM_THRESH)
		return -ENOTSUP;

	RTE_SET_USED(arg);
	return 0;
}
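
/*
 * Device-level gate for the vector Rx path: scattered Rx, flow director,
 * LRO and VLAN offloads are handled only by the scalar path, so any of
 * them disables vectorization, as does a queue failing the per-queue
 * check above or PTP being enabled.
 */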
int
hns3_rx_check_vec_support(struct rte_eth_dev *dev)
{
	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO |
				 RTE_ETH_RX_OFFLOAD_VLAN;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;

	if (dev->data->scattered_rx)
		return -ENOTSUP;

	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -ENOTSUP;

	if (rxmode->offloads & offloads_mask)
		return -ENOTSUP;

	if (hns3_rxq_iterate(dev, hns3_rxq_vec_check, NULL) != 0)
		return -ENOTSUP;

	/* The vector path is not supported when PTP is enabled */
	if (pf->ptp_enable)
		return -ENOTSUP;

	return 0;
}