/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 * Copyright(c) 2022 Arm Limited
 */

#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "iavf.h"
#include "iavf_rxtx.h"
#include "iavf_rxtx_vec_common.h"
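
/*
 * NEON (Arm) vectorized Rx path for the iavf PMD: descriptors are processed
 * IAVF_VPMD_DESCS_PER_LOOP (4) at a time and the software ring is refilled
 * in batches of IAVF_RXQ_REARM_THRESH mbufs.
 */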

static inline void
iavf_rxq_rearm(struct iavf_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mp,
					  (void *)rxep,
					  IAVF_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
				rxep[i] = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			IAVF_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0];
		mb1 = rxep[1];

		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vdupq_n_u64(paddr);

		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vdupq_n_u64(paddr);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
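
	/* The descriptor stores above must be visible to the device before
	 * the tail register is bumped; the I/O write barrier provides that
	 * ordering, so a relaxed MMIO write is sufficient for the tail
	 * update itself.
	 */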
	rte_io_wmb();
	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
}

static inline void
desc_to_olflags_v(struct iavf_rx_queue *rxq, volatile union iavf_rx_desc *rxdp,
		  uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
{
	RTE_SET_USED(rxdp);

	uint32x4_t vlan0, vlan1, rss, l3_l4e;
	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
	uint64x2_t rearm0, rearm1, rearm2, rearm3;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit 2 is for VLAN tag, bit 11 for flow director indication,
	 * bits 13:12 for RSS indication and bits 24:22 for the L3/L4
	 * checksum error status.
	 */
	const uint32x4_t rss_vlan_msk = {
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};

	const uint32x4_t cksum_mask = {
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};

	/* map rss and vlan type to rss hash and vlan flag */
	const uint8x16_t vlan_flags = {
			0, 0, 0, 0,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0};

	const uint8x16_t rss_flags = {
			0, RTE_MBUF_F_RX_FDIR, 0, 0,
			0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
			0, 0, 0, 0,
			0, 0, 0, 0};

	const uint8x16_t l3_l4e_flags = {
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
			RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
			 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			0, 0, 0, 0, 0, 0, 0, 0};
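
	/* The tables above are used with vqtbl1q_u8() as 16-entry lookup
	 * tables: each byte of the masked and shifted status word selects
	 * one entry, and indexes >= 16 yield zero.  l3_l4e_flags stores its
	 * entries shifted right by one because some RTE_MBUF_F_RX_*_CKSUM_*
	 * values do not fit in a byte; the result is shifted back after the
	 * lookup.  The zips below gather the low 32 bits of qword1 (the
	 * status/error word) of descriptors 0-3 into a single vector.
	 */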

	vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
			  vreinterpretq_u32_u64(descs[2])).val[1];
	vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
			  vreinterpretq_u32_u64(descs[3])).val[1];
	vlan0 = vzipq_u32(vlan0, vlan1).val[0];

	vlan1 = vandq_u32(vlan0, rss_vlan_msk);
	vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
						vreinterpretq_u8_u32(vlan1)));

	const uint32x4_t desc_fltstat = vshrq_n_u32(vlan1, 11);
	rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
					      vreinterpretq_u8_u32(desc_fltstat)));

	l3_l4e = vshrq_n_u32(vlan1, 22);
	l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
						 vreinterpretq_u8_u32(l3_l4e)));
	/* then we shift left 1 bit */
	l3_l4e = vshlq_n_u32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = vandq_u32(l3_l4e, cksum_mask);

	vlan0 = vorrq_u32(vlan0, rss);
	vlan0 = vorrq_u32(vlan0, l3_l4e);
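
	/* vlan0 now holds the complete ol_flags value for each of the four
	 * packets.  Each value is placed in the upper 64-bit lane on top of
	 * rxq->mbuf_initializer, so a single 16-byte store per mbuf rewrites
	 * the rearm area (data_off, refcnt, nb_segs, port) together with
	 * ol_flags.
	 */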

	rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
	rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
	rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
	rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);

	vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
	vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
	vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
	vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}

#define PKTLEN_SHIFT 10
#define IAVF_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
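
/* Extract the packet type of each descriptor: qword1 keeps the 8-bit PTYPE
 * field at bits 37:30, so shifting each 64-bit lane right by 30 drops it
 * into byte 8 of the vector, from where it indexes the adapter's ptype
 * translation table.
 */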
static inline void
desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__rte_restrict rx_pkts,
		uint32_t *__rte_restrict ptype_tbl)
{
	int i;
	uint8_t ptype;
	uint8x16_t tmp;

	for (i = 0; i < 4; i++) {
		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
		ptype = vgetq_lane_u8(tmp, 8);
		rx_pkts[i]->packet_type = ptype_tbl[ptype];
	}
}

/**
 * vPMD raw receive routine; only bursts of at least
 * IAVF_VPMD_DESCS_PER_LOOP packets are handled.
 *
 * Notice:
 * - if nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of IAVF_VPMD_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct iavf_rx_queue *__rte_restrict rxq,
		   struct rte_mbuf **__rte_restrict rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	RTE_SET_USED(split_packet);

	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	/* mask to shuffle from desc. to mbuf */
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,   /* pkt_type set as unknown */
		0xFF, 0xFF,   /* pkt_type set as unknown */
		14, 15,       /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
		14, 15,       /* octet 15~14, 16 bits data_len */
		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
		4, 5, 6, 7    /* octet 4~7, 32 bits rss */
		};

	uint16x8_t crc_adjust = {
		0, 0,         /* ignore pkt_type field */
		rxq->crc_len, /* sub crc on pkt_len */
		0,            /* ignore high-16bits of pkt_len */
		rxq->crc_len, /* sub crc on data_len */
		0, 0, 0       /* ignore non-length fields */
		};
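
	/* Because shuf_msk places pkt_len and data_len in adjacent 16-bit
	 * lanes, a single vsubq_u16() with crc_adjust strips the CRC length
	 * (zero when the hardware already strips the CRC) from both fields.
	 */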

	/* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);

	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
	      rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];
	/* A. load 4 packet in one loop
	 * [A*. mask out 4 unused dirty field in desc]
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
	     pos += IAVF_VPMD_DESCS_PER_LOOP,
	     rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
		uint64x2_t descs[IAVF_VPMD_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint16x8x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint16x8_t staterr;
		uint16x8_t tmp;
		uint64_t stat;

		int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};

		/* A.1 load desc[3-0] */
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[0] = vld1q_u64((uint64_t *)(rxdp));

		/* Use acquire fence to order loads of descriptor qwords */
		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* A.2 reload qword0 to make it ordered after qword1 load */
		descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
		descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
		descs[1] = vld1q_lane_u64((uint64_t *)(rxdp + 1), descs[1], 0);
		descs[0] = vld1q_lane_u64((uint64_t *)(rxdp), descs[0], 0);
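
		/* Without the reload, qword0 of a descriptor could still hold
		 * pre-write-back data even though qword1, which carries the DD
		 * bit checked below, is already updated; re-reading qword0
		 * after the acquire fence makes it at least as new as qword1.
		 */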

		/* B.1 load 4 mbuf point */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		/* B.2 copy 4 mbuf point into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
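
		/* qword1 keeps the 14-bit packet length at bits 51:38; shifting
		 * the high word left by PKTLEN_SHIFT moves it onto a 16-bit lane
		 * boundary so that shuf_msk can copy it directly into the mbuf
		 * pkt_len and data_len fields.
		 */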

		/* pkts shift the pktlen field to be 16-bit aligned */
		uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
					    len_shl);
		descs[3] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len3), 7),
				 vreinterpretq_u16_u64(descs[3]),
				 7));
		uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
					    len_shl);
		descs[2] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len2), 7),
				 vreinterpretq_u16_u64(descs[2]),
				 7));
		uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
					    len_shl);
		descs[1] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len1), 7),
				 vreinterpretq_u16_u64(descs[1]),
				 7));
		uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
					    len_shl);
		descs[0] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len0), 7),
				 vreinterpretq_u16_u64(descs[0]),
				 7));
		desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);

		/* D.1 pkts convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* D.2 pkts set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);
		vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);

		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);

		if (likely(pos + IAVF_VPMD_DESCS_PER_LOOP < nb_pkts))
			rte_prefetch_non_temporal(rxdp + IAVF_VPMD_DESCS_PER_LOOP);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
				       vreinterpretq_u16_u64(descs[3]));
		sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
				       vreinterpretq_u16_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u16(sterr_tmp1.val[1],
				    sterr_tmp2.val[1]).val[0];
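
		/* The low four 16-bit lanes of staterr now hold the low status
		 * word (DD in bit 0) of descriptors 0-3.  Shifting the DD bit
		 * up to the sign bit and arithmetic-shifting it back turns each
		 * lane into all-ones (done) or all-zeros; inverting the low
		 * 64 bits then lets __builtin_ctzl() count how many consecutive
		 * descriptors have completed.
		 */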

		staterr = vshlq_n_u16(staterr, IAVF_UINT16_BIT - 1);
		staterr = vreinterpretq_u16_s16(
				vshrq_n_s16(vreinterpretq_s16_u16(staterr),
					    IAVF_UINT16_BIT - 1));
		stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);

		/* C.4 calc available number of desc */
		if (unlikely(stat == 0)) {
			nb_pkts_recd += IAVF_VPMD_DESCS_PER_LOOP;
		} else {
			nb_pkts_recd += __builtin_ctzl(stat) / IAVF_UINT16_BIT;
			break;
		}
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * Notice:
 * - if nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, no packets are returned
 * - if nb_pkts > IAVF_VPMD_RX_BURST, only IAVF_VPMD_RX_BURST
 *   descriptors' DD bits are scanned
 */
uint16_t
iavf_recv_pkts_vec(void *__rte_restrict rx_queue,
		   struct rte_mbuf **__rte_restrict rx_pkts, uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
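
/*
 * Note: applications do not call this routine directly; it is installed as
 * the device's rx_pkt_burst callback when the driver selects the NEON path
 * (see iavf_set_rx_function() in iavf_rxtx.c), so it is reached through the
 * generic burst API, e.g.:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 */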

static void __rte_cold
iavf_rx_queue_release_mbufs_neon(struct iavf_rx_queue *rxq)
{
	_iavf_rx_queue_release_mbufs_vec(rxq);
}

static const struct iavf_rxq_ops neon_vec_rxq_ops = {
	.release_mbufs = iavf_rx_queue_release_mbufs_neon,
};

int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
	rxq->ops = &neon_vec_rxq_ops;
	return iavf_rxq_vec_setup_default(rxq);
}

int __rte_cold
iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
{
	return iavf_rx_vec_dev_check_default(dev);
}