/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#pragma GCC diagnostic ignored "-Wcast-qual"
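
/* The vector loads/stores below cast the volatile qualifier off the
 * descriptor ring pointers, hence the file-wide -Wcast-qual suppression.
 */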

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;
	uint8x8_t p;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
					  (void *)rxep,
					  RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}
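
	/* mbuf_initializer is an 8-byte template for the mbuf rearm_data
	 * area, prepared once at queue setup; loading it into a vector lets
	 * the loop below reset each mbuf with a single 8-byte store.
	 */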
	p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/*
		 * Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 */
		vst1_u8((uint8_t *)&mb0->rearm_data, p);
		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		vst1_u8((uint8_t *)&mb1->rearm_data, p);
		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
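
/* Compute ol_flags (RSS hash, FDIR and VLAN indications) for four packets
 * at once from the interleaved status/error bytes of their descriptors.
 */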
static inline void
desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
		  uint8x16_t staterr, uint8_t vlan_flags,
		  struct rte_mbuf **rx_pkts)
{
	uint8x16_t ptype;
	uint8x16_t vtag;

	union {
		uint8_t e[4];
		uint32_t word;
	} vol;

	const uint8x16_t rsstype_msk = {
			0x0F, 0x0F, 0x0F, 0x0F,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rss_flags = {
			0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
			0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, 0, 0,
			0, 0, 0, PKT_RX_FDIR};

	/* mask everything except the VLAN-present bit */
	const uint8x16_t vlan_msk = {
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
			IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP};
	/* map the VLAN-present bit (0x08) to vlan_flags */
	const uint8x16_t vlan_map = {
			0, 0, 0, 0,
			0, 0, 0, 0,
			vlan_flags, 0, 0, 0,
			0, 0, 0, 0};

	ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
	ptype = vandq_u8(ptype, rsstype_msk);
	ptype = vqtbl1q_u8(rss_flags, ptype);

	/* extract vlan_flags from IXGBE_RXD_STAT_VP bits of staterr */
	vtag = vandq_u8(staterr, vlan_msk);
	vtag = vqtbl1q_u8(vlan_map, vtag);
	vtag = vorrq_u8(ptype, vtag);

	vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
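
/* Bit 1 (EOP) of each of the four per-descriptor status bytes, gathered
 * into one 32-bit word, gives the 0x02020202 mask.
 */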
#define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
#define IXGBE_UINT8_BIT			(CHAR_BIT * sizeof(uint8_t))

static inline uint32_t
get_packet_type(uint32_t pkt_info,
		uint32_t etqf_check,
		uint32_t tunnel_check)
{
	if (etqf_check)
		return RTE_PTYPE_UNKNOWN;

	if (tunnel_check) {
		pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];
	}

	pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}
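
/* Resolve packet_type for four descriptors at once: packets matched by an
 * ETQF filter stay RTE_PTYPE_UNKNOWN, tunnelled packets are looked up in
 * ptype_table_tn, everything else in ptype_table.
 */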
static inline void
desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
{
	uint32x4_t etqf_check, tunnel_check;
	uint32x4_t etqf_mask = vdupq_n_u32(0x8000);
	uint32x4_t tunnel_mask = vdupq_n_u32(0x10000);
	uint32x4_t ptype_mask = vdupq_n_u32((uint32_t)pkt_type_mask);
	uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
				vreinterpretq_u32_u64(descs[2])).val[0];
	uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
				vreinterpretq_u32_u64(descs[3])).val[0];

	/* interleave low 32 bits,
	 * now we have 4 ptypes in a NEON register
	 */
	ptype0 = vzipq_u32(ptype0, ptype1).val[0];

	/* mask etqf bits */
	etqf_check = vandq_u32(ptype0, etqf_mask);
	/* mask tunnel bits */
	tunnel_check = vandq_u32(ptype0, tunnel_mask);

	/* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
	ptype0 = vandq_u32(vshrq_n_u32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
			ptype_mask);

	rx_pkts[0]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 0),
				vgetq_lane_u32(etqf_check, 0),
				vgetq_lane_u32(tunnel_check, 0));
	rx_pkts[1]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 1),
				vgetq_lane_u32(etqf_check, 1),
				vgetq_lane_u32(tunnel_check, 1));
	rx_pkts[2]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 2),
				vgetq_lane_u32(etqf_check, 2),
				vgetq_lane_u32(tunnel_check, 2));
	rx_pkts[3]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 3),
				vgetq_lane_u32(etqf_check, 3),
				vgetq_lane_u32(tunnel_check, 3));
}

/*
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,
		0xFF, 0xFF,  /* skip 32 bits pkt_type */
		12, 13,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		12, 13,      /* octet 12~13, 16 bits data_len */
		14, 15,      /* octet 14~15, low 16 bits vlan_macip */
		4, 5, 6, 7   /* octet 4~7, 32 bits rss */
		};
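	/* crc_len is 4 when the hardware leaves the CRC on the packet and 0
	 * otherwise; it is subtracted below from the pkt_len and data_len
	 * lanes produced by shuf_msk.
	 */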
	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
				 rxq->crc_len, 0, 0, 0};
	uint8_t vlan_flags;

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;

	/* A. load 4 packets' descriptors in one iteration
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calculate the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint8x16x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint8x16_t staterr;
		uint16x8_t tmp;
		uint32_t stat;

		/* B.1 load 2 mbuf pointers */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 mbuf pointers */
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		/* A. load 4 pkts descs */
		descs[0] = vld1q_u64((uint64_t *)(rxdp));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
				      vreinterpretq_u8_u64(descs[3]));
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
				      vreinterpretq_u8_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
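
		/* After the two interleave steps, bytes 0-3 of staterr hold
		 * the low status byte (DD, EOP, VP bits) of descriptors
		 * 0, 1, 2 and 3 respectively.
		 */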

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags,
				  &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final 3,4 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* C* extract and record EOP bit */
		if (split_packet) {
			stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
			/* and with mask to extract bits, flipping 1-0 */
			*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;

			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
		}

		/* C.4 expand DD bit to saturate UINT8 */
		staterr = vshlq_n_u8(staterr, IXGBE_UINT8_BIT - 1);
		staterr = vreinterpretq_u8_s8
				(vshrq_n_s8(vreinterpretq_s8_u8(staterr),
					IXGBE_UINT8_BIT - 1));
		stat = ~vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
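
		/* Each byte of stat is now 0x00 for a completed descriptor
		 * and 0xFF otherwise, so __builtin_ctz(stat) / 8 counts the
		 * descriptors done in order; stat == 0 means all four are
		 * done.
		 */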

		rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);

		/* D.3 copy final 1,2 data to rx_pkts */
		vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);

		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

		/* C.5 calc available number of desc */
		if (unlikely(stat == 0)) {
			nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
		} else {
			nb_pkts_recd += __builtin_ctz(stat) / IXGBE_UINT8_BIT;
			break;
		}
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 * - RSS and checksum-error ol_flags are not reported
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets.
 *
 * Notice:
 * - RSS and checksum-error ol_flags are not reported
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static uint16_t
ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
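	/* four 64-bit words cover all RTE_IXGBE_MAX_RX_BURST (32) flag bytes */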
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
		uint16_t burst;

		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
						       rx_pkts + retval,
						       RTE_IXGBE_MAX_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_IXGBE_MAX_RX_BURST)
			return retval;
	}

	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
						       rx_pkts + retval,
						       nb_pkts);
}
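
/* Build one 16-byte advanced TX descriptor: the low quadword is the buffer
 * DMA address; the high quadword packs cmd_type_len (flags | data_len) in
 * its lower 32 bits and olinfo_status in its upper 32 bits, with PAYLEN
 * starting at bit 46 of the quadword (hence pkt_len << 46).
 */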
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64x2_t descriptor = {
			pkt->buf_iova + pkt->data_off,
			(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

	vst1q_u64((uint64_t *)&txdp->read, descriptor);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
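
	/* The burst may wrap past the end of the ring: fill up to the ring
	 * end first, setting RS on the last descriptor of the ring, then
	 * restart from entry 0 for the remainder.
	 */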
	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* no csum error report support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
		return -1;

	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}