/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#pragma GCC diagnostic ignored "-Wcast-qual"
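
/* Refill the software ring with fresh mbufs and program their DMA addresses
 * into the RX descriptors, then hand the rearmed slots back to the NIC by
 * advancing the RDT tail register.
 */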
static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;
	uint8x8_t p;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
					  (void *)rxep,
					  RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}

	p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/*
		 * Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 */
		vst1_u8((uint8_t *)&mb0->rearm_data, p);
		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		vst1_u8((uint8_t *)&mb1->rearm_data, p);
		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
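
/* The VLAN Present (VP) flag sits at bit 3 of each packed status byte;
 * shifting the status bytes right by VTAG_SHIFT lines that flag up with
 * bit 0, the position of PKT_RX_VLAN in the per-packet ol_flags byte
 * assembled below.
 */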
#define VTAG_SHIFT     (3)

static inline void
desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
		  uint8x16_t staterr, struct rte_mbuf **rx_pkts)
{
	uint8x16_t ptype;
	uint8x16_t vtag;

	union {
		uint8_t e[4];
		uint32_t word;
	} vol;

	const uint8x16_t pkttype_msk = {
			PKT_RX_VLAN, PKT_RX_VLAN,
			PKT_RX_VLAN, PKT_RX_VLAN,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rsstype_msk = {
			0x0F, 0x0F, 0x0F, 0x0F,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rss_flags = {
			0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
			0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, 0, 0,
			0, 0, 0, PKT_RX_FDIR};

	/* pick up the RSS-type nibbles of the 4 descriptors and translate
	 * them into PKT_RX_RSS_HASH/PKT_RX_FDIR flags via table lookup
	 */
	ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
	ptype = vandq_u8(ptype, rsstype_msk);
	ptype = vqtbl1q_u8(rss_flags, ptype);

	/* fold the VLAN Present bit into the flags */
	vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
	vtag = vandq_u8(vtag, pkttype_msk);
	vtag = vorrq_u8(ptype, vtag);

	vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
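
/* IXGBE_VPMD_DESC_EOP_MASK selects the End-Of-Packet bit (bit 1) of each of
 * the four status bytes packed into one 32-bit word; IXGBE_UINT8_BIT is the
 * number of bits in a byte, used when expanding and counting DD bits.
 */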
#define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
#define IXGBE_UINT8_BIT			(CHAR_BIT * sizeof(uint8_t))

static inline uint32_t
get_packet_type(uint32_t pkt_info,
		uint32_t etqf_check,
		uint32_t tunnel_check)
{
	if (etqf_check)
		return RTE_PTYPE_UNKNOWN;

	if (tunnel_check) {
		pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];
	}

	pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}
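
/* Extract the hardware packet-type field from four descriptors at once and
 * resolve each one through get_packet_type(), taking the ETQF match and
 * tunnel indication bits into account.
 */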
static inline void
desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
{
	uint32x4_t etqf_check, tunnel_check;
	uint32x4_t etqf_mask = vdupq_n_u32(0x8000);
	uint32x4_t tunnel_mask = vdupq_n_u32(0x10000);
	uint32x4_t ptype_mask = vdupq_n_u32((uint32_t)pkt_type_mask);
	uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
				vreinterpretq_u32_u64(descs[2])).val[0];
	uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
				vreinterpretq_u32_u64(descs[3])).val[0];

	/* interleave low 32 bits,
	 * now we have 4 ptypes in a NEON register
	 */
	ptype0 = vzipq_u32(ptype0, ptype1).val[0];

	/* mask etqf bits */
	etqf_check = vandq_u32(ptype0, etqf_mask);
	/* mask tunnel bits */
	tunnel_check = vandq_u32(ptype0, tunnel_mask);

	/* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
	ptype0 = vandq_u32(vshrq_n_u32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
			ptype_mask);

	rx_pkts[0]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 0),
				vgetq_lane_u32(etqf_check, 0),
				vgetq_lane_u32(tunnel_check, 0));
	rx_pkts[1]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 1),
				vgetq_lane_u32(etqf_check, 1),
				vgetq_lane_u32(tunnel_check, 1));
	rx_pkts[2]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 2),
				vgetq_lane_u32(etqf_check, 2),
				vgetq_lane_u32(tunnel_check, 2));
	rx_pkts[3]->packet_type =
		get_packet_type(vgetq_lane_u32(ptype0, 3),
				vgetq_lane_u32(etqf_check, 3),
				vgetq_lane_u32(tunnel_check, 3));
}
/* vPMD raw receive routine, only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
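	/* When split_packet is non-NULL the caller is the scattered-RX path:
	 * for every descriptor we record whether End-Of-Packet was clear so
	 * that multi-segment packets can be reassembled afterwards.
	 */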
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,
		0xFF, 0xFF,  /* skip 32 bits pkt_type */
		12, 13,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		12, 13,      /* octet 12~13, 16 bits data_len */
		14, 15,      /* octet 14~15, low 16 bits vlan_macip */
		4, 5, 6, 7   /* octet 4~7, 32 bits rss */
		};
	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
				 rxq->crc_len, 0, 0, 0};

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
	      rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packet in one loop
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
	     pos += RTE_IXGBE_DESCS_PER_LOOP,
	     rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint8x16x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint8x16_t staterr;
		uint16x8_t tmp;
		uint32_t stat;

		/* B.1 load 2 mbuf point */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);

		/* B.2 copy 2 mbuf point into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 mbuf point */
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		/* A. load 4 pkts descs */
		descs[0] = vld1q_u64((uint64_t *)(rxdp));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));

		/* B.2 copy 2 mbuf point into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
				      vreinterpretq_u8_u64(descs[3]));
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
				      vreinterpretq_u8_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
				  &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final 3,4 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* C* extract and record EOP bit */
		if (split_packet) {
			stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
			/* and with mask to extract bits, flipping 1-0 */
			*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;

			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
		}

		/* C.4 expand DD bit to saturate UINT8 */
		staterr = vshlq_n_u8(staterr, IXGBE_UINT8_BIT - 1);
		staterr = vreinterpretq_u8_s8
				(vshrq_n_s8(vreinterpretq_s8_u8(staterr),
					    IXGBE_UINT8_BIT - 1));
		stat = ~vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);

		rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);

		/* D.3 copy final 1,2 data to rx_pkts */
		vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);

		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

		/* C.5 calc available number of desc: each inverted, saturated
		 * DD byte in 'stat' is 0x00 for a completed descriptor, so the
		 * count of trailing zero bits divided by 8 is the number of
		 * packets received in this iteration.
		 */
		if (unlikely(stat == 0)) {
			nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
		} else {
			nb_pkts_recd += __builtin_ctz(stat) / IXGBE_UINT8_BIT;
			break;
		}
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/* vPMD receive routine, only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 * - ol_flags for RSS and checksum errors are not supported
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - ol_flags for RSS and checksum errors are not supported
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static uint16_t
ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
					      split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}

/* vPMD receive routine that reassembles scattered packets. */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	uint16_t retval = 0;

	/* split large requests into chunks the burst routine can handle */
	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
		uint16_t burst = ixgbe_recv_scattered_burst_vec(rx_queue,
				rx_pkts + retval, RTE_IXGBE_MAX_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_IXGBE_MAX_RX_BURST)
			return retval;
	}

	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
			rx_pkts + retval, nb_pkts);
}
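
/* vtx1() builds one advanced TX descriptor in its "read" format: the low
 * 64 bits carry the buffer DMA address, the high 64 bits pack cmd_type_len
 * (command flags plus data_len) together with olinfo_status, where PAYLEN
 * occupies the top bits (hence the pkt_len << 46 below).
 */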
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64x2_t descriptor = {
			pkt->buf_iova + pkt->data_off,
			(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

	vst1q_u64((uint64_t *)&txdp->read, descriptor);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}
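
/* Fixed-burst TX: the burst is capped at tx_rs_thresh, descriptors are
 * written with vtx()/vtx1(), ring wrap-around is handled explicitly, and an
 * RS (Report Status) bit is requested once per tx_rs_thresh descriptors so
 * completed mbufs can be reclaimed later by ixgbe_tx_free_bufs().
 */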
uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	/* cross tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}
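
/* The remaining entry points are thin wrappers around the shared helpers in
 * ixgbe_rxtx_vec_common.h.
 */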
static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* no checksum error report support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
		return -1;

	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}