/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

#include <tmmintrin.h>
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;
		uintptr_t p0, p1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 * Though, RX will overwrite ol_flags that are coming next
		 * anyway. So overwrite whole 8 bytes with one load:
		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
		 */
		p0 = (uintptr_t)&mb0->rearm_data;
		*(uint64_t *)p0 = rxq->mbuf_initializer;
		p1 = (uintptr_t)&mb1->rearm_data;
		*(uint64_t *)p1 = rxq->mbuf_initializer;
		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
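	/* For reference, a scalar sketch of one iteration of the loop above
	 * (illustrative only, assuming the standard rte_mbuf layout; the
	 * vector path is what actually runs):
	 *
	 *	uint64_t dma = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
	 *
	 *	rxdp->read.pkt_addr = dma;
	 *	rxdp->read.hdr_addr = dma;
	 *
	 * _mm_unpackhi_epi64() duplicates buf_physaddr (the high 64 bits of
	 * the loaded buf_addr/buf_physaddr pair) into both lanes, so one
	 * 128-bit store fills both descriptor address fields.
	 */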
	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
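/* A minimal sketch of turning this path on at build time (assuming the
 * DPDK 2.x static configuration; the option name is taken from the
 * config/common_* files of that era):
 *
 *	CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
 */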
#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE

static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i vlan0, vlan1, rss;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;
	/* mask everything except rss and vlan flags
	 * bit 2 is for vlan tag, bits 13:12 for rss
	 */
	const __m128i rss_vlan_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x3004, 0x3004, 0x3004, 0x3004);
	/* map rss and vlan type to rss hash and vlan flag */
	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, PKT_RX_VLAN_PKT,
			0, 0, 0, 0);

	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
	vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
	vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);

	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);

	rss = _mm_srli_epi16(vlan1, 12);
	rss = _mm_shuffle_epi8(rss_flags, rss);

	vlan0 = _mm_or_si128(vlan0, rss);
	vol.dword = _mm_cvtsi128_si64(vlan0);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
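/* For reference, a scalar sketch of the per-packet lookup above
 * (illustrative only; it mirrors the tables, which map status bit 2
 * (L2TAG1P, the 0x0004 part of the mask) to PKT_RX_VLAN_PKT, and
 * FLTSTAT (bits 13:12, the 0x3000 part) values 1 and 3 to
 * PKT_RX_RSS_HASH and PKT_RX_FDIR respectively):
 *
 *	uint64_t qw1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
 *	uint16_t stat = (uint16_t)qw1;	// status bits 15:0
 *	uint64_t fl = 0;
 *
 *	if (stat & 0x0004)
 *		fl |= PKT_RX_VLAN_PKT;
 *	if (((stat >> 12) & 0x3) == 1)
 *		fl |= PKT_RX_RSS_HASH;
 *	else if (((stat >> 12) & 0x3) == 3)
 *		fl |= PKT_RX_FDIR;
 */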
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif
#define PKTLEN_SHIFT     (6)
#define PKTLEN_MASK      (0x3FFF)

/* Handling the pkt len field is not aligned with 1byte, so shift is
 * needed to let it align
 */
static inline void
desc_pktlen_align(__m128i descs[4])
{
	__m128i pktlen0, pktlen1, zero;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;
	/* mask everything except pktlen field */
	const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
						 PKTLEN_MASK, PKTLEN_MASK);

	pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
	pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
	pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);

	zero = _mm_xor_si128(pktlen0, pktlen0);

	pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
	pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);

	pktlen0 = _mm_packs_epi32(pktlen0, zero);
	vol.dword = _mm_cvtsi128_si64(pktlen0);
	/* let the descriptor bytes 15-14 store the pkt len */
	*((uint16_t *)&descs[0] + 7) = vol.e[0];
	*((uint16_t *)&descs[1] + 7) = vol.e[1];
	*((uint16_t *)&descs[2] + 7) = vol.e[2];
	*((uint16_t *)&descs[3] + 7) = vol.e[3];
}
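/* For reference, the scalar equivalent of the alignment above
 * (illustrative only): the packet length sits in qword1 bits 51:38,
 * i.e. bits 19:6 of the descriptor's high dword, so
 *
 *	uint64_t qw1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
 *	uint16_t pkt_len = (qw1 >> 38) & PKTLEN_MASK;
 *
 * which matches (hi_dword >> PKTLEN_SHIFT) & PKTLEN_MASK as computed
 * above. The result is written back into descriptor bytes 15-14, where
 * the shuffle mask in _recv_raw_pkts_vec() picks it up for the mbuf
 * length fields.
 */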
/* vPMD receive routine that scans at most RTE_I40E_VPMD_RX_BURST packets
 * in one loop
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST: only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,       /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,             /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0           /* ignore pkt_type field */
			);
	__m128i dd_check, eop_check;
	/* nb_pkts shall be no more than RTE_I40E_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
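	/* e.g. RTE_ALIGN_FLOOR(7, RTE_I40E_DESCS_PER_LOOP) == 4 with the
	 * 4-descriptor loop below, so a trailing partial group of
	 * descriptors is never scanned.
	 */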
	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;
	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		3, 2,        /* octet 2~3, low 16 bits vlan_macip */
		15, 14,      /* octet 15~14, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		15, 14,      /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,  /* pkt_type set as unknown */
		0xFF, 0xFF   /* pkt_type set as unknown */
		);
	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];
	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		__m128i descs[RTE_I40E_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */
		/* B.1 load 2 mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);

		/* A.2 load the remaining 3 pkts desc */
		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs[0] = _mm_loadu_si128((__m128i *)rxdp);

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
		if (split_packet) {
			rte_prefetch0(&rx_pkts[pos]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
		}
		/* shift the pktlen field */
		desc_pktlen_align(descs);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();
		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		desc_to_olflags_v(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
				 pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
				 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP;
			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}
		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
				 pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				 pkt_mb1);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}
	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}
/* vPMD receive routine that scans at most RTE_I40E_VPMD_RX_BURST packets
 * in one loop
 *
 * Notice:
 * - nb_pkts < RTE_I40E_VPMD_RX_BURST: just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST: only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
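/* A minimal usage sketch from the application side (hypothetical port_id
 * and queue_id; the vPMD is reached through the normal burst API once it
 * is selected as the RX function):
 *
 *	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts,
 *				      RTE_I40E_VPMD_RX_BURST);
 */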
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned mask = rxq->nb_rx_desc - 1;
	unsigned i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;
	/* free all mbufs that are valid in the ring */
	for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
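/* The (i + 1) & mask wrap above assumes nb_rx_desc is a power of two
 * (the same assumption the rx_tail update in _recv_raw_pkts_vec() makes);
 * e.g. a 512-entry ring gives mask == 0x1FF, and index 511 wraps to 0.
 */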
int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
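/* For reference (illustrative, assuming the DPDK 2.x rte_mbuf layout):
 * the 8 bytes captured here span the rearm_data block (data_off, refcnt,
 * nb_segs, port) plus the first two bytes of ol_flags, which is exactly
 * what i40e_rxq_rearm() replays into every recycled mbuf with a single
 * 64-bit store.
 */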