/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
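
/**
 * Reassemble segmented packets produced by the vector Rx path.
 *
 * rx_bufs[] holds nb_bufs freshly received segments; split_flags[i] is
 * non-zero when segment i is followed by further segments of the same
 * packet. Completed packets are compacted back into rx_bufs[], and any
 * trailing partial packet is parked in rxq->pkt_first_seg/pkt_last_seg
 * for the next burst. Returns the number of completed packets.
 */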
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
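
/**
 * Bulk-free transmitted mbufs once the descriptor marked by tx_next_dd
 * reports DONE. Releases tx_rs_thresh mbufs from the software ring,
 * batching them per mempool with rte_mempool_put_bulk() when possible,
 * then advances the free-count and next-DD bookkeeping. Returns the
 * number of descriptors released (0 if the DD bit is not yet set).
 */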
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
	struct i40e_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
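
/**
 * Record the mbufs handed to the hardware in the software ring so that
 * i40e_tx_free_bufs() can release them once transmission completes.
 */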
static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
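
/**
 * Free every mbuf still owned by the Rx software ring. Once rearming has
 * started, only the window between rx_tail and rxrearm_start holds valid
 * mbufs; when nothing has been consumed yet (rxrearm_nb == 0) the whole
 * ring is scanned.
 */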
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
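
/**
 * Build the 64-bit mbuf "rearm" template: a zeroed mbuf with refcnt,
 * nb_segs, data_off and port pre-set. The vector Rx routines copy this
 * rearm_data word into each received mbuf instead of initializing the
 * fields one by one.
 */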
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
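
/**
 * Check whether the device configuration allows the vector Rx path.
 * Returns -1 when flow director, header split, QinQ (VLAN extend) or
 * IEEE 1588 timestamping is enabled; otherwise records in
 * ad->rx_vec_allowed whether every Rx ring (or, on a started device,
 * only the first ring) has a power-of-two descriptor count and returns 0.
 */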
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
	struct i40e_rx_queue *rxq;
	uint16_t desc, i;

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* no header split support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		return -1;

	/* no QinQ support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/* Vector mode is allowed only when number of Rx queue
	 * descriptors is a power of 2.
	 */
	if (!dev->data->dev_started) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			if (i == 0)
				ad->rx_vec_allowed =
					rte_is_power_of_2(desc);
			else
				ad->rx_vec_allowed = ad->rx_vec_allowed ?
					rte_is_power_of_2(desc) :
					ad->rx_vec_allowed;
		}
	} else {
		/* Only check the first queue's descriptor number */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			ad->rx_vec_allowed = rte_is_power_of_2(desc);
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif /* RTE_LIBRTE_IEEE1588 */
}
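
/**
 * Refill RTE_I40E_RXQ_REARM_THRESH Rx descriptors with fresh mbufs from
 * the queue's mempool and advance the ring tail (compiled only when
 * CC_AVX2_SUPPORT is set). With 32-byte descriptors the buffer addresses
 * are written two at a time with SSE stores; with 16-byte descriptors the
 * loop uses AVX-512 (8 mbufs per iteration) when 'avx512' is set, or AVX2
 * (4 per iteration) otherwise. On allocation failure the alloc-failed
 * counter is bumped and, if the ring is about to run dry, the next few
 * descriptors are pointed at the queue's fake mbuf.
 */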
#ifdef CC_AVX2_SUPPORT
static __rte_always_inline void
i40e_rxq_rearm_common(struct i40e_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
#ifdef CC_AVX512_SUPPORT
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
				i += 8, rxep += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;
			mb4 = rxep[4].mbuf;
			mb5 = rxep[5].mbuf;
			mb6 = rxep[6].mbuf;
			mb7 = rxep[7].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						   vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						   vaddr6_7, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
				i += 4, rxep += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(vaddr0), vaddr1, 1);
			vaddr2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(vaddr2), vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
		}
	}
#endif

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif /* CC_AVX2_SUPPORT */

#endif /* _I40E_RXTX_VEC_COMMON_H_ */