/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
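
/*
 * Reassemble scattered packets: walk the burst, link each continuation
 * buffer onto the packet started earlier (possibly in a previous call),
 * and emit a completed multi-segment mbuf once the descriptor's split
 * flag clears. Any unfinished packet is parked in the queue state
 * (pkt_first_seg/pkt_last_seg) for the next call.
 */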
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
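
/*
 * Bulk-free transmitted mbufs once the descriptor at tx_next_dd reports
 * DONE. With MBUF_FAST_FREE the whole batch is returned to a single
 * mempool in one call; otherwise mbufs are pre-freed and grouped by pool
 * before being returned. Returns the number of descriptors reclaimed
 * (tx_rs_thresh), or 0 if the hardware is not done with them yet.
 */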
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
	struct i40e_tx_entry *txep;
	uint32_t n, i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];

	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
		for (i = 0; i < n; i++) {
			free[i] = txep[i].mbuf;
			/* no need to reset txep[i].mbuf in vector path */
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
		goto done;
	}

	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free,
							nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
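
/* Record the mbufs handed to the hardware in the S/W ring so that
 * i40e_tx_free_bufs() can release them once transmission completes.
 */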
static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
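
/*
 * Free every mbuf still owned by the Rx queue. If the queue has never
 * been rearmed (rxrearm_nb == 0) all descriptors hold valid mbufs;
 * otherwise only the range from rx_tail up to rxrearm_start does.
 */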
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned mask = rxq->nb_rx_desc - 1;
	unsigned i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
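
/*
 * Pre-compute the 64-bit "mbuf initializer" used by the vector Rx paths.
 * rearm_data overlays data_off, refcnt, nb_segs and port, so one 64-bit
 * store re-initialises all four fields of a received mbuf. A simplified
 * sketch of how the vector Rx paths consume it ('mb' stands for a received
 * mbuf; the real code blends in ol_flags before storing):
 *
 *	*(uint64_t *)&mb->rearm_data = rxq->mbuf_initializer;
 */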
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
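
/*
 * Check whether the device configuration allows the vector Rx path:
 * flow director must be off, header split and QinQ (VLAN extend) must
 * not be requested, and every Rx ring size must be a power of two.
 */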
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
	struct i40e_rx_queue *rxq;
	uint16_t desc, i;
	bool first_queue = true;

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* no header split support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		return -1;

	/* no QinQ support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/**
	 * Vector mode is allowed only when number of Rx queue
	 * descriptor is power of 2.
	 */
	if (!dev->data->dev_started) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			if (first_queue)
				ad->rx_vec_allowed =
					rte_is_power_of_2(desc);
			else
				ad->rx_vec_allowed = ad->rx_vec_allowed ?
					rte_is_power_of_2(desc) :
					ad->rx_vec_allowed;
			first_queue = false;
		}
	} else {
		/* Only check the first queue's descriptor number */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			ad->rx_vec_allowed = rte_is_power_of_2(desc);
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#ifdef CC_AVX2_SUPPORT
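/*
 * Refill RTE_I40E_RXQ_REARM_THRESH Rx descriptors with fresh mbufs from
 * the queue's mempool, write their DMA addresses (buffer address plus
 * headroom) into the descriptor ring with SSE/AVX2/AVX-512 stores, then
 * bump the queue tail register. If the bulk allocation fails and the ring
 * is about to run dry, the next few descriptors are pointed at the queue's
 * fake mbuf so the hardware never writes to a stale address.
 */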
static __rte_always_inline void
i40e_rxq_rearm_common(struct i40e_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
#ifdef CC_AVX512_SUPPORT
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
				i += 8, rxep += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;
			mb4 = rxep[4].mbuf;
			mb5 = rxep[5].mbuf;
			mb6 = rxep[6].mbuf;
			mb7 = rxep[7].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						   vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						   vaddr6_7, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
				i += 4, rxep += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(vaddr0), vaddr1, 1);
			vaddr2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(vaddr2), vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read,
					dma_addr2_3);
		}
	}
#endif

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif /* CC_AVX2_SUPPORT */

#endif /* _I40E_RXTX_VEC_COMMON_H_ */