/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_COMMON_AVX_H_
#define _ICE_RXTX_COMMON_AVX_H_

#include "ice_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
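
/*
 * Shared descriptor-ring rearm helper for the AVX2 and AVX-512 RX paths:
 * it refills ICE_RXQ_REARM_THRESH software-ring slots with fresh mbufs from
 * the queue's mempool, writes their DMA addresses into the hardware
 * descriptors, and finally bumps the queue tail register.
 */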
#ifdef __AVX2__
static __rte_always_inline void
ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 ICE_RXQ_REARM_THRESH) < 0) {
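		/*
		 * Allocation failed: if the ring is about to run dry, park
		 * the next ICE_DESCS_PER_LOOP entries on the queue's fake
		 * mbuf and zero their DMA addresses so the vector RX burst
		 * never uses a stale buffer address, then account the
		 * failure and retry on a later rearm call.
		 */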
		if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			ICE_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
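
		/*
		 * Each vaddrN register now holds [buf_addr | buf_iova] for
		 * one mbuf; duplicating the high lane leaves the IOVA in
		 * both 64-bit lanes, and adding the headroom yields the
		 * buffer DMA address written to both the pkt_addr and
		 * hdr_addr fields of the descriptor's read section.
		 */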
		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
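/*
 * With 16-byte descriptors the 16-byte read section is the whole
 * descriptor, so neighbouring descriptors can be rearmed with one wide
 * store: the AVX-512 path below writes 8 descriptors per iteration (two
 * 512-bit stores) and the AVX2 fallback writes 4 (two 256-bit stores).
 */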
#ifdef __AVX512VL__
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < ICE_RXQ_REARM_THRESH;
				i += 8, rxep += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;
			mb4 = rxep[4].mbuf;
			mb5 = rxep[5].mbuf;
			mb6 = rxep[6].mbuf;
			mb7 = rxep[7].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						   vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						   vaddr6_7, 1);
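
			/*
			 * vaddr0_3 and vaddr4_7 now hold the buf_addr/buf_iova
			 * pairs of mbufs 0-3 and 4-7 back to back, matching the
			 * layout of four consecutive 16-byte descriptors per
			 * 512-bit register.
			 */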
			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif /* __AVX512VL__ */
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < ICE_RXQ_REARM_THRESH;
				i += 4, rxep += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
		}
	}
#endif /* RTE_LIBRTE_ICE_16BYTE_RX_DESC */

	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;
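
	/*
	 * rx_id is the index of the last descriptor just rearmed (one slot
	 * before the new rxrearm_start, wrapping to the end of the ring);
	 * writing it to the queue tail register hands the freshly armed
	 * descriptors to the NIC.
	 */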
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif /* __AVX2__ */

#endif /* _ICE_RXTX_COMMON_AVX_H_ */