net/iavf: fix pointer of metadata
[dpdk.git] / drivers / net / ice / ice_rxtx_common_avx.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #ifndef _ICE_RXTX_COMMON_AVX_H_
6 #define _ICE_RXTX_COMMON_AVX_H_
7
8 #include "ice_rxtx.h"
9
10 #ifndef __INTEL_COMPILER
11 #pragma GCC diagnostic ignored "-Wcast-qual"
12 #endif
13
14 #ifdef __AVX2__
/**
 * Refill ICE_RXQ_REARM_THRESH RX descriptors with freshly allocated mbufs.
 *
 * Bulk-allocates mbufs from the queue's mempool into the software ring
 * starting at rxq->rxrearm_start, writes each mbuf's buffer IOVA (plus
 * RTE_PKTMBUF_HEADROOM) into the matching hardware descriptor with vector
 * stores, then advances the rearm bookkeeping and updates the queue tail
 * register.
 *
 * @param rxq
 *   RX queue to rearm.
 * @param avx512
 *   When 16-byte descriptors are configured and the compiler supports
 *   __AVX512VL__, selects the AVX-512 store path (8 descriptors per
 *   iteration) over the AVX2 path (4 per iteration). Unused otherwise,
 *   hence __rte_unused.
 */
static __rte_always_inline void
ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	/* Descriptor to start rearming from (parallel to rxep). */
	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 ICE_RXQ_REARM_THRESH) < 0) {
		/* Allocation failed. If the ring is nearly exhausted, point
		 * the next ICE_DESCS_PER_LOOP entries at the fake mbuf and
		 * zero their DMA addresses so the vector RX burst path has
		 * something safe to read; otherwise just count the failure
		 * and retry on a later burst.
		 */
		if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			ICE_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	/* 32-byte descriptors: only the 16-byte 'read' portion of each
	 * descriptor is written, one 128-bit store per descriptor.
	 */
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
	/* 16-byte descriptors: whole descriptors can be packed into wide
	 * stores (4 per 512-bit store, 2 per 256-bit store).
	 */
#ifdef __AVX512VL__
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < ICE_RXQ_REARM_THRESH;
				i += 8, rxep += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;
			mb4 = rxep[4].mbuf;
			mb5 = rxep[5].mbuf;
			mb6 = rxep[6].mbuf;
			mb7 = rxep[7].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						   vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						   vaddr6_7, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif /* __AVX512VL__ */
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < ICE_RXQ_REARM_THRESH;
				i += 4, rxep += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxep[0].mbuf;
			mb1 = rxep[1].mbuf;
			mb2 = rxep[2].mbuf;
			mb3 = rxep[3].mbuf;

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
		}
	}

#endif

	/* Advance the rearm cursor, wrapping at the end of the ring. */
	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	/* Tail points to the last rearmed descriptor (one before
	 * rxrearm_start, wrapping to the ring end when it is 0).
	 */
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
211 #endif /* __AVX2__ */
212
213 #endif /* _ICE_RXTX_COMMON_AVX_H_ */