[dpdk.git] drivers/net/i40e/i40e_rxtx.c (commit eae4ab0ed57bdf7a8cdb6be2df44ed0b7387b2a7)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <errno.h>
38 #include <stdint.h>
39 #include <stdarg.h>
40 #include <unistd.h>
41 #include <inttypes.h>
42 #include <sys/queue.h>
43
44 #include <rte_string_fns.h>
45 #include <rte_memzone.h>
46 #include <rte_mbuf.h>
47 #include <rte_malloc.h>
48 #include <rte_ether.h>
49 #include <rte_ethdev.h>
50 #include <rte_tcp.h>
51 #include <rte_sctp.h>
52 #include <rte_udp.h>
53
54 #include "i40e_logs.h"
55 #include "base/i40e_prototype.h"
56 #include "base/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
59
60 #define I40E_MIN_RING_DESC     64
61 #define I40E_MAX_RING_DESC     4096
62 #define I40E_ALIGN             128
63 #define DEFAULT_TX_RS_THRESH   32
64 #define DEFAULT_TX_FREE_THRESH 32
65 #define I40E_MAX_PKT_TYPE      256
66
67 #define I40E_TX_MAX_BURST  32
68
69 #define I40E_DMA_MEM_ALIGN 4096
70
71 #define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
72                                         ETH_TXQ_FLAGS_NOOFFLOADS)
73
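/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * I40E_SIMPLE_FLAGS is the txq_flags combination (no multi-segment packets,
 * no offloads) that a queue might be checked against before taking the
 * simple transmit path (i40e_xmit_pkts_simple, declared below).
 */
static inline int
i40e_example_tx_queue_is_simple(uint32_t txq_flags)
{
	return (txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS;
}
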
74 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
75
76 #define I40E_TX_CKSUM_OFFLOAD_MASK (             \
77                 PKT_TX_IP_CKSUM |                \
78                 PKT_TX_L4_MASK |                 \
79                 PKT_TX_OUTER_IP_CKSUM)
80
81 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
82         (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
83
84 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
85         ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
86
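/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * RTE_MBUF_DATA_DMA_ADDR_DEFAULT() points at the default headroom offset of
 * a freshly allocated mbuf and is what an RX refill path would program into
 * a descriptor, while RTE_MBUF_DATA_DMA_ADDR() follows the mbuf's current
 * data_off and is what a TX path would hand to the hardware.
 */
static inline void
i40e_example_refill_rxd(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *m)
{
	uint64_t dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(m));

	rxdp->read.pkt_addr = dma_addr;
	rxdp->read.hdr_addr = dma_addr; /* header split is not used here */
}
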
87 static const struct rte_memzone *
88 i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
89                            const char *ring_name,
90                            uint16_t queue_id,
91                            uint32_t ring_size,
92                            int socket_id);
93 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
94                                       struct rte_mbuf **tx_pkts,
95                                       uint16_t nb_pkts);
96
97 static inline void
98 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
99 {
100         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
101                 (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
102                 mb->ol_flags |= PKT_RX_VLAN_PKT;
103                 mb->vlan_tci =
104                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
105                 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
106                            rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
107         } else {
108                 mb->vlan_tci = 0;
109         }
110 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
111         if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
112                 (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
113                 mb->ol_flags |= PKT_RX_QINQ_PKT;
114                 mb->vlan_tci_outer = mb->vlan_tci;
115                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
116                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
117                            rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
118                            rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
119         } else {
120                 mb->vlan_tci_outer = 0;
121         }
122 #endif
123         PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
124                    mb->vlan_tci, mb->vlan_tci_outer);
125 }
126
127 /* Translate the rx descriptor status to pkt flags */
128 static inline uint64_t
129 i40e_rxd_status_to_pkt_flags(uint64_t qword)
130 {
131         uint64_t flags;
132
133         /* Check if RSS_HASH */
134         flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
135                                         I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
136                         I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
137
138         /* Check if FDIR Match */
139         flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
140                                                         PKT_RX_FDIR : 0);
141
142         return flags;
143 }
144
145 static inline uint64_t
146 i40e_rxd_error_to_pkt_flags(uint64_t qword)
147 {
148         uint64_t flags = 0;
149         uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
150
151 #define I40E_RX_ERR_BITS 0x3f
152         if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
153                 return flags;
154         /* If RXE bit set, all other status bits are meaningless */
155         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
156                 flags |= PKT_RX_MAC_ERR;
157                 return flags;
158         }
159
160         /* If RECIPE bit set, all other status indications should be ignored */
161         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
162                 flags |= PKT_RX_RECIP_ERR;
163                 return flags;
164         }
165         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
166                 flags |= PKT_RX_HBUF_OVERFLOW;
167         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
168                 flags |= PKT_RX_IP_CKSUM_BAD;
169         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
170                 flags |= PKT_RX_L4_CKSUM_BAD;
171         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
172                 flags |= PKT_RX_EIP_CKSUM_BAD;
173         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
174                 flags |= PKT_RX_OVERSIZE;
175
176         return flags;
177 }
178
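/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * how the helpers above would typically be combined for each received
 * descriptor in an RX burst loop.
 */
static inline void
i40e_example_fill_rx_offload(struct rte_mbuf *mb,
			     volatile union i40e_rx_desc *rxdp)
{
	uint64_t qword = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	uint64_t pkt_flags;

	i40e_rxd_to_vlan_tci(mb, rxdp);
	pkt_flags = i40e_rxd_status_to_pkt_flags(qword);
	pkt_flags |= i40e_rxd_error_to_pkt_flags(qword);
	if (pkt_flags & PKT_RX_RSS_HASH)
		mb->hash.rss =
			rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.rss);
	mb->ol_flags |= pkt_flags;
}
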
179 /* Function to check and set the ieee1588 timesync index and get the
180  * appropriate flags.
181  */
182 #ifdef RTE_LIBRTE_IEEE1588
183 static inline uint64_t
184 i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
185 {
186         uint64_t pkt_flags = 0;
187         uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
188                                   | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
189                                     >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
190
191 #ifdef RTE_NEXT_ABI
192         if ((mb->packet_type & RTE_PTYPE_L2_MASK)
193                         == RTE_PTYPE_L2_ETHER_TIMESYNC)
194                 pkt_flags = PKT_RX_IEEE1588_PTP;
195 #endif
196         if (tsyn & 0x04) {
197                 pkt_flags |= PKT_RX_IEEE1588_TMST;
198                 mb->timesync = tsyn & 0x03;
199         }
200
201         return pkt_flags;
202 }
203 #endif
204
205 #ifdef RTE_NEXT_ABI
206 /* Maps each 8-bit hardware PTYPE to an RTE packet type; the hardware datasheet describes each value in detail */
207 static inline uint32_t
208 i40e_rxd_pkt_type_mapping(uint8_t ptype)
209 {
210         static const uint32_t ptype_table[UINT8_MAX + 1] __rte_cache_aligned = {
211                 /* L2 types */
212                 /* [0] reserved */
213                 [1] = RTE_PTYPE_L2_ETHER,
214                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
215                 /* [3] - [5] reserved */
216                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
217                 /* [7] - [10] reserved */
218                 [11] = RTE_PTYPE_L2_ETHER_ARP,
219                 /* [12] - [21] reserved */
220
221                 /* Non tunneled IPv4 */
222                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
223                         RTE_PTYPE_L4_FRAG,
224                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
225                         RTE_PTYPE_L4_NONFRAG,
226                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
227                         RTE_PTYPE_L4_UDP,
228                 /* [25] reserved */
229                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
230                         RTE_PTYPE_L4_TCP,
231                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232                         RTE_PTYPE_L4_SCTP,
233                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
234                         RTE_PTYPE_L4_ICMP,
235
236                 /* IPv4 --> IPv4 */
237                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238                         RTE_PTYPE_TUNNEL_IP |
239                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
240                         RTE_PTYPE_INNER_L4_FRAG,
241                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242                         RTE_PTYPE_TUNNEL_IP |
243                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
244                         RTE_PTYPE_INNER_L4_NONFRAG,
245                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
246                         RTE_PTYPE_TUNNEL_IP |
247                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
248                         RTE_PTYPE_INNER_L4_UDP,
249                 /* [32] reserved */
250                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251                         RTE_PTYPE_TUNNEL_IP |
252                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
253                         RTE_PTYPE_INNER_L4_TCP,
254                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
255                         RTE_PTYPE_TUNNEL_IP |
256                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
257                         RTE_PTYPE_INNER_L4_SCTP,
258                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
259                         RTE_PTYPE_TUNNEL_IP |
260                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
261                         RTE_PTYPE_INNER_L4_ICMP,
262
263                 /* IPv4 --> IPv6 */
264                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
265                         RTE_PTYPE_TUNNEL_IP |
266                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267                         RTE_PTYPE_INNER_L4_FRAG,
268                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
269                         RTE_PTYPE_TUNNEL_IP |
270                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
271                         RTE_PTYPE_INNER_L4_NONFRAG,
272                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
273                         RTE_PTYPE_TUNNEL_IP |
274                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
275                         RTE_PTYPE_INNER_L4_UDP,
276                 /* [39] reserved */
277                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
278                         RTE_PTYPE_TUNNEL_IP |
279                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
280                         RTE_PTYPE_INNER_L4_TCP,
281                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
282                         RTE_PTYPE_TUNNEL_IP |
283                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
284                         RTE_PTYPE_INNER_L4_SCTP,
285                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
286                         RTE_PTYPE_TUNNEL_IP |
287                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
288                         RTE_PTYPE_INNER_L4_ICMP,
289
290                 /* IPv4 --> GRE/Teredo/VXLAN */
291                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
292                         RTE_PTYPE_TUNNEL_GRENAT,
293
294                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
295                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
296                         RTE_PTYPE_TUNNEL_GRENAT |
297                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
298                         RTE_PTYPE_INNER_L4_FRAG,
299                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
300                         RTE_PTYPE_TUNNEL_GRENAT |
301                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
302                         RTE_PTYPE_INNER_L4_NONFRAG,
303                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
304                         RTE_PTYPE_TUNNEL_GRENAT |
305                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
306                         RTE_PTYPE_INNER_L4_UDP,
307                 /* [47] reserved */
308                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
309                         RTE_PTYPE_TUNNEL_GRENAT |
310                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
311                         RTE_PTYPE_INNER_L4_TCP,
312                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
313                         RTE_PTYPE_TUNNEL_GRENAT |
314                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
315                         RTE_PTYPE_INNER_L4_SCTP,
316                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
317                         RTE_PTYPE_TUNNEL_GRENAT |
318                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
319                         RTE_PTYPE_INNER_L4_ICMP,
320
321                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
322                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
323                         RTE_PTYPE_TUNNEL_GRENAT |
324                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
325                         RTE_PTYPE_INNER_L4_FRAG,
326                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
327                         RTE_PTYPE_TUNNEL_GRENAT |
328                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
329                         RTE_PTYPE_INNER_L4_NONFRAG,
330                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
331                         RTE_PTYPE_TUNNEL_GRENAT |
332                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
333                         RTE_PTYPE_INNER_L4_UDP,
334                 /* [54] reserved */
335                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
336                         RTE_PTYPE_TUNNEL_GRENAT |
337                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
338                         RTE_PTYPE_INNER_L4_TCP,
339                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
340                         RTE_PTYPE_TUNNEL_GRENAT |
341                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
342                         RTE_PTYPE_INNER_L4_SCTP,
343                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
344                         RTE_PTYPE_TUNNEL_GRENAT |
345                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
346                         RTE_PTYPE_INNER_L4_ICMP,
347
348                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
349                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
350                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
351
352                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
353                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
354                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
355                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
356                         RTE_PTYPE_INNER_L4_FRAG,
357                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
358                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
359                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
360                         RTE_PTYPE_INNER_L4_NONFRAG,
361                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
362                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
363                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
364                         RTE_PTYPE_INNER_L4_UDP,
365                 /* [62] reserved */
366                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
367                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
368                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
369                         RTE_PTYPE_INNER_L4_TCP,
370                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
371                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
372                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
373                         RTE_PTYPE_INNER_L4_SCTP,
374                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
375                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
376                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
377                         RTE_PTYPE_INNER_L4_ICMP,
378
379                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
380                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
381                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
382                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
383                         RTE_PTYPE_INNER_L4_FRAG,
384                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
385                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
386                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
387                         RTE_PTYPE_INNER_L4_NONFRAG,
388                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
389                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
390                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
391                         RTE_PTYPE_INNER_L4_UDP,
392                 /* [69] reserved */
393                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
394                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
395                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
396                         RTE_PTYPE_INNER_L4_TCP,
397                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
398                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
399                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
400                         RTE_PTYPE_INNER_L4_SCTP,
401                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
402                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
403                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
404                         RTE_PTYPE_INNER_L4_ICMP,
405
406                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
407                 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
408                         RTE_PTYPE_TUNNEL_GRENAT |
409                         RTE_PTYPE_INNER_L2_ETHER_VLAN,
410
411                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
412                 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
413                         RTE_PTYPE_TUNNEL_GRENAT |
414                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
415                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
416                         RTE_PTYPE_INNER_L4_FRAG,
417                 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
418                         RTE_PTYPE_TUNNEL_GRENAT |
419                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
420                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
421                         RTE_PTYPE_INNER_L4_NONFRAG,
422                 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
423                         RTE_PTYPE_TUNNEL_GRENAT |
424                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
425                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
426                         RTE_PTYPE_INNER_L4_UDP,
427                 /* [77] reserved */
428                 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
429                         RTE_PTYPE_TUNNEL_GRENAT |
430                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
431                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
432                         RTE_PTYPE_INNER_L4_TCP,
433                 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
434                         RTE_PTYPE_TUNNEL_GRENAT |
435                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
436                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
437                         RTE_PTYPE_INNER_L4_SCTP,
438                 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
439                         RTE_PTYPE_TUNNEL_GRENAT |
440                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
441                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
442                         RTE_PTYPE_INNER_L4_ICMP,
443
444                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
445                 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
446                         RTE_PTYPE_TUNNEL_GRENAT |
447                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
448                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
449                         RTE_PTYPE_INNER_L4_FRAG,
450                 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
451                         RTE_PTYPE_TUNNEL_GRENAT |
452                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
453                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
454                         RTE_PTYPE_INNER_L4_NONFRAG,
455                 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
456                         RTE_PTYPE_TUNNEL_GRENAT |
457                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
458                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
459                         RTE_PTYPE_INNER_L4_UDP,
460                 /* [84] reserved */
461                 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
462                         RTE_PTYPE_TUNNEL_GRENAT |
463                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
464                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
465                         RTE_PTYPE_INNER_L4_TCP,
466                 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
467                         RTE_PTYPE_TUNNEL_GRENAT |
468                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
469                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
470                         RTE_PTYPE_INNER_L4_SCTP,
471                 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
472                         RTE_PTYPE_TUNNEL_GRENAT |
473                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
474                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
475                         RTE_PTYPE_INNER_L4_ICMP,
476
477                 /* Non tunneled IPv6 */
478                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
479                         RTE_PTYPE_L4_FRAG,
480                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
481                         RTE_PTYPE_L4_NONFRAG,
482                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
483                         RTE_PTYPE_L4_UDP,
484                 /* [91] reserved */
485                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
486                         RTE_PTYPE_L4_TCP,
487                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
488                         RTE_PTYPE_L4_SCTP,
489                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
490                         RTE_PTYPE_L4_ICMP,
491
492                 /* IPv6 --> IPv4 */
493                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
494                         RTE_PTYPE_TUNNEL_IP |
495                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
496                         RTE_PTYPE_INNER_L4_FRAG,
497                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
498                         RTE_PTYPE_TUNNEL_IP |
499                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
500                         RTE_PTYPE_INNER_L4_NONFRAG,
501                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
502                         RTE_PTYPE_TUNNEL_IP |
503                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
504                         RTE_PTYPE_INNER_L4_UDP,
505                 /* [98] reserved */
506                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
507                         RTE_PTYPE_TUNNEL_IP |
508                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
509                         RTE_PTYPE_INNER_L4_TCP,
510                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
511                         RTE_PTYPE_TUNNEL_IP |
512                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
513                         RTE_PTYPE_INNER_L4_SCTP,
514                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
515                         RTE_PTYPE_TUNNEL_IP |
516                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
517                         RTE_PTYPE_INNER_L4_ICMP,
518
519                 /* IPv6 --> IPv6 */
520                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
521                         RTE_PTYPE_TUNNEL_IP |
522                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
523                         RTE_PTYPE_INNER_L4_FRAG,
524                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
525                         RTE_PTYPE_TUNNEL_IP |
526                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
527                         RTE_PTYPE_INNER_L4_NONFRAG,
528                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
529                         RTE_PTYPE_TUNNEL_IP |
530                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
531                         RTE_PTYPE_INNER_L4_UDP,
532                 /* [105] reserved */
533                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
534                         RTE_PTYPE_TUNNEL_IP |
535                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
536                         RTE_PTYPE_INNER_L4_TCP,
537                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
538                         RTE_PTYPE_TUNNEL_IP |
539                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
540                         RTE_PTYPE_INNER_L4_SCTP,
541                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
542                         RTE_PTYPE_TUNNEL_IP |
543                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
544                         RTE_PTYPE_INNER_L4_ICMP,
545
546                 /* IPv6 --> GRE/Teredo/VXLAN */
547                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
548                         RTE_PTYPE_TUNNEL_GRENAT,
549
550                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
551                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
552                         RTE_PTYPE_TUNNEL_GRENAT |
553                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
554                         RTE_PTYPE_INNER_L4_FRAG,
555                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
556                         RTE_PTYPE_TUNNEL_GRENAT |
557                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
558                         RTE_PTYPE_INNER_L4_NONFRAG,
559                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
560                         RTE_PTYPE_TUNNEL_GRENAT |
561                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
562                         RTE_PTYPE_INNER_L4_UDP,
563                 /* [113] reserved */
564                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
565                         RTE_PTYPE_TUNNEL_GRENAT |
566                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
567                         RTE_PTYPE_INNER_L4_TCP,
568                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
569                         RTE_PTYPE_TUNNEL_GRENAT |
570                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
571                         RTE_PTYPE_INNER_L4_SCTP,
572                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
573                         RTE_PTYPE_TUNNEL_GRENAT |
574                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
575                         RTE_PTYPE_INNER_L4_ICMP,
576
577                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
578                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
579                         RTE_PTYPE_TUNNEL_GRENAT |
580                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
581                         RTE_PTYPE_INNER_L4_FRAG,
582                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
583                         RTE_PTYPE_TUNNEL_GRENAT |
584                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
585                         RTE_PTYPE_INNER_L4_NONFRAG,
586                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
587                         RTE_PTYPE_TUNNEL_GRENAT |
588                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
589                         RTE_PTYPE_INNER_L4_UDP,
590                 /* [120] reserved */
591                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
592                         RTE_PTYPE_TUNNEL_GRENAT |
593                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
594                         RTE_PTYPE_INNER_L4_TCP,
595                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
596                         RTE_PTYPE_TUNNEL_GRENAT |
597                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
598                         RTE_PTYPE_INNER_L4_SCTP,
599                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
600                         RTE_PTYPE_TUNNEL_GRENAT |
601                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
602                         RTE_PTYPE_INNER_L4_ICMP,
603
604                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
605                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
606                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
607
608                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
609                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
610                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
611                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
612                         RTE_PTYPE_INNER_L4_FRAG,
613                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
614                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
615                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
616                         RTE_PTYPE_INNER_L4_NONFRAG,
617                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
618                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
619                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
620                         RTE_PTYPE_INNER_L4_UDP,
621                 /* [128] reserved */
622                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
623                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
624                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
625                         RTE_PTYPE_INNER_L4_TCP,
626                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
627                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
628                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
629                         RTE_PTYPE_INNER_L4_SCTP,
630                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
631                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
632                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
633                         RTE_PTYPE_INNER_L4_ICMP,
634
635                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
636                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
637                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
638                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
639                         RTE_PTYPE_INNER_L4_FRAG,
640                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
641                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
642                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
643                         RTE_PTYPE_INNER_L4_NONFRAG,
644                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
645                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
646                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
647                         RTE_PTYPE_INNER_L4_UDP,
648                 /* [135] reserved */
649                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
650                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
651                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
652                         RTE_PTYPE_INNER_L4_TCP,
653                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
654                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
655                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
656                         RTE_PTYPE_INNER_L4_SCTP,
657                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
658                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
659                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
660                         RTE_PTYPE_INNER_L4_ICMP,
661
662                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
663                 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
664                         RTE_PTYPE_TUNNEL_GRENAT |
665                         RTE_PTYPE_INNER_L2_ETHER_VLAN,
666
667                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
668                 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
669                         RTE_PTYPE_TUNNEL_GRENAT |
670                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
671                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
672                         RTE_PTYPE_INNER_L4_FRAG,
673                 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
674                         RTE_PTYPE_TUNNEL_GRENAT |
675                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
676                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
677                         RTE_PTYPE_INNER_L4_NONFRAG,
678                 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
679                         RTE_PTYPE_TUNNEL_GRENAT |
680                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
681                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
682                         RTE_PTYPE_INNER_L4_UDP,
683                 /* [143] reserved */
684                 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
685                         RTE_PTYPE_TUNNEL_GRENAT |
686                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
687                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
688                         RTE_PTYPE_INNER_L4_TCP,
689                 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
690                         RTE_PTYPE_TUNNEL_GRENAT |
691                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
692                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
693                         RTE_PTYPE_INNER_L4_SCTP,
694                 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
695                         RTE_PTYPE_TUNNEL_GRENAT |
696                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
697                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
698                         RTE_PTYPE_INNER_L4_ICMP,
699
700                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
701                 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
702                         RTE_PTYPE_TUNNEL_GRENAT |
703                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
704                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
705                         RTE_PTYPE_INNER_L4_FRAG,
706                 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
707                         RTE_PTYPE_TUNNEL_GRENAT |
708                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
709                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
710                         RTE_PTYPE_INNER_L4_NONFRAG,
711                 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
712                         RTE_PTYPE_TUNNEL_GRENAT |
713                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
714                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
715                         RTE_PTYPE_INNER_L4_UDP,
716                 /* [150] reserved */
717                 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
718                         RTE_PTYPE_TUNNEL_GRENAT |
719                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
720                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
721                         RTE_PTYPE_INNER_L4_TCP,
722                 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
723                         RTE_PTYPE_TUNNEL_GRENAT |
724                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
725                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
726                         RTE_PTYPE_INNER_L4_SCTP,
727                 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
728                         RTE_PTYPE_TUNNEL_GRENAT |
729                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
730                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
731                         RTE_PTYPE_INNER_L4_ICMP,
732
733                 /* All others reserved */
734         };
735
736         return ptype_table[ptype];
737 }
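
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * the 8-bit PTYPE used to index the table above comes from qword1 of the
 * descriptor write-back, so an RTE_NEXT_ABI receive path would resolve
 * mbuf->packet_type roughly as follows.
 */
static inline void
i40e_example_set_packet_type(struct rte_mbuf *mb, uint64_t qword)
{
	uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
				  I40E_RXD_QW1_PTYPE_SHIFT);

	mb->packet_type = i40e_rxd_pkt_type_mapping(ptype);
}
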
738 #else /* RTE_NEXT_ABI */
739 /* Translate pkt types to pkt flags */
740 static inline uint64_t
741 i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
742 {
743         uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
744                                         I40E_RXD_QW1_PTYPE_SHIFT);
745         static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
746                 0, /* PTYPE 0 */
747                 0, /* PTYPE 1 */
748                 PKT_RX_IEEE1588_PTP, /* PTYPE 2 */
749                 0, /* PTYPE 3 */
750                 0, /* PTYPE 4 */
751                 0, /* PTYPE 5 */
752                 0, /* PTYPE 6 */
753                 0, /* PTYPE 7 */
754                 0, /* PTYPE 8 */
755                 0, /* PTYPE 9 */
756                 0, /* PTYPE 10 */
757                 0, /* PTYPE 11 */
758                 0, /* PTYPE 12 */
759                 0, /* PTYPE 13 */
760                 0, /* PTYPE 14 */
761                 0, /* PTYPE 15 */
762                 0, /* PTYPE 16 */
763                 0, /* PTYPE 17 */
764                 0, /* PTYPE 18 */
765                 0, /* PTYPE 19 */
766                 0, /* PTYPE 20 */
767                 0, /* PTYPE 21 */
768                 PKT_RX_IPV4_HDR, /* PTYPE 22 */
769                 PKT_RX_IPV4_HDR, /* PTYPE 23 */
770                 PKT_RX_IPV4_HDR, /* PTYPE 24 */
771                 0, /* PTYPE 25 */
772                 PKT_RX_IPV4_HDR, /* PTYPE 26 */
773                 PKT_RX_IPV4_HDR, /* PTYPE 27 */
774                 PKT_RX_IPV4_HDR, /* PTYPE 28 */
775                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */
776                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */
777                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */
778                 0, /* PTYPE 32 */
779                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */
780                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */
781                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */
782                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */
783                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */
784                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */
785                 0, /* PTYPE 39 */
786                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */
787                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */
788                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */
789                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */
790                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */
791                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */
792                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */
793                 0, /* PTYPE 47 */
794                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */
795                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */
796                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */
797                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */
798                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */
799                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */
800                 0, /* PTYPE 54 */
801                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */
802                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
803                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
804                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
805                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
806                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
807                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
808                 0, /* PTYPE 62 */
809                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
810                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
811                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
812                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
813                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
814                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
815                 0, /* PTYPE 69 */
816                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
817                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
818                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
819                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
820                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
821                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
822                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
823                 0, /* PTYPE 77 */
824                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
825                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
826                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
827                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
828                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
829                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
830                 0, /* PTYPE 84 */
831                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
832                 PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
833                 PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
834                 PKT_RX_IPV6_HDR, /* PTYPE 88 */
835                 PKT_RX_IPV6_HDR, /* PTYPE 89 */
836                 PKT_RX_IPV6_HDR, /* PTYPE 90 */
837                 0, /* PTYPE 91 */
838                 PKT_RX_IPV6_HDR, /* PTYPE 92 */
839                 PKT_RX_IPV6_HDR, /* PTYPE 93 */
840                 PKT_RX_IPV6_HDR, /* PTYPE 94 */
841                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */
842                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */
843                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */
844                 0, /* PTYPE 98 */
845                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */
846                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */
847                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */
848                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */
849                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */
850                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */
851                 0, /* PTYPE 105 */
852                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */
853                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */
854                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */
855                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */
856                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */
857                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */
858                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */
859                 0, /* PTYPE 113 */
860                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */
861                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */
862                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */
863                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */
864                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */
865                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */
866                 0, /* PTYPE 120 */
867                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */
868                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
869                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
870                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
871                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
872                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
873                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
874                 0, /* PTYPE 128 */
875                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
876                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
877                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
878                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
879                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
880                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
881                 0, /* PTYPE 135 */
882                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
883                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
884                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
885                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
886                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
887                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
888                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
889                 0, /* PTYPE 143 */
890                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
891                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
892                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
893                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
894                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
895                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
896                 0, /* PTYPE 150 */
897                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
898                 PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
899                 PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
900                 0, /* PTYPE 154 */
901                 0, /* PTYPE 155 */
902                 0, /* PTYPE 156 */
903                 0, /* PTYPE 157 */
904                 0, /* PTYPE 158 */
905                 0, /* PTYPE 159 */
906                 0, /* PTYPE 160 */
907                 0, /* PTYPE 161 */
908                 0, /* PTYPE 162 */
909                 0, /* PTYPE 163 */
910                 0, /* PTYPE 164 */
911                 0, /* PTYPE 165 */
912                 0, /* PTYPE 166 */
913                 0, /* PTYPE 167 */
914                 0, /* PTYPE 168 */
915                 0, /* PTYPE 169 */
916                 0, /* PTYPE 170 */
917                 0, /* PTYPE 171 */
918                 0, /* PTYPE 172 */
919                 0, /* PTYPE 173 */
920                 0, /* PTYPE 174 */
921                 0, /* PTYPE 175 */
922                 0, /* PTYPE 176 */
923                 0, /* PTYPE 177 */
924                 0, /* PTYPE 178 */
925                 0, /* PTYPE 179 */
926                 0, /* PTYPE 180 */
927                 0, /* PTYPE 181 */
928                 0, /* PTYPE 182 */
929                 0, /* PTYPE 183 */
930                 0, /* PTYPE 184 */
931                 0, /* PTYPE 185 */
932                 0, /* PTYPE 186 */
933                 0, /* PTYPE 187 */
934                 0, /* PTYPE 188 */
935                 0, /* PTYPE 189 */
936                 0, /* PTYPE 190 */
937                 0, /* PTYPE 191 */
938                 0, /* PTYPE 192 */
939                 0, /* PTYPE 193 */
940                 0, /* PTYPE 194 */
941                 0, /* PTYPE 195 */
942                 0, /* PTYPE 196 */
943                 0, /* PTYPE 197 */
944                 0, /* PTYPE 198 */
945                 0, /* PTYPE 199 */
946                 0, /* PTYPE 200 */
947                 0, /* PTYPE 201 */
948                 0, /* PTYPE 202 */
949                 0, /* PTYPE 203 */
950                 0, /* PTYPE 204 */
951                 0, /* PTYPE 205 */
952                 0, /* PTYPE 206 */
953                 0, /* PTYPE 207 */
954                 0, /* PTYPE 208 */
955                 0, /* PTYPE 209 */
956                 0, /* PTYPE 210 */
957                 0, /* PTYPE 211 */
958                 0, /* PTYPE 212 */
959                 0, /* PTYPE 213 */
960                 0, /* PTYPE 214 */
961                 0, /* PTYPE 215 */
962                 0, /* PTYPE 216 */
963                 0, /* PTYPE 217 */
964                 0, /* PTYPE 218 */
965                 0, /* PTYPE 219 */
966                 0, /* PTYPE 220 */
967                 0, /* PTYPE 221 */
968                 0, /* PTYPE 222 */
969                 0, /* PTYPE 223 */
970                 0, /* PTYPE 224 */
971                 0, /* PTYPE 225 */
972                 0, /* PTYPE 226 */
973                 0, /* PTYPE 227 */
974                 0, /* PTYPE 228 */
975                 0, /* PTYPE 229 */
976                 0, /* PTYPE 230 */
977                 0, /* PTYPE 231 */
978                 0, /* PTYPE 232 */
979                 0, /* PTYPE 233 */
980                 0, /* PTYPE 234 */
981                 0, /* PTYPE 235 */
982                 0, /* PTYPE 236 */
983                 0, /* PTYPE 237 */
984                 0, /* PTYPE 238 */
985                 0, /* PTYPE 239 */
986                 0, /* PTYPE 240 */
987                 0, /* PTYPE 241 */
988                 0, /* PTYPE 242 */
989                 0, /* PTYPE 243 */
990                 0, /* PTYPE 244 */
991                 0, /* PTYPE 245 */
992                 0, /* PTYPE 246 */
993                 0, /* PTYPE 247 */
994                 0, /* PTYPE 248 */
995                 0, /* PTYPE 249 */
996                 0, /* PTYPE 250 */
997                 0, /* PTYPE 251 */
998                 0, /* PTYPE 252 */
999                 0, /* PTYPE 253 */
1000                 0, /* PTYPE 254 */
1001                 0, /* PTYPE 255 */
1002         };
1003
1004         return ip_ptype_map[ptype];
1005 }
1006 #endif /* RTE_NEXT_ABI */
1007
1008 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
1009 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
1010 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
1011 #define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK   0x03
1012 #define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX   0x01
1013
1014 static inline uint64_t
1015 i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
1016 {
1017         uint64_t flags = 0;
1018 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1019         uint16_t flexbh, flexbl;
1020
1021         flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1022                 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1023                 I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1024         flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1025                 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
1026                 I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
1027
1028
1029         if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1030                 mb->hash.fdir.hi =
1031                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1032                 flags |= PKT_RX_FDIR_ID;
1033         } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
1034                 mb->hash.fdir.hi =
1035                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
1036                 flags |= PKT_RX_FDIR_FLX;
1037         }
1038         if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
1039                 mb->hash.fdir.lo =
1040                         rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
1041                 flags |= PKT_RX_FDIR_FLX;
1042         }
1043 #else
1044         mb->hash.fdir.hi =
1045                 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1046         flags |= PKT_RX_FDIR_ID;
1047 #endif
1048         return flags;
1049 }
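
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * i40e_rxd_build_fdir() is only meaningful once the status flags report a
 * flow director match, so a caller would gate it on PKT_RX_FDIR.
 */
static inline uint64_t
i40e_example_fdir_flags(struct rte_mbuf *mb,
			volatile union i40e_rx_desc *rxdp,
			uint64_t pkt_flags)
{
	if (pkt_flags & PKT_RX_FDIR)
		pkt_flags |= i40e_rxd_build_fdir(rxdp, mb);

	return pkt_flags;
}
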
1050 static inline void
1051 i40e_txd_enable_checksum(uint64_t ol_flags,
1052                         uint32_t *td_cmd,
1053                         uint32_t *td_offset,
1054                         union i40e_tx_offload tx_offload,
1055                         uint32_t *cd_tunneling)
1056 {
1057         /* Tunneling packet TX checksum offload: set up the outer headers */
1058         if (ol_flags & (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)) {
1059
1060                 *td_offset |= (tx_offload.outer_l2_len >> 1)
1061                                 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1062
1063                 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1064                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1065                 else if (ol_flags & PKT_TX_OUTER_IPV4)
1066                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1067                 else if (ol_flags & PKT_TX_OUTER_IPV6)
1068                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1069
1070                 /* Now set the ctx descriptor fields */
1071                 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
1072                                 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1073                                 (tx_offload.l2_len >> 1) <<
1074                                 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1075
1076         } else
1077                 *td_offset |= (tx_offload.l2_len >> 1)
1078                         << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1079
1080         /* Enable L3 checksum offloads */
1081         if (ol_flags & PKT_TX_IP_CKSUM) {
1082                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1083                 *td_offset |= (tx_offload.l3_len >> 2)
1084                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1085         } else if (ol_flags & PKT_TX_IPV4) {
1086                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1087                 *td_offset |= (tx_offload.l3_len >> 2)
1088                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1089         } else if (ol_flags & PKT_TX_IPV6) {
1090                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1091                 *td_offset |= (tx_offload.l3_len >> 2)
1092                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1093         }
1094
1095         if (ol_flags & PKT_TX_TCP_SEG) {
1096                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1097                 *td_offset |= (tx_offload.l4_len >> 2)
1098                         << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1099                 return;
1100         }
1101
1102         /* Enable L4 checksum offloads */
1103         switch (ol_flags & PKT_TX_L4_MASK) {
1104         case PKT_TX_TCP_CKSUM:
1105                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1106                 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1107                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1108                 break;
1109         case PKT_TX_SCTP_CKSUM:
1110                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1111                 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1112                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1113                 break;
1114         case PKT_TX_UDP_CKSUM:
1115                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1116                 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1117                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1118                 break;
1119         default:
1120                 break;
1121         }
1122 }
1123
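/* Allocate an uninitialized (raw) mbuf from the given mempool */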
1124 static inline struct rte_mbuf *
1125 rte_rxmbuf_alloc(struct rte_mempool *mp)
1126 {
1127         struct rte_mbuf *m;
1128
1129         m = __rte_mbuf_raw_alloc(mp);
1130         __rte_mbuf_sanity_check_raw(m, 0);
1131
1132         return m;
1133 }
1134
1135 /* Construct the cmd/type/offset/buffer-size quad word of a TX data descriptor */
1136 static inline uint64_t
1137 i40e_build_ctob(uint32_t td_cmd,
1138                 uint32_t td_offset,
1139                 unsigned int size,
1140                 uint32_t td_tag)
1141 {
1142         return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1143                         ((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
1144                         ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1145                         ((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1146                         ((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
1147 }
1148
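/* Check whether a batch of TX descriptors is done and account them as free */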
1149 static inline int
1150 i40e_xmit_cleanup(struct i40e_tx_queue *txq)
1151 {
1152         struct i40e_tx_entry *sw_ring = txq->sw_ring;
1153         volatile struct i40e_tx_desc *txd = txq->tx_ring;
1154         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1155         uint16_t nb_tx_desc = txq->nb_tx_desc;
1156         uint16_t desc_to_clean_to;
1157         uint16_t nb_tx_to_clean;
1158
1159         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1160         if (desc_to_clean_to >= nb_tx_desc)
1161                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1162
1163         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1164         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1165                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
1166                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
1167                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1168                         "(port=%d queue=%d)", desc_to_clean_to,
1169                                 txq->port_id, txq->queue_id);
1170                 return -1;
1171         }
1172
1173         if (last_desc_cleaned > desc_to_clean_to)
1174                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1175                                                         desc_to_clean_to);
1176         else
1177                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1178                                         last_desc_cleaned);
1179
1180         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1181
1182         txq->last_desc_cleaned = desc_to_clean_to;
1183         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1184
1185         return 0;
1186 }
1187
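/* Check whether the queue configuration meets the RX bulk-allocation preconditions */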
1188 static inline int
1189 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
1190 check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
1191 #else
1192 check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
1193 #endif
1194 {
1195         int ret = 0;
1196
1197 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
1198         if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
1199                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1200                              "rxq->rx_free_thresh=%d, "
1201                              "RTE_PMD_I40E_RX_MAX_BURST=%d",
1202                              rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
1203                 ret = -EINVAL;
1204         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
1205                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1206                              "rxq->rx_free_thresh=%d, "
1207                              "rxq->nb_rx_desc=%d",
1208                              rxq->rx_free_thresh, rxq->nb_rx_desc);
1209                 ret = -EINVAL;
1210         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
1211                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1212                              "rxq->nb_rx_desc=%d, "
1213                              "rxq->rx_free_thresh=%d",
1214                              rxq->nb_rx_desc, rxq->rx_free_thresh);
1215                 ret = -EINVAL;
1216         } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
1217                                 RTE_PMD_I40E_RX_MAX_BURST))) {
1218                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1219                              "rxq->nb_rx_desc=%d, "
1220                              "I40E_MAX_RING_DESC=%d, "
1221                              "RTE_PMD_I40E_RX_MAX_BURST=%d",
1222                              rxq->nb_rx_desc, I40E_MAX_RING_DESC,
1223                              RTE_PMD_I40E_RX_MAX_BURST);
1224                 ret = -EINVAL;
1225         }
1226 #else
1227         ret = -EINVAL;
1228 #endif
1229
1230         return ret;
1231 }
1232
1233 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
1234 #define I40E_LOOK_AHEAD 8
1235 #if (I40E_LOOK_AHEAD != 8)
1236 #error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
1237 #endif
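/* Scan the RX ring for completed descriptors and stage their mbufs for delivery */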
1238 static inline int
1239 i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
1240 {
1241         volatile union i40e_rx_desc *rxdp;
1242         struct i40e_rx_entry *rxep;
1243         struct rte_mbuf *mb;
1244         uint16_t pkt_len;
1245         uint64_t qword1;
1246         uint32_t rx_status;
1247         int32_t s[I40E_LOOK_AHEAD], nb_dd;
1248         int32_t i, j, nb_rx = 0;
1249         uint64_t pkt_flags;
1250
1251         rxdp = &rxq->rx_ring[rxq->rx_tail];
1252         rxep = &rxq->sw_ring[rxq->rx_tail];
1253
1254         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1255         rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
1256                                 I40E_RXD_QW1_STATUS_SHIFT;
1257
1258         /* Make sure there is at least 1 packet to receive */
1259         if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1260                 return 0;
1261
1262         /**
1263          * Scan LOOK_AHEAD descriptors at a time to determine which
1264          * descriptors reference packets that are ready to be received.
1265          */
1266         for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i += I40E_LOOK_AHEAD,
1267                         rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
1268                 /* Read desc statuses backwards to avoid race condition */
1269                 for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
1270                         qword1 = rte_le_to_cpu_64(\
1271                                 rxdp[j].wb.qword1.status_error_len);
1272                         s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
1273                                         I40E_RXD_QW1_STATUS_SHIFT;
1274                 }
1275
1276                 /* Compute how many status bits were set */
1277                 for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
1278                         nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
1279
1280                 nb_rx += nb_dd;
1281
1282                 /* Translate descriptor info to mbuf parameters */
1283                 for (j = 0; j < nb_dd; j++) {
1284                         mb = rxep[j].mbuf;
1285                         qword1 = rte_le_to_cpu_64(\
1286                                 rxdp[j].wb.qword1.status_error_len);
1287                         pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1288                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1289                         mb->data_len = pkt_len;
1290                         mb->pkt_len = pkt_len;
1291                         mb->ol_flags = 0;
1292                         i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
1293                         pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1294                         pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1295 #ifdef RTE_NEXT_ABI
1296                         mb->packet_type =
1297                                 i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1298                                                 I40E_RXD_QW1_PTYPE_MASK) >>
1299                                                 I40E_RXD_QW1_PTYPE_SHIFT));
1300 #else
1301                         pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
1302
1303                         mb->packet_type = (uint16_t)((qword1 &
1304                                         I40E_RXD_QW1_PTYPE_MASK) >>
1305                                         I40E_RXD_QW1_PTYPE_SHIFT);
1306 #endif /* RTE_NEXT_ABI */
1307                         if (pkt_flags & PKT_RX_RSS_HASH)
1308                                 mb->hash.rss = rte_le_to_cpu_32(\
1309                                         rxdp[j].wb.qword0.hi_dword.rss);
1310                         if (pkt_flags & PKT_RX_FDIR)
1311                                 pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
1312
1313 #ifdef RTE_LIBRTE_IEEE1588
1314                         pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
1315 #endif
1316                         mb->ol_flags |= pkt_flags;
1317
1318                 }
1319
1320                 for (j = 0; j < I40E_LOOK_AHEAD; j++)
1321                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1322
1323                 if (nb_dd != I40E_LOOK_AHEAD)
1324                         break;
1325         }
1326
1327         /* Clear software ring entries */
1328         for (i = 0; i < nb_rx; i++)
1329                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1330
1331         return nb_rx;
1332 }
1333
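/* Copy up to nb_pkts staged mbufs into the caller's rx_pkts array */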
1334 static inline uint16_t
1335 i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
1336                         struct rte_mbuf **rx_pkts,
1337                         uint16_t nb_pkts)
1338 {
1339         uint16_t i;
1340         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1341
1342         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1343
1344         for (i = 0; i < nb_pkts; i++)
1345                 rx_pkts[i] = stage[i];
1346
1347         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1348         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1349
1350         return nb_pkts;
1351 }
1352
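/* Refill the RX ring with rx_free_thresh new mbufs and update the tail register */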
1353 static inline int
1354 i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
1355 {
1356         volatile union i40e_rx_desc *rxdp;
1357         struct i40e_rx_entry *rxep;
1358         struct rte_mbuf *mb;
1359         uint16_t alloc_idx, i;
1360         uint64_t dma_addr;
1361         int diag;
1362
1363         /* Allocate buffers in bulk */
1364         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1365                                 (rxq->rx_free_thresh - 1));
1366         rxep = &(rxq->sw_ring[alloc_idx]);
1367         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1368                                         rxq->rx_free_thresh);
1369         if (unlikely(diag != 0)) {
1370                 PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
1371                 return -ENOMEM;
1372         }
1373
1374         rxdp = &rxq->rx_ring[alloc_idx];
1375         for (i = 0; i < rxq->rx_free_thresh; i++) {
1376                 if (likely(i < (rxq->rx_free_thresh - 1)))
1377                         /* Prefetch next mbuf */
1378                         rte_prefetch0(rxep[i + 1].mbuf);
1379
1380                 mb = rxep[i].mbuf;
1381                 rte_mbuf_refcnt_set(mb, 1);
1382                 mb->next = NULL;
1383                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1384                 mb->nb_segs = 1;
1385                 mb->port = rxq->port_id;
1386                 dma_addr = rte_cpu_to_le_64(\
1387                         RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
1388                 rxdp[i].read.hdr_addr = 0;
1389                 rxdp[i].read.pkt_addr = dma_addr;
1390         }
1391
1392         /* Update the RX tail register */
1393         rte_wmb();
1394         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1395
1396         rxq->rx_free_trigger =
1397                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1398         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1399                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1400
1401         return 0;
1402 }
1403
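/* Return staged packets if available, otherwise scan the hardware ring first */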
1404 static inline uint16_t
1405 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1406 {
1407         struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
1408         uint16_t nb_rx = 0;
1409
1410         if (!nb_pkts)
1411                 return 0;
1412
1413         if (rxq->rx_nb_avail)
1414                 return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1415
1416         nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
1417         rxq->rx_next_avail = 0;
1418         rxq->rx_nb_avail = nb_rx;
1419         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1420
1421         if (rxq->rx_tail > rxq->rx_free_trigger) {
1422                 if (i40e_rx_alloc_bufs(rxq) != 0) {
1423                         uint16_t i, j;
1424
1425                         PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1426                                    "port_id=%u, queue_id=%u",
1427                                    rxq->port_id, rxq->queue_id);
1428                         rxq->rx_nb_avail = 0;
1429                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1430                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1431                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1432
1433                         return 0;
1434                 }
1435         }
1436
1437         if (rxq->rx_tail >= rxq->nb_rx_desc)
1438                 rxq->rx_tail = 0;
1439
1440         if (rxq->rx_nb_avail)
1441                 return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1442
1443         return 0;
1444 }
1445
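/* Bulk-allocation RX burst: split requests larger than RTE_PMD_I40E_RX_MAX_BURST */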
1446 static uint16_t
1447 i40e_recv_pkts_bulk_alloc(void *rx_queue,
1448                           struct rte_mbuf **rx_pkts,
1449                           uint16_t nb_pkts)
1450 {
1451         uint16_t nb_rx = 0, n, count;
1452
1453         if (unlikely(nb_pkts == 0))
1454                 return 0;
1455
1456         if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
1457                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1458
1459         while (nb_pkts) {
1460                 n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
1461                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1462                 nb_rx = (uint16_t)(nb_rx + count);
1463                 nb_pkts = (uint16_t)(nb_pkts - count);
1464                 if (count < n)
1465                         break;
1466         }
1467
1468         return nb_rx;
1469 }
1470 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
1471
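/* Default RX burst function for single-mbuf (non-scattered) packets */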
1472 uint16_t
1473 i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1474 {
1475         struct i40e_rx_queue *rxq;
1476         volatile union i40e_rx_desc *rx_ring;
1477         volatile union i40e_rx_desc *rxdp;
1478         union i40e_rx_desc rxd;
1479         struct i40e_rx_entry *sw_ring;
1480         struct i40e_rx_entry *rxe;
1481         struct rte_mbuf *rxm;
1482         struct rte_mbuf *nmb;
1483         uint16_t nb_rx;
1484         uint32_t rx_status;
1485         uint64_t qword1;
1486         uint16_t rx_packet_len;
1487         uint16_t rx_id, nb_hold;
1488         uint64_t dma_addr;
1489         uint64_t pkt_flags;
1490
1491         nb_rx = 0;
1492         nb_hold = 0;
1493         rxq = rx_queue;
1494         rx_id = rxq->rx_tail;
1495         rx_ring = rxq->rx_ring;
1496         sw_ring = rxq->sw_ring;
1497
1498         while (nb_rx < nb_pkts) {
1499                 rxdp = &rx_ring[rx_id];
1500                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1501                 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1502                                 >> I40E_RXD_QW1_STATUS_SHIFT;
1503
1504                 /* Check the DD bit first */
1505                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1506                         break;
1507
1508                 nmb = rte_rxmbuf_alloc(rxq->mp);
1509                 if (unlikely(!nmb))
1510                         break;
1511                 rxd = *rxdp;
1512
1513                 nb_hold++;
1514                 rxe = &sw_ring[rx_id];
1515                 rx_id++;
1516                 if (unlikely(rx_id == rxq->nb_rx_desc))
1517                         rx_id = 0;
1518
1519                 /* Prefetch next mbuf */
1520                 rte_prefetch0(sw_ring[rx_id].mbuf);
1521
1522                 /**
1523                  * When next RX descriptor is on a cache line boundary,
1524                  * prefetch the next 4 RX descriptors and next 8 pointers
1525                  * to mbufs.
1526                  */
1527                 if ((rx_id & 0x3) == 0) {
1528                         rte_prefetch0(&rx_ring[rx_id]);
1529                         rte_prefetch0(&sw_ring[rx_id]);
1530                 }
1531                 rxm = rxe->mbuf;
1532                 rxe->mbuf = nmb;
1533                 dma_addr =
1534                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1535                 rxdp->read.hdr_addr = 0;
1536                 rxdp->read.pkt_addr = dma_addr;
1537
1538                 rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1539                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1540
1541                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1542                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1543                 rxm->nb_segs = 1;
1544                 rxm->next = NULL;
1545                 rxm->pkt_len = rx_packet_len;
1546                 rxm->data_len = rx_packet_len;
1547                 rxm->port = rxq->port_id;
1548                 rxm->ol_flags = 0;
1549                 i40e_rxd_to_vlan_tci(rxm, &rxd);
1550                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1551                 pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1552 #ifdef RTE_NEXT_ABI
1553                 rxm->packet_type =
1554                         i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1555                         I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
1556 #else
1557                 pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
1558                 rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
1559                                 I40E_RXD_QW1_PTYPE_SHIFT);
1560 #endif /* RTE_NEXT_ABI */
1561                 if (pkt_flags & PKT_RX_RSS_HASH)
1562                         rxm->hash.rss =
1563                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1564                 if (pkt_flags & PKT_RX_FDIR)
1565                         pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
1566
1567 #ifdef RTE_LIBRTE_IEEE1588
1568                 pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
1569 #endif
1570                 rxm->ol_flags |= pkt_flags;
1571
1572                 rx_pkts[nb_rx++] = rxm;
1573         }
1574         rxq->rx_tail = rx_id;
1575
1576         /**
1577          * If the number of free RX descriptors is greater than the RX free
1578          * threshold of the queue, advance the receive tail register of the queue.
1579          * Update that register with the value of the last processed RX
1580          * descriptor minus 1.
1581          */
1582         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1583         if (nb_hold > rxq->rx_free_thresh) {
1584                 rx_id = (uint16_t) ((rx_id == 0) ?
1585                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1586                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1587                 nb_hold = 0;
1588         }
1589         rxq->nb_rx_hold = nb_hold;
1590
1591         return nb_rx;
1592 }
1593
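/* RX burst function for packets that may span multiple mbufs (scattered RX) */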
1594 uint16_t
1595 i40e_recv_scattered_pkts(void *rx_queue,
1596                          struct rte_mbuf **rx_pkts,
1597                          uint16_t nb_pkts)
1598 {
1599         struct i40e_rx_queue *rxq = rx_queue;
1600         volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
1601         volatile union i40e_rx_desc *rxdp;
1602         union i40e_rx_desc rxd;
1603         struct i40e_rx_entry *sw_ring = rxq->sw_ring;
1604         struct i40e_rx_entry *rxe;
1605         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1606         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1607         struct rte_mbuf *nmb, *rxm;
1608         uint16_t rx_id = rxq->rx_tail;
1609         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1610         uint32_t rx_status;
1611         uint64_t qword1;
1612         uint64_t dma_addr;
1613         uint64_t pkt_flags;
1614
1615         while (nb_rx < nb_pkts) {
1616                 rxdp = &rx_ring[rx_id];
1617                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1618                 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
1619                                         I40E_RXD_QW1_STATUS_SHIFT;
1620
1621                 /* Check the DD bit */
1622                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1623                         break;
1624
1625                 nmb = rte_rxmbuf_alloc(rxq->mp);
1626                 if (unlikely(!nmb))
1627                         break;
1628                 rxd = *rxdp;
1629                 nb_hold++;
1630                 rxe = &sw_ring[rx_id];
1631                 rx_id++;
1632                 if (rx_id == rxq->nb_rx_desc)
1633                         rx_id = 0;
1634
1635                 /* Prefetch next mbuf */
1636                 rte_prefetch0(sw_ring[rx_id].mbuf);
1637
1638                 /**
1639                  * When next RX descriptor is on a cache line boundary,
1640                  * prefetch the next 4 RX descriptors and next 8 pointers
1641                  * to mbufs.
1642                  */
1643                 if ((rx_id & 0x3) == 0) {
1644                         rte_prefetch0(&rx_ring[rx_id]);
1645                         rte_prefetch0(&sw_ring[rx_id]);
1646                 }
1647
1648                 rxm = rxe->mbuf;
1649                 rxe->mbuf = nmb;
1650                 dma_addr =
1651                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
1652
1653                 /* Set data buffer address and data length of the mbuf */
1654                 rxdp->read.hdr_addr = 0;
1655                 rxdp->read.pkt_addr = dma_addr;
1656                 rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1657                                         I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1658                 rxm->data_len = rx_packet_len;
1659                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1660
1661                 /**
1662                  * If this is the first buffer of the received packet, set the
1663                  * pointer to the first mbuf of the packet and initialize its
1664                  * context. Otherwise, update the total length and the number
1665                  * of segments of the current scattered packet, and update the
1666                  * pointer to the last mbuf of the current packet.
1667                  */
1668                 if (!first_seg) {
1669                         first_seg = rxm;
1670                         first_seg->nb_segs = 1;
1671                         first_seg->pkt_len = rx_packet_len;
1672                 } else {
1673                         first_seg->pkt_len =
1674                                 (uint16_t)(first_seg->pkt_len +
1675                                                 rx_packet_len);
1676                         first_seg->nb_segs++;
1677                         last_seg->next = rxm;
1678                 }
1679
1680                 /**
1681                  * If this is not the last buffer of the received packet,
1682                  * update the pointer to the last mbuf of the current scattered
1683                  * packet and continue to parse the RX ring.
1684                  */
1685                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
1686                         last_seg = rxm;
1687                         continue;
1688                 }
1689
1690                 /**
1691                  * This is the last buffer of the received packet. If the CRC
1692                  * is not stripped by the hardware:
1693                  *  - Subtract the CRC length from the total packet length.
1694                  *  - If the last buffer only contains the whole CRC or a part
1695                  *  of it, free the mbuf associated to the last buffer. If part
1696                  *  of the CRC is also contained in the previous mbuf, subtract
1697                  *  the length of that CRC part from the data length of the
1698                  *  previous mbuf.
1699                  */
1700                 rxm->next = NULL;
1701                 if (unlikely(rxq->crc_len > 0)) {
1702                         first_seg->pkt_len -= ETHER_CRC_LEN;
1703                         if (rx_packet_len <= ETHER_CRC_LEN) {
1704                                 rte_pktmbuf_free_seg(rxm);
1705                                 first_seg->nb_segs--;
1706                                 last_seg->data_len =
1707                                         (uint16_t)(last_seg->data_len -
1708                                         (ETHER_CRC_LEN - rx_packet_len));
1709                                 last_seg->next = NULL;
1710                         } else
1711                                 rxm->data_len = (uint16_t)(rx_packet_len -
1712                                                                 ETHER_CRC_LEN);
1713                 }
1714
1715                 first_seg->port = rxq->port_id;
1716                 first_seg->ol_flags = 0;
1717                 i40e_rxd_to_vlan_tci(first_seg, &rxd);
1718                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1719                 pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1720 #ifdef RTE_NEXT_ABI
1721                 first_seg->packet_type =
1722                         i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1723                         I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
1724 #else
1725                 pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
1726                 first_seg->packet_type = (uint16_t)((qword1 &
1727                                         I40E_RXD_QW1_PTYPE_MASK) >>
1728                                         I40E_RXD_QW1_PTYPE_SHIFT);
1729 #endif /* RTE_NEXT_ABI */
1730                 if (pkt_flags & PKT_RX_RSS_HASH)
1731                         first_seg->hash.rss =
1732                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1733                 if (pkt_flags & PKT_RX_FDIR)
1734                         pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
1735
1736 #ifdef RTE_LIBRTE_IEEE1588
1737                 pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
1738 #endif
1739                 first_seg->ol_flags |= pkt_flags;
1740
1741                 /* Prefetch data of the first segment. */
1742                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1743                         first_seg->data_off));
1744                 rx_pkts[nb_rx++] = first_seg;
1745                 first_seg = NULL;
1746         }
1747
1748         /* Record index of the next RX descriptor to probe. */
1749         rxq->rx_tail = rx_id;
1750         rxq->pkt_first_seg = first_seg;
1751         rxq->pkt_last_seg = last_seg;
1752
1753         /**
1754          * If the number of free RX descriptors is greater than the RX free
1755          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1756          * register. Update the RDT with the value of the last processed RX
1757          * descriptor minus 1, to guarantee that the RDT register is never
1758          * equal to the RDH register, which creates a "full" ring situation
1759          * from the hardware point of view.
1760          */
1761         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1762         if (nb_hold > rxq->rx_free_thresh) {
1763                 rx_id = (uint16_t)(rx_id == 0 ?
1764                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1765                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1766                 nb_hold = 0;
1767         }
1768         rxq->nb_rx_hold = nb_hold;
1769
1770         return nb_rx;
1771 }
1772
1773 /* Check if the context descriptor is needed for TX offloading */
1774 static inline uint16_t
1775 i40e_calc_context_desc(uint64_t flags)
1776 {
1777         static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
1778                 PKT_TX_TCP_SEG |
1779                 PKT_TX_QINQ_PKT;
1780
1781 #ifdef RTE_LIBRTE_IEEE1588
1782         mask |= PKT_TX_IEEE1588_TMST;
1783 #endif
1784
1785         return ((flags & mask) ? 1 : 0);
1786 }
1787
1788 /* set i40e TSO context descriptor */
1789 static inline uint64_t
1790 i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
1791 {
1792         uint64_t ctx_desc = 0;
1793         uint32_t cd_cmd, hdr_len, cd_tso_len;
1794
1795         if (!tx_offload.l4_len) {
1796                 PMD_DRV_LOG(DEBUG, "L4 length set to 0");
1797                 return ctx_desc;
1798         }
1799
1800         /**
1801          * In case of a non-tunneling packet, the outer_l2_len and
1802          * outer_l3_len must be 0.
1803          */
1804         hdr_len = tx_offload.outer_l2_len +
1805                 tx_offload.outer_l3_len +
1806                 tx_offload.l2_len +
1807                 tx_offload.l3_len +
1808                 tx_offload.l4_len;
1809
1810         cd_cmd = I40E_TX_CTX_DESC_TSO;
1811         cd_tso_len = mbuf->pkt_len - hdr_len;
1812         ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1813                 ((uint64_t)cd_tso_len <<
1814                  I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1815                 ((uint64_t)mbuf->tso_segsz <<
1816                  I40E_TXD_CTX_QW1_MSS_SHIFT);
1817
1818         return ctx_desc;
1819 }
1820
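/* Full-featured TX burst function: VLAN insertion, checksum offloads, TSO and
 * multi-segment packets.
 */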
1821 uint16_t
1822 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1823 {
1824         struct i40e_tx_queue *txq;
1825         struct i40e_tx_entry *sw_ring;
1826         struct i40e_tx_entry *txe, *txn;
1827         volatile struct i40e_tx_desc *txd;
1828         volatile struct i40e_tx_desc *txr;
1829         struct rte_mbuf *tx_pkt;
1830         struct rte_mbuf *m_seg;
1831         uint32_t cd_tunneling_params;
1832         uint16_t tx_id;
1833         uint16_t nb_tx;
1834         uint32_t td_cmd;
1835         uint32_t td_offset;
1836         uint32_t tx_flags;
1837         uint32_t td_tag;
1838         uint64_t ol_flags;
1839         uint16_t nb_used;
1840         uint16_t nb_ctx;
1841         uint16_t tx_last;
1842         uint16_t slen;
1843         uint64_t buf_dma_addr;
1844         union i40e_tx_offload tx_offload = {0};
1845
1846         txq = tx_queue;
1847         sw_ring = txq->sw_ring;
1848         txr = txq->tx_ring;
1849         tx_id = txq->tx_tail;
1850         txe = &sw_ring[tx_id];
1851
1852         /* Check if the descriptor ring needs to be cleaned. */
1853         if (txq->nb_tx_free < txq->tx_free_thresh)
1854                 i40e_xmit_cleanup(txq);
1855
1856         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1857                 td_cmd = 0;
1858                 td_tag = 0;
1859                 td_offset = 0;
1860                 tx_flags = 0;
1861
1862                 tx_pkt = *tx_pkts++;
1863                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1864
1865                 ol_flags = tx_pkt->ol_flags;
1866                 tx_offload.l2_len = tx_pkt->l2_len;
1867                 tx_offload.l3_len = tx_pkt->l3_len;
1868                 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1869                 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1870                 tx_offload.l4_len = tx_pkt->l4_len;
1871                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1872
1873                 /* Calculate the number of context descriptors needed. */
1874                 nb_ctx = i40e_calc_context_desc(ol_flags);
1875
1876                 /**
1877                  * The number of descriptors that must be allocated for
1878                  * a packet equals the number of segments of that packet,
1879                  * plus one context descriptor if needed.
1880                  */
1881                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1882                 tx_last = (uint16_t)(tx_id + nb_used - 1);
1883
1884                 /* Circular ring */
1885                 if (tx_last >= txq->nb_tx_desc)
1886                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1887
1888                 if (nb_used > txq->nb_tx_free) {
1889                         if (i40e_xmit_cleanup(txq) != 0) {
1890                                 if (nb_tx == 0)
1891                                         return 0;
1892                                 goto end_of_tx;
1893                         }
1894                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
1895                                 while (nb_used > txq->nb_tx_free) {
1896                                         if (i40e_xmit_cleanup(txq) != 0) {
1897                                                 if (nb_tx == 0)
1898                                                         return 0;
1899                                                 goto end_of_tx;
1900                                         }
1901                                 }
1902                         }
1903                 }
1904
1905                 /* Descriptor based VLAN insertion */
1906                 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1907                         tx_flags |= tx_pkt->vlan_tci <<
1908                                 I40E_TX_FLAG_L2TAG1_SHIFT;
1909                         tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
1910                         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
1911                         td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
1912                                                 I40E_TX_FLAG_L2TAG1_SHIFT;
1913                 }
1914
1915                 /* Always tell the hardware to insert the Ethernet CRC */
1916                 td_cmd |= I40E_TX_DESC_CMD_ICRC;
1917
1918                 /* Enable checksum offloading */
1919                 cd_tunneling_params = 0;
1920                 if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
1921                         i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
1922                                 tx_offload, &cd_tunneling_params);
1923                 }
1924
1925                 if (nb_ctx) {
1926                         /* Setup TX context descriptor if required */
1927                         volatile struct i40e_tx_context_desc *ctx_txd =
1928                                 (volatile struct i40e_tx_context_desc *)\
1929                                                         &txr[tx_id];
1930                         uint16_t cd_l2tag2 = 0;
1931                         uint64_t cd_type_cmd_tso_mss =
1932                                 I40E_TX_DESC_DTYPE_CONTEXT;
1933
1934                         txn = &sw_ring[txe->next_id];
1935                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1936                         if (txe->mbuf != NULL) {
1937                                 rte_pktmbuf_free_seg(txe->mbuf);
1938                                 txe->mbuf = NULL;
1939                         }
1940
1941                         /* TSO enabled means no timestamp */
1942                         if (ol_flags & PKT_TX_TCP_SEG)
1943                                 cd_type_cmd_tso_mss |=
1944                                         i40e_set_tso_ctx(tx_pkt, tx_offload);
1945                         else {
1946 #ifdef RTE_LIBRTE_IEEE1588
1947                                 if (ol_flags & PKT_TX_IEEE1588_TMST)
1948                                         cd_type_cmd_tso_mss |=
1949                                                 ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
1950                                                  I40E_TXD_CTX_QW1_CMD_SHIFT);
1951 #endif
1952                         }
1953
1954                         ctx_txd->tunneling_params =
1955                                 rte_cpu_to_le_32(cd_tunneling_params);
1956                         if (ol_flags & PKT_TX_QINQ_PKT) {
1957                                 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1958                                 cd_type_cmd_tso_mss |=
1959                                         ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
1960                                                 I40E_TXD_CTX_QW1_CMD_SHIFT);
1961                         }
1962                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1963                         ctx_txd->type_cmd_tso_mss =
1964                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1965
1966                         PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
1967                                 "tunneling_params: %#x;\n"
1968                                 "l2tag2: %#hx;\n"
1969                                 "rsvd: %#hx;\n"
1970                                 "type_cmd_tso_mss: %#"PRIx64";\n",
1971                                 tx_pkt, tx_id,
1972                                 ctx_txd->tunneling_params,
1973                                 ctx_txd->l2tag2,
1974                                 ctx_txd->rsvd,
1975                                 ctx_txd->type_cmd_tso_mss);
1976
1977                         txe->last_id = tx_last;
1978                         tx_id = txe->next_id;
1979                         txe = txn;
1980                 }
1981
1982                 m_seg = tx_pkt;
1983                 do {
1984                         txd = &txr[tx_id];
1985                         txn = &sw_ring[txe->next_id];
1986
1987                         if (txe->mbuf)
1988                                 rte_pktmbuf_free_seg(txe->mbuf);
1989                         txe->mbuf = m_seg;
1990
1991                         /* Setup TX Descriptor */
1992                         slen = m_seg->data_len;
1993                         buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
1994
1995                         PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
1996                                 "buf_dma_addr: %#"PRIx64";\n"
1997                                 "td_cmd: %#x;\n"
1998                                 "td_offset: %#x;\n"
1999                                 "td_len: %u;\n"
2000                                 "td_tag: %#x;\n",
2001                                 tx_pkt, tx_id, buf_dma_addr,
2002                                 td_cmd, td_offset, slen, td_tag);
2003
2004                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2005                         txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
2006                                                 td_offset, slen, td_tag);
2007                         txe->last_id = tx_last;
2008                         tx_id = txe->next_id;
2009                         txe = txn;
2010                         m_seg = m_seg->next;
2011                 } while (m_seg != NULL);
2012
2013                 /* The last packet data descriptor needs End Of Packet (EOP) */
2014                 td_cmd |= I40E_TX_DESC_CMD_EOP;
2015                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2016                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2017
2018                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2019                         PMD_TX_FREE_LOG(DEBUG,
2020                                         "Setting RS bit on TXD id="
2021                                         "%4u (port=%d queue=%d)",
2022                                         tx_last, txq->port_id, txq->queue_id);
2023
2024                         td_cmd |= I40E_TX_DESC_CMD_RS;
2025
2026                         /* Update txq RS bit counters */
2027                         txq->nb_tx_used = 0;
2028                 }
2029
2030                 txd->cmd_type_offset_bsz |=
2031                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2032                                         I40E_TXD_QW1_CMD_SHIFT);
2033         }
2034
2035 end_of_tx:
2036         rte_wmb();
2037
2038         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2039                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
2040                    (unsigned) tx_id, (unsigned) nb_tx);
2041
2042         I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2043         txq->tx_tail = tx_id;
2044
2045         return nb_tx;
2046 }
2047
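/* If the descriptor at tx_next_dd is done, free the associated tx_rs_thresh mbufs */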
2048 static inline int __attribute__((always_inline))
2049 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
2050 {
2051         struct i40e_tx_entry *txep;
2052         uint16_t i;
2053
2054         if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2055                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
2056                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
2057                 return 0;
2058
2059         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
2060
2061         for (i = 0; i < txq->tx_rs_thresh; i++)
2062                 rte_prefetch0((txep + i)->mbuf);
2063
2064         if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
2065                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2066                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2067                         txep->mbuf = NULL;
2068                 }
2069         } else {
2070                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2071                         rte_pktmbuf_free_seg(txep->mbuf);
2072                         txep->mbuf = NULL;
2073                 }
2074         }
2075
2076         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2077         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2078         if (txq->tx_next_dd >= txq->nb_tx_desc)
2079                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2080
2081         return txq->tx_rs_thresh;
2082 }
2083
2084 #define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
2085                      I40E_TX_DESC_CMD_EOP)
2086
2087 /* Populate 4 descriptors with data from 4 mbufs */
2088 static inline void
2089 tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
2090 {
2091         uint64_t dma_addr;
2092         uint32_t i;
2093
2094         for (i = 0; i < 4; i++, txdp++, pkts++) {
2095                 dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
2096                 txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
2097                 txdp->cmd_type_offset_bsz =
2098                         i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
2099                                         (*pkts)->data_len, 0);
2100         }
2101 }
2102
2103 /* Populate 1 descriptor with data from 1 mbuf */
2104 static inline void
2105 tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
2106 {
2107         uint64_t dma_addr;
2108
2109         dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
2110         txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
2111         txdp->cmd_type_offset_bsz =
2112                 i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
2113                                 (*pkts)->data_len, 0);
2114 }
2115
2116 /* Fill hardware descriptor ring with mbuf data */
2117 static inline void
2118 i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
2119                      struct rte_mbuf **pkts,
2120                      uint16_t nb_pkts)
2121 {
2122         volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
2123         struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
2124         const int N_PER_LOOP = 4;
2125         const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2126         int mainpart, leftover;
2127         int i, j;
2128
2129         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
2130         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
2131         for (i = 0; i < mainpart; i += N_PER_LOOP) {
2132                 for (j = 0; j < N_PER_LOOP; ++j) {
2133                         (txep + i + j)->mbuf = *(pkts + i + j);
2134                 }
2135                 tx4(txdp + i, pkts + i);
2136         }
2137         if (unlikely(leftover > 0)) {
2138                 for (i = 0; i < leftover; ++i) {
2139                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2140                         tx1(txdp + mainpart + i, pkts + mainpart + i);
2141                 }
2142         }
2143 }
2144
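/* Simple TX path for single-segment packets without offloads */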
2145 static inline uint16_t
2146 tx_xmit_pkts(struct i40e_tx_queue *txq,
2147              struct rte_mbuf **tx_pkts,
2148              uint16_t nb_pkts)
2149 {
2150         volatile struct i40e_tx_desc *txr = txq->tx_ring;
2151         uint16_t n = 0;
2152
2153         /**
2154          * Begin scanning the H/W ring for done descriptors when the number
2155          * of available descriptors drops below tx_free_thresh. For each done
2156          * descriptor, free the associated buffer.
2157          */
2158         if (txq->nb_tx_free < txq->tx_free_thresh)
2159                 i40e_tx_free_bufs(txq);
2160
2161         /* Use only the available descriptors */
2162         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2163         if (unlikely(!nb_pkts))
2164                 return 0;
2165
2166         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2167         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2168                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2169                 i40e_tx_fill_hw_ring(txq, tx_pkts, n);
2170                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2171                         rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
2172                                                 I40E_TXD_QW1_CMD_SHIFT);
2173                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2174                 txq->tx_tail = 0;
2175         }
2176
2177         /* Fill hardware descriptor ring with mbuf data */
2178         i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2179         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2180
2181         /* Determine if the RS bit needs to be set */
2182         if (txq->tx_tail > txq->tx_next_rs) {
2183                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2184                         rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
2185                                                 I40E_TXD_QW1_CMD_SHIFT);
2186                 txq->tx_next_rs =
2187                         (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2188                 if (txq->tx_next_rs >= txq->nb_tx_desc)
2189                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2190         }
2191
2192         if (txq->tx_tail >= txq->nb_tx_desc)
2193                 txq->tx_tail = 0;
2194
2195         /* Update the tx tail register */
2196         rte_wmb();
2197         I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2198
2199         return nb_pkts;
2200 }
2201
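/* Simple TX burst entry point: split bursts larger than I40E_TX_MAX_BURST */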
2202 static uint16_t
2203 i40e_xmit_pkts_simple(void *tx_queue,
2204                       struct rte_mbuf **tx_pkts,
2205                       uint16_t nb_pkts)
2206 {
2207         uint16_t nb_tx = 0;
2208
2209         if (likely(nb_pkts <= I40E_TX_MAX_BURST))
2210                 return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
2211                                                 tx_pkts, nb_pkts);
2212
2213         while (nb_pkts) {
2214                 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2215                                                 I40E_TX_MAX_BURST);
2216
2217                 ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
2218                                                 &tx_pkts[nb_tx], num);
2219                 nb_tx = (uint16_t)(nb_tx + ret);
2220                 nb_pkts = (uint16_t)(nb_pkts - ret);
2221                 if (ret < num)
2222                         break;
2223         }
2224
2225         return nb_tx;
2226 }
2227
2228 /*
2229  * Find the VSI that a queue belongs to. 'queue_idx' is the queue index used by
2230  * the application, which assumes the queues are sequential. From the driver's
2231  * perspective they are not: for example, q0 belongs to the FDIR VSI, q1-q64 to
2232  * the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128 to VMDQ VSIs. An application
2233  * running on the host can use q1-q64 and q97-q128, 96 queues in total, addressed
2234  * with queue_idx 0 to 95, while the real queue indexes differ. This function maps
2235  * queue_idx back to the VSI the queue belongs to; e.g. with 64 MAIN VSI queues
2236  * and 4 queues per VMDQ VSI, queue_idx 66 resolves to the first VMDQ VSI.
2237  */
2238 static struct i40e_vsi*
2239 i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
2240 {
2241         /* the queue in MAIN VSI range */
2242         if (queue_idx < pf->main_vsi->nb_qps)
2243                 return pf->main_vsi;
2244
2245         queue_idx -= pf->main_vsi->nb_qps;
2246
2247         /* queue_idx is beyond the range covered by the VMDQ VSIs */
2248         if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
2249                 PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
2250                 return NULL;
2251         }
2252
2253         return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
2254 }
2255
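/* Translate an application queue index into the queue offset within its VSI */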
2256 static uint16_t
2257 i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
2258 {
2259         /* the queue in MAIN VSI range */
2260         if (queue_idx < pf->main_vsi->nb_qps)
2261                 return queue_idx;
2262
2263         /* It's VMDQ queues */
2264         queue_idx -= pf->main_vsi->nb_qps;
2265
2266         if (pf->nb_cfg_vmdq_vsi)
2267                 return queue_idx % pf->vmdq_nb_qps;
2268         else {
2269                 PMD_INIT_LOG(ERR, "Failed to get queue offset");
2270                 return (uint16_t)(-1);
2271         }
2272 }
2273
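/* Start an RX queue: allocate its mbufs, program the tail register and enable it */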
2274 int
2275 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2276 {
2277         struct i40e_rx_queue *rxq;
2278         int err = -1;
2279         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2280
2281         PMD_INIT_FUNC_TRACE();
2282
2283         if (rx_queue_id < dev->data->nb_rx_queues) {
2284                 rxq = dev->data->rx_queues[rx_queue_id];
2285
2286                 err = i40e_alloc_rx_queue_mbufs(rxq);
2287                 if (err) {
2288                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
2289                         return err;
2290                 }
2291
2292                 rte_wmb();
2293
2294                 /* Init the RX tail register. */
2295                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
2296
2297                 err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
2298
2299                 if (err) {
2300                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
2301                                     rx_queue_id);
2302
2303                         i40e_rx_queue_release_mbufs(rxq);
2304                         i40e_reset_rx_queue(rxq);
2305                 }
2306         }
2307
2308         return err;
2309 }
2310
2311 int
2312 i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2313 {
2314         struct i40e_rx_queue *rxq;
2315         int err;
2316         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2317
2318         if (rx_queue_id < dev->data->nb_rx_queues) {
2319                 rxq = dev->data->rx_queues[rx_queue_id];
2320
2321                 /*
2322                  * rx_queue_id is the queue id the application refers to,
2323                  * while rxq->reg_idx is the real queue index.
2324                  */
2325                 err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
2326
2327                 if (err) {
2328                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
2329                                     rx_queue_id);
2330                         return err;
2331                 }
2332                 i40e_rx_queue_release_mbufs(rxq);
2333                 i40e_reset_rx_queue(rxq);
2334         }
2335
2336         return 0;
2337 }
2338
2339 int
2340 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2341 {
2342         int err = -1;
2343         struct i40e_tx_queue *txq;
2344         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2345
2346         PMD_INIT_FUNC_TRACE();
2347
2348         if (tx_queue_id < dev->data->nb_tx_queues) {
2349                 txq = dev->data->tx_queues[tx_queue_id];
2350
2351                 /*
2352                  * tx_queue_id is the queue id the application refers to,
2353                  * while txq->reg_idx is the real queue index.
2354                  */
2355                 err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
2356                 if (err)
2357                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
2358                                     tx_queue_id);
2359         }
2360
2361         return err;
2362 }
2363
2364 int
2365 i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2366 {
2367         struct i40e_tx_queue *txq;
2368         int err;
2369         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2370
2371         if (tx_queue_id < dev->data->nb_tx_queues) {
2372                 txq = dev->data->tx_queues[tx_queue_id];
2373
2374                 /*
2375                  * tx_queue_id is the queue id the application refers to,
2376                  * while txq->reg_idx is the real queue index.
2377                  */
2378                 err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
2379
2380                 if (err) {
2381                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
2382                                     tx_queue_id);
2383                         return err;
2384                 }
2385
2386                 i40e_tx_queue_release_mbufs(txq);
2387                 i40e_reset_tx_queue(txq);
2388         }
2389
2390         return 0;
2391 }
2392
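/* Set up an RX queue: allocate the queue structure, descriptor ring and software ring */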
2393 int
2394 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
2395                         uint16_t queue_idx,
2396                         uint16_t nb_desc,
2397                         unsigned int socket_id,
2398                         const struct rte_eth_rxconf *rx_conf,
2399                         struct rte_mempool *mp)
2400 {
2401         struct i40e_vsi *vsi;
2402         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2403         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2404         struct i40e_rx_queue *rxq;
2405         const struct rte_memzone *rz;
2406         uint32_t ring_size;
2407         uint16_t len;
2408         int use_def_burst_func = 1;
2409
2410         if (hw->mac.type == I40E_MAC_VF) {
2411                 struct i40e_vf *vf =
2412                         I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2413                 vsi = &vf->vsi;
2414         } else
2415                 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2416
2417         if (vsi == NULL) {
2418                 PMD_DRV_LOG(ERR, "VSI not available or queue "
2419                             "index exceeds the maximum");
2420                 return I40E_ERR_PARAM;
2421         }
2422         if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
2423                                         (nb_desc > I40E_MAX_RING_DESC) ||
2424                                         (nb_desc < I40E_MIN_RING_DESC)) {
2425                 PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
2426                             "invalid", nb_desc);
2427                 return I40E_ERR_PARAM;
2428         }
2429
2430         /* Free memory if needed */
2431         if (dev->data->rx_queues[queue_idx]) {
2432                 i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
2433                 dev->data->rx_queues[queue_idx] = NULL;
2434         }
2435
2436         /* Allocate the rx queue data structure */
2437         rxq = rte_zmalloc_socket("i40e rx queue",
2438                                  sizeof(struct i40e_rx_queue),
2439                                  RTE_CACHE_LINE_SIZE,
2440                                  socket_id);
2441         if (!rxq) {
2442                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2443                             "rx queue data structure");
2444                 return (-ENOMEM);
2445         }
2446         rxq->mp = mp;
2447         rxq->nb_rx_desc = nb_desc;
2448         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2449         rxq->queue_id = queue_idx;
2450         if (hw->mac.type == I40E_MAC_VF)
2451                 rxq->reg_idx = queue_idx;
2452         else /* PF device */
2453                 rxq->reg_idx = vsi->base_queue +
2454                         i40e_get_queue_offset_by_qindex(pf, queue_idx);
2455
2456         rxq->port_id = dev->data->port_id;
2457         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2458                                                         0 : ETHER_CRC_LEN);
2459         rxq->drop_en = rx_conf->rx_drop_en;
2460         rxq->vsi = vsi;
2461         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2462
2463         /* Allocate the maximum number of RX ring hardware descriptors. */
2464         ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
2465         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2466         rz = i40e_ring_dma_zone_reserve(dev,
2467                                         "rx_ring",
2468                                         queue_idx,
2469                                         ring_size,
2470                                         socket_id);
2471         if (!rz) {
2472                 i40e_dev_rx_queue_release(rxq);
2473                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
2474                 return (-ENOMEM);
2475         }
2476
2477         /* Zero all the descriptors in the ring. */
2478         memset(rz->addr, 0, ring_size);
2479
2480 #ifdef RTE_LIBRTE_XEN_DOM0
2481         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2482 #else
2483         rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
2484 #endif
2485
2486         rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
2487
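             /*
              * When RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is enabled the software
              * ring is over-allocated by RTE_PMD_I40E_RX_MAX_BURST entries; the
              * extra entries are later pointed at a dummy mbuf (see
              * i40e_reset_rx_queue()) so the bulk-alloc descriptor scan can run
              * past the ring end without extra wrap checks.
              */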
2488 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2489         len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
2490 #else
2491         len = nb_desc;
2492 #endif
2493
2494         /* Allocate the software ring. */
2495         rxq->sw_ring =
2496                 rte_zmalloc_socket("i40e rx sw ring",
2497                                    sizeof(struct i40e_rx_entry) * len,
2498                                    RTE_CACHE_LINE_SIZE,
2499                                    socket_id);
2500         if (!rxq->sw_ring) {
2501                 i40e_dev_rx_queue_release(rxq);
2502                 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
2503                 return (-ENOMEM);
2504         }
2505
2506         i40e_reset_rx_queue(rxq);
2507         rxq->q_set = TRUE;
2508         dev->data->rx_queues[queue_idx] = rxq;
2509
2510         use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
2511
2512         if (!use_def_burst_func && !dev->data->scattered_rx) {
2513 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2514                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2515                              "satisfied. Rx Burst Bulk Alloc function will be "
2516                              "used on port=%d, queue=%d.",
2517                              rxq->port_id, rxq->queue_id);
2518                 dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
2519 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2520         } else {
2521                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2522                              "not satisfied, Scattered Rx is requested, "
2523                              "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
2524                              "not enabled on port=%d, queue=%d.",
2525                              rxq->port_id, rxq->queue_id);
2526         }
2527
2528         return 0;
2529 }
2530
2531 void
2532 i40e_dev_rx_queue_release(void *rxq)
2533 {
2534         struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
2535
2536         if (!q) {
2537                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2538                 return;
2539         }
2540
2541         i40e_rx_queue_release_mbufs(q);
2542         rte_free(q->sw_ring);
2543         rte_free(q);
2544 }
2545
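     /*
      * Return the number of descriptors that hardware has written back on the
      * given RX queue. The DD bit is only sampled every I40E_RXQ_SCAN_INTERVAL
      * descriptors, so the result is an approximation rounded to that interval.
      */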
2546 uint32_t
2547 i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2548 {
2549 #define I40E_RXQ_SCAN_INTERVAL 4
2550         volatile union i40e_rx_desc *rxdp;
2551         struct i40e_rx_queue *rxq;
2552         uint16_t desc = 0;
2553
2554         if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
2555                 PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
2556                 return 0;
2557         }
2558
2559         rxq = dev->data->rx_queues[rx_queue_id];
2560         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2561         while ((desc < rxq->nb_rx_desc) &&
2562                 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2563                 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2564                                 (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
2565                 /**
2566                  * Check the DD bit of a rx descriptor of each 4 in a group,
2567                  * to avoid checking too frequently and downgrading performance
2568                  * too much.
2569                  */
2570                 desc += I40E_RXQ_SCAN_INTERVAL;
2571                 rxdp += I40E_RXQ_SCAN_INTERVAL;
2572                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2573                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2574                                         desc - rxq->nb_rx_desc]);
2575         }
2576
2577         return desc;
2578 }
2579
2580 int
2581 i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2582 {
2583         volatile union i40e_rx_desc *rxdp;
2584         struct i40e_rx_queue *rxq = rx_queue;
2585         uint16_t desc;
2586         int ret;
2587
2588         if (unlikely(offset >= rxq->nb_rx_desc)) {
2589                 PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
2590                 return 0;
2591         }
2592
2593         desc = rxq->rx_tail + offset;
2594         if (desc >= rxq->nb_rx_desc)
2595                 desc -= rxq->nb_rx_desc;
2596
2597         rxdp = &(rxq->rx_ring[desc]);
2598
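             /*
              * Extract the status field from qword1 of the descriptor and test
              * the DD (descriptor done) bit; the ethdev API
              * rte_eth_rx_descriptor_done() maps onto this handler.
              */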
2599         ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2600                 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2601                                 (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
2602
2603         return ret;
2604 }
2605
2606 int
2607 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
2608                         uint16_t queue_idx,
2609                         uint16_t nb_desc,
2610                         unsigned int socket_id,
2611                         const struct rte_eth_txconf *tx_conf)
2612 {
2613         struct i40e_vsi *vsi;
2614         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2615         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2616         struct i40e_tx_queue *txq;
2617         const struct rte_memzone *tz;
2618         uint32_t ring_size;
2619         uint16_t tx_rs_thresh, tx_free_thresh;
2620
2621         if (hw->mac.type == I40E_MAC_VF) {
2622                 struct i40e_vf *vf =
2623                         I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2624                 vsi = &vf->vsi;
2625         } else
2626                 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2627
2628         if (vsi == NULL) {
2629                 PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
2630                             "exceeds the maximum", queue_idx);
2631                 return I40E_ERR_PARAM;
2632         }
2633
2634         if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
2635                                         (nb_desc > I40E_MAX_RING_DESC) ||
2636                                         (nb_desc < I40E_MIN_RING_DESC)) {
2637                 PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
2638                             "invalid", nb_desc);
2639                 return I40E_ERR_PARAM;
2640         }
2641
2642         /**
2643          * The following two parameters control the setting of the RS bit on
2644          * transmit descriptors. TX descriptors will have their RS bit set
2645          * after txq->tx_rs_thresh descriptors have been used. The TX
2646          * descriptor ring will be cleaned after txq->tx_free_thresh
2647          * descriptors are used or if the number of descriptors required to
2648          * transmit a packet is greater than the number of free TX descriptors.
2649          *
2650          * The following constraints must be satisfied:
2651          *  - tx_rs_thresh must be greater than 0.
2652          *  - tx_rs_thresh must be less than the size of the ring minus 2.
2653          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
2654          *  - tx_rs_thresh must be a divisor of the ring size.
2655          *  - tx_free_thresh must be greater than 0.
2656          *  - tx_free_thresh must be less than the size of the ring minus 3.
2657          *
2658          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
2659          * race condition, hence the maximum threshold constraints. When set
2660          * to zero use default values.
2661          */
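             /*
              * Illustrative example (values chosen here, not mandated by the
              * hardware): with nb_desc = 1024 and the defaults
              * DEFAULT_TX_RS_THRESH = 32 and DEFAULT_TX_FREE_THRESH = 32,
              * every constraint holds: 32 < 1022, 32 < 1021, 32 <= 32 and
              * 1024 % 32 == 0.
              */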
2662         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2663                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2664         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2665                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2666         if (tx_rs_thresh >= (nb_desc - 2)) {
2667                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2668                              "number of TX descriptors minus 2. "
2669                              "(tx_rs_thresh=%u port=%d queue=%d)",
2670                              (unsigned int)tx_rs_thresh,
2671                              (int)dev->data->port_id,
2672                              (int)queue_idx);
2673                 return I40E_ERR_PARAM;
2674         }
2675         if (tx_free_thresh >= (nb_desc - 3)) {
2676                 PMD_INIT_LOG(ERR,
2677                              "tx_free_thresh must be less than the "
2678                              "number of TX descriptors minus 3. "
2679                              "(tx_free_thresh=%u port=%d queue=%d)",
2680                              (unsigned int)tx_free_thresh,
2681                              (int)dev->data->port_id,
2682                              (int)queue_idx);
2683                 return I40E_ERR_PARAM;
2684         }
2685         if (tx_rs_thresh > tx_free_thresh) {
2686                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
2687                              "equal to tx_free_thresh. (tx_free_thresh=%u"
2688                              " tx_rs_thresh=%u port=%d queue=%d)",
2689                              (unsigned int)tx_free_thresh,
2690                              (unsigned int)tx_rs_thresh,
2691                              (int)dev->data->port_id,
2692                              (int)queue_idx);
2693                 return I40E_ERR_PARAM;
2694         }
2695         if ((nb_desc % tx_rs_thresh) != 0) {
2696                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2697                              "number of TX descriptors. (tx_rs_thresh=%u"
2698                              " port=%d queue=%d)",
2699                              (unsigned int)tx_rs_thresh,
2700                              (int)dev->data->port_id,
2701                              (int)queue_idx);
2702                 return I40E_ERR_PARAM;
2703         }
2704         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2705                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2706                              "tx_rs_thresh is greater than 1. "
2707                              "(tx_rs_thresh=%u port=%d queue=%d)",
2708                              (unsigned int)tx_rs_thresh,
2709                              (int)dev->data->port_id,
2710                              (int)queue_idx);
2711                 return I40E_ERR_PARAM;
2712         }
2713
2714         /* Free memory if needed. */
2715         if (dev->data->tx_queues[queue_idx]) {
2716                 i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
2717                 dev->data->tx_queues[queue_idx] = NULL;
2718         }
2719
2720         /* Allocate the TX queue data structure. */
2721         txq = rte_zmalloc_socket("i40e tx queue",
2722                                   sizeof(struct i40e_tx_queue),
2723                                   RTE_CACHE_LINE_SIZE,
2724                                   socket_id);
2725         if (!txq) {
2726                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2727                             "tx queue structure");
2728                 return (-ENOMEM);
2729         }
2730
2731         /* Allocate TX hardware ring descriptors. */
2732         ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
2733         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2734         tz = i40e_ring_dma_zone_reserve(dev,
2735                                         "tx_ring",
2736                                         queue_idx,
2737                                         ring_size,
2738                                         socket_id);
2739         if (!tz) {
2740                 i40e_dev_tx_queue_release(txq);
2741                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
2742                 return (-ENOMEM);
2743         }
2744
2745         txq->nb_tx_desc = nb_desc;
2746         txq->tx_rs_thresh = tx_rs_thresh;
2747         txq->tx_free_thresh = tx_free_thresh;
2748         txq->pthresh = tx_conf->tx_thresh.pthresh;
2749         txq->hthresh = tx_conf->tx_thresh.hthresh;
2750         txq->wthresh = tx_conf->tx_thresh.wthresh;
2751         txq->queue_id = queue_idx;
2752         if (hw->mac.type == I40E_MAC_VF)
2753                 txq->reg_idx = queue_idx;
2754         else /* PF device */
2755                 txq->reg_idx = vsi->base_queue +
2756                         i40e_get_queue_offset_by_qindex(pf, queue_idx);
2757
2758         txq->port_id = dev->data->port_id;
2759         txq->txq_flags = tx_conf->txq_flags;
2760         txq->vsi = vsi;
2761         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2762
2763 #ifdef RTE_LIBRTE_XEN_DOM0
2764         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2765 #else
2766         txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
2767 #endif
2768         txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
2769
2770         /* Allocate software ring */
2771         txq->sw_ring =
2772                 rte_zmalloc_socket("i40e tx sw ring",
2773                                    sizeof(struct i40e_tx_entry) * nb_desc,
2774                                    RTE_CACHE_LINE_SIZE,
2775                                    socket_id);
2776         if (!txq->sw_ring) {
2777                 i40e_dev_tx_queue_release(txq);
2778                 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
2779                 return (-ENOMEM);
2780         }
2781
2782         i40e_reset_tx_queue(txq);
2783         txq->q_set = TRUE;
2784         dev->data->tx_queues[queue_idx] = txq;
2785
2786         /* Use a simple TX queue without offloads or multi segs if possible */
2787         if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
2788                                 (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
2789                 PMD_INIT_LOG(INFO, "Using simple tx path");
2790                 dev->tx_pkt_burst = i40e_xmit_pkts_simple;
2791         } else {
2792                 PMD_INIT_LOG(INFO, "Using full-featured tx path");
2793                 dev->tx_pkt_burst = i40e_xmit_pkts;
2794         }
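             /*
              * A sketch of an application-side configuration that would select
              * the simple TX path above (placeholder values; the remaining
              * txconf fields are left zero so the driver defaults apply):
              *
              *     struct rte_eth_txconf txconf = {
              *             .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
              *                          ETH_TXQ_FLAGS_NOOFFLOADS,
              *             .tx_rs_thresh = 32, .tx_free_thresh = 32,
              *     };
              *     rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
              *                            &txconf);
              */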
2795
2796         return 0;
2797 }
2798
2799 void
2800 i40e_dev_tx_queue_release(void *txq)
2801 {
2802         struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
2803
2804         if (!q) {
2805                 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
2806                 return;
2807         }
2808
2809         i40e_tx_queue_release_mbufs(q);
2810         rte_free(q->sw_ring);
2811         rte_free(q);
2812 }
2813
2814 static const struct rte_memzone *
2815 i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
2816                            const char *ring_name,
2817                            uint16_t queue_id,
2818                            uint32_t ring_size,
2819                            int socket_id)
2820 {
2821         char z_name[RTE_MEMZONE_NAMESIZE];
2822         const struct rte_memzone *mz;
2823
2824         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2825                         dev->driver->pci_drv.name, ring_name,
2826                                 dev->data->port_id, queue_id);
2827         mz = rte_memzone_lookup(z_name);
2828         if (mz)
2829                 return mz;
2830
2831 #ifdef RTE_LIBRTE_XEN_DOM0
2832         return rte_memzone_reserve_bounded(z_name, ring_size,
2833                 socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
2834 #else
2835         return rte_memzone_reserve_aligned(z_name, ring_size,
2836                                 socket_id, 0, I40E_ALIGN);
2837 #endif
2838 }
2839
2840 const struct rte_memzone *
2841 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
2842 {
2843         const struct rte_memzone *mz = NULL;
2844
2845         mz = rte_memzone_lookup(name);
2846         if (mz)
2847                 return mz;
2848 #ifdef RTE_LIBRTE_XEN_DOM0
2849         mz = rte_memzone_reserve_bounded(name, len,
2850                 socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
2851 #else
2852         mz = rte_memzone_reserve_aligned(name, len,
2853                                 socket_id, 0, I40E_ALIGN);
2854 #endif
2855         return mz;
2856 }
2857
2858 void
2859 i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
2860 {
2861         uint16_t i;
2862
2863         if (!rxq || !rxq->sw_ring) {
2864                 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
2865                 return;
2866         }
2867
2868         for (i = 0; i < rxq->nb_rx_desc; i++) {
2869                 if (rxq->sw_ring[i].mbuf) {
2870                         rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2871                         rxq->sw_ring[i].mbuf = NULL;
2872                 }
2873         }
2874 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2875         if (rxq->rx_nb_avail == 0)
2876                 return;
2877         for (i = 0; i < rxq->rx_nb_avail; i++) {
2878                 struct rte_mbuf *mbuf;
2879
2880                 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
2881                 rte_pktmbuf_free_seg(mbuf);
2882         }
2883         rxq->rx_nb_avail = 0;
2884 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2885 }
2886
2887 void
2888 i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
2889 {
2890         unsigned i;
2891         uint16_t len;
2892
2893         if (!rxq) {
2894                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2895                 return;
2896         }
2897
2898 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2899         if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
2900                 len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
2901         else
2902 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2903                 len = rxq->nb_rx_desc;
2904
2905         for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
2906                 ((volatile char *)rxq->rx_ring)[i] = 0;
2907
2908 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2909         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2910         for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
2911                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
2912
2913         rxq->rx_nb_avail = 0;
2914         rxq->rx_next_avail = 0;
2915         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2916 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2917         rxq->rx_tail = 0;
2918         rxq->nb_rx_hold = 0;
2919         rxq->pkt_first_seg = NULL;
2920         rxq->pkt_last_seg = NULL;
2921 }
2922
2923 void
2924 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
2925 {
2926         uint16_t i;
2927
2928         if (!txq || !txq->sw_ring) {
2929                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
2930                 return;
2931         }
2932
2933         for (i = 0; i < txq->nb_tx_desc; i++) {
2934                 if (txq->sw_ring[i].mbuf) {
2935                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2936                         txq->sw_ring[i].mbuf = NULL;
2937                 }
2938         }
2939 }
2940
2941 void
2942 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
2943 {
2944         struct i40e_tx_entry *txe;
2945         uint16_t i, prev, size;
2946
2947         if (!txq) {
2948                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
2949                 return;
2950         }
2951
2952         txe = txq->sw_ring;
2953         size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
2954         for (i = 0; i < size; i++)
2955                 ((volatile char *)txq->tx_ring)[i] = 0;
2956
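             /*
              * Mark every descriptor as already completed and link the software
              * ring entries into a circular list through next_id/last_id.
              */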
2957         prev = (uint16_t)(txq->nb_tx_desc - 1);
2958         for (i = 0; i < txq->nb_tx_desc; i++) {
2959                 volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
2960
2961                 txd->cmd_type_offset_bsz =
2962                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
2963                 txe[i].mbuf =  NULL;
2964                 txe[i].last_id = i;
2965                 txe[prev].next_id = i;
2966                 prev = i;
2967         }
2968
2969         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2970         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2971
2972         txq->tx_tail = 0;
2973         txq->nb_tx_used = 0;
2974
2975         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2976         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2977 }
2978
2979 /* Init the TX queue in hardware */
2980 int
2981 i40e_tx_queue_init(struct i40e_tx_queue *txq)
2982 {
2983         enum i40e_status_code err = I40E_SUCCESS;
2984         struct i40e_vsi *vsi = txq->vsi;
2985         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2986         uint16_t pf_q = txq->reg_idx;
2987         struct i40e_hmc_obj_txq tx_ctx;
2988         uint32_t qtx_ctl;
2989
2990         /* clear the context structure first */
2991         memset(&tx_ctx, 0, sizeof(tx_ctx));
2992         tx_ctx.new_context = 1;
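             /* The queue context stores the ring base address in units of
              * I40E_QUEUE_BASE_ADDR_UNIT bytes, hence the division below. */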
2993         tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
2994         tx_ctx.qlen = txq->nb_tx_desc;
2995
2996 #ifdef RTE_LIBRTE_IEEE1588
2997         tx_ctx.timesync_ena = 1;
2998 #endif
2999         tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
3000         if (vsi->type == I40E_VSI_FDIR)
3001                 tx_ctx.fd_ena = TRUE;
3002
3003         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3004         if (err != I40E_SUCCESS) {
3005                 PMD_DRV_LOG(ERR, "Failed to clear LAN TX queue context");
3006                 return err;
3007         }
3008
3009         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3010         if (err != I40E_SUCCESS) {
3011                 PMD_DRV_LOG(ERR, "Failed to set LAN TX queue context");
3012                 return err;
3013         }
3014
3015         /* Now associate this queue with this PCI function */
3016         qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3017         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3018                                         I40E_QTX_CTL_PF_INDX_MASK);
3019         I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3020         I40E_WRITE_FLUSH(hw);
3021
3022         txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3023
3024         return err;
3025 }
3026
3027 int
3028 i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
3029 {
3030         struct i40e_rx_entry *rxe = rxq->sw_ring;
3031         uint64_t dma_addr;
3032         uint16_t i;
3033
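             /* Allocate one mbuf per descriptor and program its DMA address. */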
3034         for (i = 0; i < rxq->nb_rx_desc; i++) {
3035                 volatile union i40e_rx_desc *rxd;
3036                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
3037
3038                 if (unlikely(!mbuf)) {
3039                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
3040                         return -ENOMEM;
3041                 }
3042
3043                 rte_mbuf_refcnt_set(mbuf, 1);
3044                 mbuf->next = NULL;
3045                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3046                 mbuf->nb_segs = 1;
3047                 mbuf->port = rxq->port_id;
3048
3049                 dma_addr =
3050                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3051
3052                 rxd = &rxq->rx_ring[i];
3053                 rxd->read.pkt_addr = dma_addr;
3054                 rxd->read.hdr_addr = 0;
3055 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3056                 rxd->read.rsvd1 = 0;
3057                 rxd->read.rsvd2 = 0;
3058 #endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
3059
3060                 rxe[i].mbuf = mbuf;
3061         }
3062
3063         return 0;
3064 }
3065
3066 /*
3067  * Calculate the buffer length, and check the jumbo frame
3068  * and maximum packet length.
3069  */
3070 static int
3071 i40e_rx_queue_config(struct i40e_rx_queue *rxq)
3072 {
3073         struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
3074         struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
3075         struct rte_eth_dev_data *data = pf->dev_data;
3076         uint16_t buf_size, len;
3077
3078         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
3079                 RTE_PKTMBUF_HEADROOM);
3080
3081         switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
3082                         I40E_FLAG_HEADER_SPLIT_ENABLED)) {
3083         case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
3084                 rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
3085                                 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
3086                 rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
3087                                 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
3088                 rxq->hs_mode = i40e_header_split_enabled;
3089                 break;
3090         case I40E_FLAG_HEADER_SPLIT_DISABLED:
3091         default:
3092                 rxq->rx_hdr_len = 0;
3093                 rxq->rx_buf_len = RTE_ALIGN(buf_size,
3094                         (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
3095                 rxq->hs_mode = i40e_header_split_none;
3096                 break;
3097         }
3098
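             /*
              * The largest frame the queue can receive is bounded by the number
              * of data buffers the hardware can chain per packet times the size
              * of one buffer.
              */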
3099         len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
3100         rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
3101         if (data->dev_conf.rxmode.jumbo_frame == 1) {
3102                 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
3103                         rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
3104                         PMD_DRV_LOG(ERR, "maximum packet length must "
3105                                     "be larger than %u and smaller than %u"
3106                                     ", as jumbo frame is enabled",
3107                                     (uint32_t)ETHER_MAX_LEN,
3108                                     (uint32_t)I40E_FRAME_SIZE_MAX);
3109                         return I40E_ERR_CONFIG;
3110                 }
3111         } else {
3112                 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
3113                         rxq->max_pkt_len > ETHER_MAX_LEN) {
3114                         PMD_DRV_LOG(ERR, "maximum packet length must be "
3115                                     "larger than %u and smaller than %u, "
3116                                     "as jumbo frame is disabled",
3117                                     (uint32_t)ETHER_MIN_LEN,
3118                                     (uint32_t)ETHER_MAX_LEN);
3119                         return I40E_ERR_CONFIG;
3120                 }
3121         }
3122
3123         return 0;
3124 }
3125
3126 /* Init the RX queue in hardware */
3127 int
3128 i40e_rx_queue_init(struct i40e_rx_queue *rxq)
3129 {
3130         int err = I40E_SUCCESS;
3131         struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
3132         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
3133         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
3134         uint16_t pf_q = rxq->reg_idx;
3135         uint16_t buf_size;
3136         struct i40e_hmc_obj_rxq rx_ctx;
3137
3138         err = i40e_rx_queue_config(rxq);
3139         if (err < 0) {
3140                 PMD_DRV_LOG(ERR, "Failed to configure RX queue");
3141                 return err;
3142         }
3143
3144         /* Clear the context structure first */
3145         memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
3146         rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
3147         rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
3148
3149         rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
3150         rx_ctx.qlen = rxq->nb_rx_desc;
3151 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3152         rx_ctx.dsize = 1;
3153 #endif
3154         rx_ctx.dtype = rxq->hs_mode;
3155         if (rxq->hs_mode)
3156                 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
3157         else
3158                 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
3159         rx_ctx.rxmax = rxq->max_pkt_len;
3160         rx_ctx.tphrdesc_ena = 1;
3161         rx_ctx.tphwdesc_ena = 1;
3162         rx_ctx.tphdata_ena = 1;
3163         rx_ctx.tphhead_ena = 1;
3164         rx_ctx.lrxqthresh = 2;
3165         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
3166         rx_ctx.l2tsel = 1;
3167         rx_ctx.showiv = 1;
3168         rx_ctx.prefena = 1;
3169
3170         err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3171         if (err != I40E_SUCCESS) {
3172                 PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
3173                 return err;
3174         }
3175         err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3176         if (err != I40E_SUCCESS) {
3177                 PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
3178                 return err;
3179         }
3180
3181         rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3182
3183         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
3184                 RTE_PKTMBUF_HEADROOM);
3185
3186         /* Check if scattered RX needs to be used. */
3187         if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
3188                 dev_data->scattered_rx = 1;
3189                 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
3190         }
3191
3192         /* Init the RX tail register. */
3193         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3194
3195         return 0;
3196 }
3197
3198 void
3199 i40e_dev_clear_queues(struct rte_eth_dev *dev)
3200 {
3201         uint16_t i;
3202
3203         PMD_INIT_FUNC_TRACE();
3204
3205         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3206                 i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
3207                 i40e_reset_tx_queue(dev->data->tx_queues[i]);
3208         }
3209
3210         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3211                 i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
3212                 i40e_reset_rx_queue(dev->data->rx_queues[i]);
3213         }
3214 }
3215
3216 void
3217 i40e_dev_free_queues(struct rte_eth_dev *dev)
3218 {
3219         uint16_t i;
3220
3221         PMD_INIT_FUNC_TRACE();
3222
3223         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3224                 i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
3225                 dev->data->rx_queues[i] = NULL;
3226         }
3227         dev->data->nb_rx_queues = 0;
3228
3229         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3230                 i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
3231                 dev->data->tx_queues[i] = NULL;
3232         }
3233         dev->data->nb_tx_queues = 0;
3234 }
3235
3236 #define I40E_FDIR_NUM_TX_DESC  I40E_MIN_RING_DESC
3237 #define I40E_FDIR_NUM_RX_DESC  I40E_MIN_RING_DESC
3238
3239 enum i40e_status_code
3240 i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
3241 {
3242         struct i40e_tx_queue *txq;
3243         const struct rte_memzone *tz = NULL;
3244         uint32_t ring_size;
3245         struct rte_eth_dev *dev;
3246
3247         if (!pf) {
3248                 PMD_DRV_LOG(ERR, "PF is not available");
3249                 return I40E_ERR_BAD_PTR;
3250         }
3251         dev = pf->adapter->eth_dev;
3252         /* Allocate the TX queue data structure. */
3253         txq = rte_zmalloc_socket("i40e fdir tx queue",
3254                                   sizeof(struct i40e_tx_queue),
3255                                   RTE_CACHE_LINE_SIZE,
3256                                   SOCKET_ID_ANY);
3257         if (!txq) {
3258                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3259                                         "tx queue structure.");
3260                 return I40E_ERR_NO_MEMORY;
3261         }
3262
3263         /* Allocate TX hardware ring descriptors. */
3264         ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
3265         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3266
3267         tz = i40e_ring_dma_zone_reserve(dev,
3268                                         "fdir_tx_ring",
3269                                         I40E_FDIR_QUEUE_ID,
3270                                         ring_size,
3271                                         SOCKET_ID_ANY);
3272         if (!tz) {
3273                 i40e_dev_tx_queue_release(txq);
3274                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
3275                 return I40E_ERR_NO_MEMORY;
3276         }
3277
3278         txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
3279         txq->queue_id = I40E_FDIR_QUEUE_ID;
3280         txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3281         txq->vsi = pf->fdir.fdir_vsi;
3282
3283 #ifdef RTE_LIBRTE_XEN_DOM0
3284         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
3285 #else
3286         txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
3287 #endif
3288         txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
3289         /*
3290          * There is no need to allocate a software ring or reset the FDIR
3291          * program queue; just mark the queue as configured.
3292          */
3293         txq->q_set = TRUE;
3294         pf->fdir.txq = txq;
3295
3296         return I40E_SUCCESS;
3297 }
3298
3299 enum i40e_status_code
3300 i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
3301 {
3302         struct i40e_rx_queue *rxq;
3303         const struct rte_memzone *rz = NULL;
3304         uint32_t ring_size;
3305         struct rte_eth_dev *dev;
3306
3307         if (!pf) {
3308                 PMD_DRV_LOG(ERR, "PF is not available");
3309                 return I40E_ERR_BAD_PTR;
3310         }
3311         dev = pf->adapter->eth_dev;
3312         /* Allocate the RX queue data structure. */
3313         rxq = rte_zmalloc_socket("i40e fdir rx queue",
3314                                   sizeof(struct i40e_rx_queue),
3315                                   RTE_CACHE_LINE_SIZE,
3316                                   SOCKET_ID_ANY);
3317         if (!rxq) {
3318                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3319                                         "rx queue structure.");
3320                 return I40E_ERR_NO_MEMORY;
3321         }
3322
3323         /* Allocate RX hardware ring descriptors. */
3324         ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
3325         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3326
3327         rz = i40e_ring_dma_zone_reserve(dev,
3328                                         "fdir_rx_ring",
3329                                         I40E_FDIR_QUEUE_ID,
3330                                         ring_size,
3331                                         SOCKET_ID_ANY);
3332         if (!rz) {
3333                 i40e_dev_rx_queue_release(rxq);
3334                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
3335                 return I40E_ERR_NO_MEMORY;
3336         }
3337
3338         rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
3339         rxq->queue_id = I40E_FDIR_QUEUE_ID;
3340         rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3341         rxq->vsi = pf->fdir.fdir_vsi;
3342
3343 #ifdef RTE_LIBRTE_XEN_DOM0
3344         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
3345 #else
3346         rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
3347 #endif
3348         rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
3349
3350         /*
3351          * There is no need to allocate a software ring or reset the FDIR
3352          * RX queue; just mark the queue as configured.
3353          */
3354         rxq->q_set = TRUE;
3355         pf->fdir.rxq = rxq;
3356
3357         return I40E_SUCCESS;
3358 }