net/iavf: support flex desc metadata extraction
drivers/net/iavf/iavf_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
17 #include <rte_mbuf.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_udp.h>
24 #include <rte_ip.h>
25 #include <rte_net.h>
26 #include <rte_vect.h>
27
28 #include "iavf.h"
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
31
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
34
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
42
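/*
 * Editor's note: a minimal, illustrative sketch (not part of the PMD) of how
 * an application might consume the protocol extraction metadata exposed via
 * the dynamic mbuf field and flags declared above. It only uses the helpers
 * referenced elsewhere in this file (RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA,
 * rte_pmd_ifd_dynf_proto_xtr_metadata_avail); handle_ipv4_metadata() is a
 * hypothetical application hook.
 *
 *	if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *	    (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)) {
 *		uint32_t md = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 *
 *		handle_ipv4_metadata(md);
 *	}
 */
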
43 uint8_t
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
45 {
46         static uint8_t rxdid_map[] = {
47                 [IAVF_PROTO_XTR_NONE]      = IAVF_RXDID_COMMS_OVS_1,
48                 [IAVF_PROTO_XTR_VLAN]      = IAVF_RXDID_COMMS_AUX_VLAN,
49                 [IAVF_PROTO_XTR_IPV4]      = IAVF_RXDID_COMMS_AUX_IPV4,
50                 [IAVF_PROTO_XTR_IPV6]      = IAVF_RXDID_COMMS_AUX_IPV6,
51                 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52                 [IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
53                 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
54         };
55
56         return flex_type < RTE_DIM(rxdid_map) ?
57                                 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
58 }
59
60 static inline int
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
62 {
63         /* The following constraints must be satisfied:
64          *   thresh < rxq->nb_rx_desc
65          */
66         if (thresh >= nb_desc) {
67                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
68                              thresh, nb_desc);
69                 return -EINVAL;
70         }
71         return 0;
72 }
73
74 static inline int
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76                 uint16_t tx_free_thresh)
77 {
78         /* TX descriptors will have their RS bit set after tx_rs_thresh
79          * descriptors have been used. The TX descriptor ring will be cleaned
80          * after tx_free_thresh descriptors are used or if the number of
81          * descriptors required to transmit a packet is greater than the
82          * number of free TX descriptors.
83          *
84          * The following constraints must be satisfied:
85          *  - tx_rs_thresh must be less than the size of the ring minus 2.
86          *  - tx_free_thresh must be less than the size of the ring minus 3.
87          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
88          *  - tx_rs_thresh must be a divisor of the ring size.
89          *
90          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91          * race condition, hence the maximum threshold constraints. When set
92          * to zero use default values.
93          */
94         if (tx_rs_thresh >= (nb_desc - 2)) {
95                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96                              "number of TX descriptors (%u) minus 2",
97                              tx_rs_thresh, nb_desc);
98                 return -EINVAL;
99         }
100         if (tx_free_thresh >= (nb_desc - 3)) {
101                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102                              "number of TX descriptors (%u) minus 3.",
103                              tx_free_thresh, nb_desc);
104                 return -EINVAL;
105         }
106         if (tx_rs_thresh > tx_free_thresh) {
107                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108                              "equal to tx_free_thresh (%u).",
109                              tx_rs_thresh, tx_free_thresh);
110                 return -EINVAL;
111         }
112         if ((nb_desc % tx_rs_thresh) != 0) {
113                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114                              "number of TX descriptors (%u).",
115                              tx_rs_thresh, nb_desc);
116                 return -EINVAL;
117         }
118
119         return 0;
120 }
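
/*
 * Editor's note: a worked example of the constraints checked above, with
 * illustrative values (not defaults mandated by the driver). For a 1024-entry
 * Tx ring:
 *
 *   tx_rs_thresh   = 32  -> 32 < 1024 - 2 and 1024 % 32 == 0, accepted
 *   tx_free_thresh = 64  -> 64 < 1024 - 3 and 32 <= 64, accepted
 *   tx_rs_thresh   = 48  -> rejected, because 1024 % 48 != 0
 */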
121
122 static inline bool
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
124 {
125         if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126             rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127                 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
128                 return true;
129         }
130
131         PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
132         return false;
133 }
134
135 static inline bool
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
137 {
138         if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139             txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140             txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141                 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
142                 return true;
143         }
144         PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
145         return false;
146 }
147
148 static inline bool
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
150 {
151         bool ret = true;
152
153         if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155                              "rxq->rx_free_thresh=%d, "
156                              "IAVF_RX_MAX_BURST=%d",
157                              rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
158                 ret = false;
159         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161                              "rxq->nb_rx_desc=%d, "
162                              "rxq->rx_free_thresh=%d",
163                              rxq->nb_rx_desc, rxq->rx_free_thresh);
164                 ret = false;
165         }
166         return ret;
167 }
168
169 static inline void
170 reset_rx_queue(struct iavf_rx_queue *rxq)
171 {
172         uint16_t len;
173         uint32_t i;
174
175         if (!rxq)
176                 return;
177
178         len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
179
180         for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181                 ((volatile char *)rxq->rx_ring)[i] = 0;
182
183         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
184
185         for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186                 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
187
188         /* for rx bulk */
189         rxq->rx_nb_avail = 0;
190         rxq->rx_next_avail = 0;
191         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
192
193         rxq->rx_tail = 0;
194         rxq->nb_rx_hold = 0;
195         rxq->pkt_first_seg = NULL;
196         rxq->pkt_last_seg = NULL;
197 }
198
199 static inline void
200 reset_tx_queue(struct iavf_tx_queue *txq)
201 {
202         struct iavf_tx_entry *txe;
203         uint32_t i, size;
204         uint16_t prev;
205
206         if (!txq) {
207                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
208                 return;
209         }
210
211         txe = txq->sw_ring;
212         size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
213         for (i = 0; i < size; i++)
214                 ((volatile char *)txq->tx_ring)[i] = 0;
215
216         prev = (uint16_t)(txq->nb_tx_desc - 1);
217         for (i = 0; i < txq->nb_tx_desc; i++) {
218                 txq->tx_ring[i].cmd_type_offset_bsz =
219                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
220                 txe[i].mbuf =  NULL;
221                 txe[i].last_id = i;
222                 txe[prev].next_id = i;
223                 prev = i;
224         }
225
226         txq->tx_tail = 0;
227         txq->nb_used = 0;
228
229         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
230         txq->nb_free = txq->nb_tx_desc - 1;
231
232         txq->next_dd = txq->rs_thresh - 1;
233         txq->next_rs = txq->rs_thresh - 1;
234 }
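
/*
 * Editor's note: the loop above initializes every Tx descriptor with the
 * DESC_DONE dtype, so after a reset the transmit-cleanup logic sees the whole
 * ring as already completed; next_dd/next_rs start at rs_thresh - 1, i.e. at
 * the last descriptor of the first rs_thresh-sized block.
 */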
235
236 static int
237 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
238 {
239         volatile union iavf_rx_desc *rxd;
240         struct rte_mbuf *mbuf = NULL;
241         uint64_t dma_addr;
242         uint16_t i;
243
244         for (i = 0; i < rxq->nb_rx_desc; i++) {
245                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
246                 if (unlikely(!mbuf)) {
247                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
248                         return -ENOMEM;
249                 }
250
251                 rte_mbuf_refcnt_set(mbuf, 1);
252                 mbuf->next = NULL;
253                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
254                 mbuf->nb_segs = 1;
255                 mbuf->port = rxq->port_id;
256
257                 dma_addr =
258                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
259
260                 rxd = &rxq->rx_ring[i];
261                 rxd->read.pkt_addr = dma_addr;
262                 rxd->read.hdr_addr = 0;
263 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
264                 rxd->read.rsvd1 = 0;
265                 rxd->read.rsvd2 = 0;
266 #endif
267
268                 rxq->sw_ring[i] = mbuf;
269         }
270
271         return 0;
272 }
273
274 static inline void
275 release_rxq_mbufs(struct iavf_rx_queue *rxq)
276 {
277         uint16_t i;
278
279         if (!rxq->sw_ring)
280                 return;
281
282         for (i = 0; i < rxq->nb_rx_desc; i++) {
283                 if (rxq->sw_ring[i]) {
284                         rte_pktmbuf_free_seg(rxq->sw_ring[i]);
285                         rxq->sw_ring[i] = NULL;
286                 }
287         }
288
289         /* for rx bulk */
290         if (rxq->rx_nb_avail == 0)
291                 return;
292         for (i = 0; i < rxq->rx_nb_avail; i++) {
293                 struct rte_mbuf *mbuf;
294
295                 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
296                 rte_pktmbuf_free_seg(mbuf);
297         }
298         rxq->rx_nb_avail = 0;
299 }
300
301 static inline void
302 release_txq_mbufs(struct iavf_tx_queue *txq)
303 {
304         uint16_t i;
305
306         if (!txq || !txq->sw_ring) {
307                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
308                 return;
309         }
310
311         for (i = 0; i < txq->nb_tx_desc; i++) {
312                 if (txq->sw_ring[i].mbuf) {
313                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
314                         txq->sw_ring[i].mbuf = NULL;
315                 }
316         }
317 }
318
319 static const struct iavf_rxq_ops def_rxq_ops = {
320         .release_mbufs = release_rxq_mbufs,
321 };
322
323 static const struct iavf_txq_ops def_txq_ops = {
324         .release_mbufs = release_txq_mbufs,
325 };
326
327 static inline void
328 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
329                                     struct rte_mbuf *mb,
330                                     volatile union iavf_rx_flex_desc *rxdp)
331 {
332         volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
333                         (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
334 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
335         uint16_t stat_err;
336 #endif
337
338         if (desc->flow_id != 0xFFFFFFFF) {
339                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
340                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
341         }
342
343 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
344         stat_err = rte_le_to_cpu_16(desc->status_error0);
345         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
346                 mb->ol_flags |= PKT_RX_RSS_HASH;
347                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
348         }
349 #endif
350 }
351
352 static inline void
353 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
354                                        struct rte_mbuf *mb,
355                                        volatile union iavf_rx_flex_desc *rxdp)
356 {
357         volatile struct iavf_32b_rx_flex_desc_comms *desc =
358                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
359         uint16_t stat_err;
360
361         stat_err = rte_le_to_cpu_16(desc->status_error0);
362         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
363                 mb->ol_flags |= PKT_RX_RSS_HASH;
364                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
365         }
366
367 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
368         if (desc->flow_id != 0xFFFFFFFF) {
369                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
370                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
371         }
372
373         if (rxq->xtr_ol_flag) {
374                 uint32_t metadata = 0;
375
376                 stat_err = rte_le_to_cpu_16(desc->status_error1);
377
378                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
379                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
380
381                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
382                         metadata |=
383                                 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
384
385                 if (metadata) {
386                         mb->ol_flags |= rxq->xtr_ol_flag;
387
388                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
389                 }
390         }
391 #endif
392 }
393
394 static inline void
395 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
396                                        struct rte_mbuf *mb,
397                                        volatile union iavf_rx_flex_desc *rxdp)
398 {
399         volatile struct iavf_32b_rx_flex_desc_comms *desc =
400                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
401         uint16_t stat_err;
402
403         stat_err = rte_le_to_cpu_16(desc->status_error0);
404         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
405                 mb->ol_flags |= PKT_RX_RSS_HASH;
406                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
407         }
408
409 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
410         if (desc->flow_id != 0xFFFFFFFF) {
411                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
412                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
413         }
414
415         if (rxq->xtr_ol_flag) {
416                 uint32_t metadata = 0;
417
418                 if (desc->flex_ts.flex.aux0 != 0xFFFF)
419                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
420                 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
421                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
422
423                 if (metadata) {
424                         mb->ol_flags |= rxq->xtr_ol_flag;
425
426                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
427                 }
428         }
429 #endif
430 }
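
/*
 * Editor's note: the two aux handlers above differ only in how they decide
 * whether extracted metadata is present. The v1 format checks the XTRMD4/5
 * valid bits in status_error1 and combines aux0/aux1 into the low/high 16
 * bits of the metadata, while the v2 format (used for IP_OFFSET) treats an
 * aux word of 0xFFFF as "no data" and otherwise takes the first valid aux
 * word as the metadata value.
 */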
431
432 static void
433 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
434 {
435         switch (rxdid) {
436         case IAVF_RXDID_COMMS_AUX_VLAN:
437                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
438                 rxq->rxd_to_pkt_fields =
439                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
440                 break;
441         case IAVF_RXDID_COMMS_AUX_IPV4:
442                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
443                 rxq->rxd_to_pkt_fields =
444                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
445                 break;
446         case IAVF_RXDID_COMMS_AUX_IPV6:
447                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
448                 rxq->rxd_to_pkt_fields =
449                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
450                 break;
451         case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
452                 rxq->xtr_ol_flag =
453                         rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
454                 rxq->rxd_to_pkt_fields =
455                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
456                 break;
457         case IAVF_RXDID_COMMS_AUX_TCP:
458                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
459                 rxq->rxd_to_pkt_fields =
460                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
461                 break;
462         case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
463                 rxq->xtr_ol_flag =
464                         rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
465                 rxq->rxd_to_pkt_fields =
466                         iavf_rxd_to_pkt_fields_by_comms_aux_v2;
467                 break;
468         case IAVF_RXDID_COMMS_OVS_1:
469                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
470                 break;
471         default:
472                 /* update this according to the RXDID for FLEX_DESC_NONE */
473                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
474                 break;
475         }
476
477         if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
478                 rxq->xtr_ol_flag = 0;
479 }
480
481 int
482 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
483                        uint16_t nb_desc, unsigned int socket_id,
484                        const struct rte_eth_rxconf *rx_conf,
485                        struct rte_mempool *mp)
486 {
487         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
488         struct iavf_adapter *ad =
489                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
490         struct iavf_info *vf =
491                 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
492         struct iavf_vsi *vsi = &vf->vsi;
493         struct iavf_rx_queue *rxq;
494         const struct rte_memzone *mz;
495         uint32_t ring_size;
496         uint8_t proto_xtr;
497         uint16_t len;
498         uint16_t rx_free_thresh;
499
500         PMD_INIT_FUNC_TRACE();
501
502         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
503             nb_desc > IAVF_MAX_RING_DESC ||
504             nb_desc < IAVF_MIN_RING_DESC) {
505                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
506                              "invalid", nb_desc);
507                 return -EINVAL;
508         }
509
510         /* Check free threshold */
511         rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
512                          IAVF_DEFAULT_RX_FREE_THRESH :
513                          rx_conf->rx_free_thresh;
514         if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
515                 return -EINVAL;
516
517         /* Free memory if needed */
518         if (dev->data->rx_queues[queue_idx]) {
519                 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
520                 dev->data->rx_queues[queue_idx] = NULL;
521         }
522
523         /* Allocate the rx queue data structure */
524         rxq = rte_zmalloc_socket("iavf rxq",
525                                  sizeof(struct iavf_rx_queue),
526                                  RTE_CACHE_LINE_SIZE,
527                                  socket_id);
528         if (!rxq) {
529                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
530                              "rx queue data structure");
531                 return -ENOMEM;
532         }
533
534         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
535                 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
536                                 IAVF_PROTO_XTR_NONE;
537                 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
538                 rxq->proto_xtr = proto_xtr;
539         } else {
540                 rxq->rxdid = IAVF_RXDID_LEGACY_1;
541                 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
542         }
543
544         iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
545
546         rxq->mp = mp;
547         rxq->nb_rx_desc = nb_desc;
548         rxq->rx_free_thresh = rx_free_thresh;
549         rxq->queue_id = queue_idx;
550         rxq->port_id = dev->data->port_id;
551         rxq->crc_len = 0; /* crc stripping by default */
552         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
553         rxq->rx_hdr_len = 0;
554         rxq->vsi = vsi;
555
556         len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
557         rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
558
559         /* Allocate the software ring. */
560         len = nb_desc + IAVF_RX_MAX_BURST;
561         rxq->sw_ring =
562                 rte_zmalloc_socket("iavf rx sw ring",
563                                    sizeof(struct rte_mbuf *) * len,
564                                    RTE_CACHE_LINE_SIZE,
565                                    socket_id);
566         if (!rxq->sw_ring) {
567                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
568                 rte_free(rxq);
569                 return -ENOMEM;
570         }
571
572         /* Allocate the maximum number of RX ring hardware descriptors with
573          * a little more to support bulk allocation.
574          */
575         len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
576         ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
577                               IAVF_DMA_MEM_ALIGN);
578         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
579                                       ring_size, IAVF_RING_BASE_ALIGN,
580                                       socket_id);
581         if (!mz) {
582                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
583                 rte_free(rxq->sw_ring);
584                 rte_free(rxq);
585                 return -ENOMEM;
586         }
587         /* Zero all the descriptors in the ring. */
588         memset(mz->addr, 0, ring_size);
589         rxq->rx_ring_phys_addr = mz->iova;
590         rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
591
592         rxq->mz = mz;
593         reset_rx_queue(rxq);
594         rxq->q_set = true;
595         dev->data->rx_queues[queue_idx] = rxq;
596         rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
597         rxq->ops = &def_rxq_ops;
598
599         if (check_rx_bulk_allow(rxq) == true) {
600                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
601                              "satisfied. Rx Burst Bulk Alloc function will be "
602                              "used on port=%d, queue=%d.",
603                              rxq->port_id, rxq->queue_id);
604         } else {
605                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
606                              "not satisfied, or Scattered Rx is requested "
607                              "on port=%d, queue=%d.",
608                              rxq->port_id, rxq->queue_id);
609                 ad->rx_bulk_alloc_allowed = false;
610         }
611
612         if (check_rx_vec_allow(rxq) == false)
613                 ad->rx_vec_allowed = false;
614
615         return 0;
616 }
617
618 int
619 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
620                        uint16_t queue_idx,
621                        uint16_t nb_desc,
622                        unsigned int socket_id,
623                        const struct rte_eth_txconf *tx_conf)
624 {
625         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
626         struct iavf_tx_queue *txq;
627         const struct rte_memzone *mz;
628         uint32_t ring_size;
629         uint16_t tx_rs_thresh, tx_free_thresh;
630         uint64_t offloads;
631
632         PMD_INIT_FUNC_TRACE();
633
634         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
635
636         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
637             nb_desc > IAVF_MAX_RING_DESC ||
638             nb_desc < IAVF_MIN_RING_DESC) {
639                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
640                             "invalid", nb_desc);
641                 return -EINVAL;
642         }
643
644         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
645                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
646         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
647                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
648         if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                    return -EINVAL;
649
650         /* Free memory if needed. */
651         if (dev->data->tx_queues[queue_idx]) {
652                 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
653                 dev->data->tx_queues[queue_idx] = NULL;
654         }
655
656         /* Allocate the TX queue data structure. */
657         txq = rte_zmalloc_socket("iavf txq",
658                                  sizeof(struct iavf_tx_queue),
659                                  RTE_CACHE_LINE_SIZE,
660                                  socket_id);
661         if (!txq) {
662                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
663                              "tx queue structure");
664                 return -ENOMEM;
665         }
666
667         txq->nb_tx_desc = nb_desc;
668         txq->rs_thresh = tx_rs_thresh;
669         txq->free_thresh = tx_free_thresh;
670         txq->queue_id = queue_idx;
671         txq->port_id = dev->data->port_id;
672         txq->offloads = offloads;
673         txq->tx_deferred_start = tx_conf->tx_deferred_start;
674
675         /* Allocate software ring */
676         txq->sw_ring =
677                 rte_zmalloc_socket("iavf tx sw ring",
678                                    sizeof(struct iavf_tx_entry) * nb_desc,
679                                    RTE_CACHE_LINE_SIZE,
680                                    socket_id);
681         if (!txq->sw_ring) {
682                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
683                 rte_free(txq);
684                 return -ENOMEM;
685         }
686
687         /* Allocate TX hardware ring descriptors. */
688         ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
689         ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
690         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
691                                       ring_size, IAVF_RING_BASE_ALIGN,
692                                       socket_id);
693         if (!mz) {
694                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
695                 rte_free(txq->sw_ring);
696                 rte_free(txq);
697                 return -ENOMEM;
698         }
699         txq->tx_ring_phys_addr = mz->iova;
700         txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
701
702         txq->mz = mz;
703         reset_tx_queue(txq);
704         txq->q_set = true;
705         dev->data->tx_queues[queue_idx] = txq;
706         txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
707         txq->ops = &def_txq_ops;
708
709         if (check_tx_vec_allow(txq) == false) {
710                 struct iavf_adapter *ad =
711                         IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
712                 ad->tx_vec_allowed = false;
713         }
714
715         return 0;
716 }
717
718 int
719 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
720 {
721         struct iavf_adapter *adapter =
722                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
723         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
724         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
725         struct iavf_rx_queue *rxq;
726         int err = 0;
727
728         PMD_DRV_FUNC_TRACE();
729
730         if (rx_queue_id >= dev->data->nb_rx_queues)
731                 return -EINVAL;
732
733         rxq = dev->data->rx_queues[rx_queue_id];
734
735         err = alloc_rxq_mbufs(rxq);
736         if (err) {
737                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
738                 return err;
739         }
740
741         rte_wmb();
742
743         /* Init the RX tail register. */
744         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
745         IAVF_WRITE_FLUSH(hw);
746
747         /* Ready to switch the queue on */
748         if (!vf->lv_enabled)
749                 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
750         else
751                 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
752
753         if (err)
754                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
755                             rx_queue_id);
756         else
757                 dev->data->rx_queue_state[rx_queue_id] =
758                         RTE_ETH_QUEUE_STATE_STARTED;
759
760         return err;
761 }
762
763 int
764 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
765 {
766         struct iavf_adapter *adapter =
767                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
768         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
769         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
770         struct iavf_tx_queue *txq;
771         int err = 0;
772
773         PMD_DRV_FUNC_TRACE();
774
775         if (tx_queue_id >= dev->data->nb_tx_queues)
776                 return -EINVAL;
777
778         txq = dev->data->tx_queues[tx_queue_id];
779
780         /* Init the TX tail register. */
781         IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
782         IAVF_WRITE_FLUSH(hw);
783
784         /* Ready to switch the queue on */
785         if (!vf->lv_enabled)
786                 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
787         else
788                 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
789
790         if (err)
791                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
792                             tx_queue_id);
793         else
794                 dev->data->tx_queue_state[tx_queue_id] =
795                         RTE_ETH_QUEUE_STATE_STARTED;
796
797         return err;
798 }
799
800 int
801 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
802 {
803         struct iavf_adapter *adapter =
804                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
805         struct iavf_rx_queue *rxq;
806         int err;
807
808         PMD_DRV_FUNC_TRACE();
809
810         if (rx_queue_id >= dev->data->nb_rx_queues)
811                 return -EINVAL;
812
813         err = iavf_switch_queue(adapter, rx_queue_id, true, false);
814         if (err) {
815                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
816                             rx_queue_id);
817                 return err;
818         }
819
820         rxq = dev->data->rx_queues[rx_queue_id];
821         rxq->ops->release_mbufs(rxq);
822         reset_rx_queue(rxq);
823         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
824
825         return 0;
826 }
827
828 int
829 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
830 {
831         struct iavf_adapter *adapter =
832                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
833         struct iavf_tx_queue *txq;
834         int err;
835
836         PMD_DRV_FUNC_TRACE();
837
838         if (tx_queue_id >= dev->data->nb_tx_queues)
839                 return -EINVAL;
840
841         err = iavf_switch_queue(adapter, tx_queue_id, false, false);
842         if (err) {
843                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
844                             tx_queue_id);
845                 return err;
846         }
847
848         txq = dev->data->tx_queues[tx_queue_id];
849         txq->ops->release_mbufs(txq);
850         reset_tx_queue(txq);
851         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
852
853         return 0;
854 }
855
856 void
857 iavf_dev_rx_queue_release(void *rxq)
858 {
859         struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
860
861         if (!q)
862                 return;
863
864         q->ops->release_mbufs(q);
865         rte_free(q->sw_ring);
866         rte_memzone_free(q->mz);
867         rte_free(q);
868 }
869
870 void
871 iavf_dev_tx_queue_release(void *txq)
872 {
873         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
874
875         if (!q)
876                 return;
877
878         q->ops->release_mbufs(q);
879         rte_free(q->sw_ring);
880         rte_memzone_free(q->mz);
881         rte_free(q);
882 }
883
884 void
885 iavf_stop_queues(struct rte_eth_dev *dev)
886 {
887         struct iavf_adapter *adapter =
888                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
889         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
890         struct iavf_rx_queue *rxq;
891         struct iavf_tx_queue *txq;
892         int ret, i;
893
894         /* Stop All queues */
895         if (!vf->lv_enabled) {
896                 ret = iavf_disable_queues(adapter);
897                 if (ret)
898                         PMD_DRV_LOG(WARNING, "Fail to stop queues");
899         } else {
900                 ret = iavf_disable_queues_lv(adapter);
901                 if (ret)
902                         PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
903         }
904
908         for (i = 0; i < dev->data->nb_tx_queues; i++) {
909                 txq = dev->data->tx_queues[i];
910                 if (!txq)
911                         continue;
912                 txq->ops->release_mbufs(txq);
913                 reset_tx_queue(txq);
914                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
915         }
916         for (i = 0; i < dev->data->nb_rx_queues; i++) {
917                 rxq = dev->data->rx_queues[i];
918                 if (!rxq)
919                         continue;
920                 rxq->ops->release_mbufs(rxq);
921                 reset_rx_queue(rxq);
922                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
923         }
924 }
925
934 static inline void
935 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
936 {
937         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
938                 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
939                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
940                 mb->vlan_tci =
941                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
942         } else {
943                 mb->vlan_tci = 0;
944         }
945 }
946
947 static inline void
948 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
949                           volatile union iavf_rx_flex_desc *rxdp)
950 {
951         if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
952                 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
953                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
954                 mb->vlan_tci =
955                         rte_le_to_cpu_16(rxdp->wb.l2tag1);
956         } else {
957                 mb->vlan_tci = 0;
958         }
959
960 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
961         if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
962             (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
963                 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
964                                 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
965                 mb->vlan_tci_outer = mb->vlan_tci;
966                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
967                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
968                            rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
969                            rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
970         } else {
971                 mb->vlan_tci_outer = 0;
972         }
973 #endif
974 }
975
976 /* Translate the rx descriptor status and error fields to pkt flags */
977 static inline uint64_t
978 iavf_rxd_to_pkt_flags(uint64_t qword)
979 {
980         uint64_t flags;
981         uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
982
983 #define IAVF_RX_ERR_BITS 0x3f
984
985         /* Check if RSS_HASH */
986         flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
987                                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
988                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
989
990         /* Check if FDIR Match */
991         flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
992                                 PKT_RX_FDIR : 0);
993
994         if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
995                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
996                 return flags;
997         }
998
999         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1000                 flags |= PKT_RX_IP_CKSUM_BAD;
1001         else
1002                 flags |= PKT_RX_IP_CKSUM_GOOD;
1003
1004         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1005                 flags |= PKT_RX_L4_CKSUM_BAD;
1006         else
1007                 flags |= PKT_RX_L4_CKSUM_GOOD;
1008
1009         /* TODO: Oversize error bit is not processed here */
1010
1011         return flags;
1012 }
1013
1014 static inline uint64_t
1015 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1016 {
1017         uint64_t flags = 0;
1018 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1019         uint16_t flexbh;
1020
1021         flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1022                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1023                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1024
1025         if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1026                 mb->hash.fdir.hi =
1027                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1028                 flags |= PKT_RX_FDIR_ID;
1029         }
1030 #else
1031         mb->hash.fdir.hi =
1032                 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1033         flags |= PKT_RX_FDIR_ID;
1034 #endif
1035         return flags;
1036 }
1037
1038 #define IAVF_RX_FLEX_ERR0_BITS  \
1039         ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |       \
1040          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |  \
1041          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |  \
1042          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1043          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |        \
1044          (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1045
1046 /* Rx L3/L4 checksum */
1047 static inline uint64_t
1048 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1049 {
1050         uint64_t flags = 0;
1051
1052         /* check if HW has decoded the packet and checksum */
1053         if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1054                 return 0;
1055
1056         if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1057                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1058                 return flags;
1059         }
1060
1061         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1062                 flags |= PKT_RX_IP_CKSUM_BAD;
1063         else
1064                 flags |= PKT_RX_IP_CKSUM_GOOD;
1065
1066         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1067                 flags |= PKT_RX_L4_CKSUM_BAD;
1068         else
1069                 flags |= PKT_RX_L4_CKSUM_GOOD;
1070
1071         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1072                 flags |= PKT_RX_EIP_CKSUM_BAD;
1073
1074         return flags;
1075 }
1076
1077 /* If the number of free RX descriptors is greater than the RX free
1078  * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1079  * register. Update the RDT with the value of the last processed RX
1080  * descriptor minus 1, to guarantee that the RDT register is never
1081  * equal to the RDH register, which creates a "full" ring situation
1082  * from the hardware point of view.
1083  */
1084 static inline void
1085 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1086 {
1087         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1088
1089         if (nb_hold > rxq->rx_free_thresh) {
1090                 PMD_RX_LOG(DEBUG,
1091                            "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1092                            rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1093                 rx_id = (uint16_t)((rx_id == 0) ?
1094                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1095                 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1096                 nb_hold = 0;
1097         }
1098         rxq->nb_rx_hold = nb_hold;
1099 }
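
/*
 * Editor's note: a small worked example of the hold/tail accounting above
 * (illustrative values). With rx_free_thresh = 32, the tail register is left
 * untouched until more than 32 descriptors have been processed; the write
 * then uses rx_id - 1 (or nb_rx_desc - 1 when rx_id has wrapped to 0), so the
 * tail never catches up with the head and the ring is never seen as full by
 * the hardware.
 */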
1100
1101 /* implement recv_pkts */
1102 uint16_t
1103 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1104 {
1105         volatile union iavf_rx_desc *rx_ring;
1106         volatile union iavf_rx_desc *rxdp;
1107         struct iavf_rx_queue *rxq;
1108         union iavf_rx_desc rxd;
1109         struct rte_mbuf *rxe;
1110         struct rte_eth_dev *dev;
1111         struct rte_mbuf *rxm;
1112         struct rte_mbuf *nmb;
1113         uint16_t nb_rx;
1114         uint32_t rx_status;
1115         uint64_t qword1;
1116         uint16_t rx_packet_len;
1117         uint16_t rx_id, nb_hold;
1118         uint64_t dma_addr;
1119         uint64_t pkt_flags;
1120         const uint32_t *ptype_tbl;
1121
1122         nb_rx = 0;
1123         nb_hold = 0;
1124         rxq = rx_queue;
1125         rx_id = rxq->rx_tail;
1126         rx_ring = rxq->rx_ring;
1127         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1128
1129         while (nb_rx < nb_pkts) {
1130                 rxdp = &rx_ring[rx_id];
1131                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1132                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1133                             IAVF_RXD_QW1_STATUS_SHIFT;
1134
1135                 /* Check the DD bit first */
1136                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1137                         break;
1138                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1139
1140                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1141                 if (unlikely(!nmb)) {
1142                         dev = &rte_eth_devices[rxq->port_id];
1143                         dev->data->rx_mbuf_alloc_failed++;
1144                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1145                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1146                         break;
1147                 }
1148
1149                 rxd = *rxdp;
1150                 nb_hold++;
1151                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1152                 rx_id++;
1153                 if (unlikely(rx_id == rxq->nb_rx_desc))
1154                         rx_id = 0;
1155
1156                 /* Prefetch next mbuf */
1157                 rte_prefetch0(rxq->sw_ring[rx_id]);
1158
1159                 /* When next RX descriptor is on a cache line boundary,
1160                  * prefetch the next 4 RX descriptors and next 8 pointers
1161                  * to mbufs.
1162                  */
1163                 if ((rx_id & 0x3) == 0) {
1164                         rte_prefetch0(&rx_ring[rx_id]);
1165                         rte_prefetch0(rxq->sw_ring[rx_id]);
1166                 }
1167                 rxm = rxe;
1168                 dma_addr =
1169                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1170                 rxdp->read.hdr_addr = 0;
1171                 rxdp->read.pkt_addr = dma_addr;
1172
1173                 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1174                                 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1175
1176                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1177                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1178                 rxm->nb_segs = 1;
1179                 rxm->next = NULL;
1180                 rxm->pkt_len = rx_packet_len;
1181                 rxm->data_len = rx_packet_len;
1182                 rxm->port = rxq->port_id;
1183                 rxm->ol_flags = 0;
1184                 iavf_rxd_to_vlan_tci(rxm, &rxd);
1185                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1186                 rxm->packet_type =
1187                         ptype_tbl[(uint8_t)((qword1 &
1188                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1189
1190                 if (pkt_flags & PKT_RX_RSS_HASH)
1191                         rxm->hash.rss =
1192                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1193
1194                 if (pkt_flags & PKT_RX_FDIR)
1195                         pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1196
1197                 rxm->ol_flags |= pkt_flags;
1198
1199                 rx_pkts[nb_rx++] = rxm;
1200         }
1201         rxq->rx_tail = rx_id;
1202
1203         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1204
1205         return nb_rx;
1206 }
1207
1208 /* implement recv_pkts for flexible Rx descriptor */
1209 uint16_t
1210 iavf_recv_pkts_flex_rxd(void *rx_queue,
1211                         struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1212 {
1213         volatile union iavf_rx_desc *rx_ring;
1214         volatile union iavf_rx_flex_desc *rxdp;
1215         struct iavf_rx_queue *rxq;
1216         union iavf_rx_flex_desc rxd;
1217         struct rte_mbuf *rxe;
1218         struct rte_eth_dev *dev;
1219         struct rte_mbuf *rxm;
1220         struct rte_mbuf *nmb;
1221         uint16_t nb_rx;
1222         uint16_t rx_stat_err0;
1223         uint16_t rx_packet_len;
1224         uint16_t rx_id, nb_hold;
1225         uint64_t dma_addr;
1226         uint64_t pkt_flags;
1227         const uint32_t *ptype_tbl;
1228
1229         nb_rx = 0;
1230         nb_hold = 0;
1231         rxq = rx_queue;
1232         rx_id = rxq->rx_tail;
1233         rx_ring = rxq->rx_ring;
1234         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1235
1236         while (nb_rx < nb_pkts) {
1237                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1238                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1239
1240                 /* Check the DD bit first */
1241                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1242                         break;
1243                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1244
1245                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1246                 if (unlikely(!nmb)) {
1247                         dev = &rte_eth_devices[rxq->port_id];
1248                         dev->data->rx_mbuf_alloc_failed++;
1249                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1250                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1251                         break;
1252                 }
1253
1254                 rxd = *rxdp;
1255                 nb_hold++;
1256                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1257                 rx_id++;
1258                 if (unlikely(rx_id == rxq->nb_rx_desc))
1259                         rx_id = 0;
1260
1261                 /* Prefetch next mbuf */
1262                 rte_prefetch0(rxq->sw_ring[rx_id]);
1263
1264                 /* When next RX descriptor is on a cache line boundary,
1265                  * prefetch the next 4 RX descriptors and next 8 pointers
1266                  * to mbufs.
1267                  */
1268                 if ((rx_id & 0x3) == 0) {
1269                         rte_prefetch0(&rx_ring[rx_id]);
1270                         rte_prefetch0(rxq->sw_ring[rx_id]);
1271                 }
1272                 rxm = rxe;
1273                 dma_addr =
1274                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1275                 rxdp->read.hdr_addr = 0;
1276                 rxdp->read.pkt_addr = dma_addr;
1277
1278                 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1279                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1280
1281                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1282                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1283                 rxm->nb_segs = 1;
1284                 rxm->next = NULL;
1285                 rxm->pkt_len = rx_packet_len;
1286                 rxm->data_len = rx_packet_len;
1287                 rxm->port = rxq->port_id;
1288                 rxm->ol_flags = 0;
1289                 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1290                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1291                 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1292                 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1293                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1294                 rxm->ol_flags |= pkt_flags;
1295
1296                 rx_pkts[nb_rx++] = rxm;
1297         }
1298         rxq->rx_tail = rx_id;
1299
1300         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1301
1302         return nb_rx;
1303 }
1304
1305 /* implement recv_scattered_pkts for flexible Rx descriptor */
1306 uint16_t
1307 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1308                                   uint16_t nb_pkts)
1309 {
1310         struct iavf_rx_queue *rxq = rx_queue;
1311         union iavf_rx_flex_desc rxd;
1312         struct rte_mbuf *rxe;
1313         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1314         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1315         struct rte_mbuf *nmb, *rxm;
1316         uint16_t rx_id = rxq->rx_tail;
1317         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1318         struct rte_eth_dev *dev;
1319         uint16_t rx_stat_err0;
1320         uint64_t dma_addr;
1321         uint64_t pkt_flags;
1322
1323         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1324         volatile union iavf_rx_flex_desc *rxdp;
1325         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1326
1327         while (nb_rx < nb_pkts) {
1328                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1329                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1330
1331                 /* Check the DD bit */
1332                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1333                         break;
1334                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1335
1336                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1337                 if (unlikely(!nmb)) {
1338                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1339                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1340                         dev = &rte_eth_devices[rxq->port_id];
1341                         dev->data->rx_mbuf_alloc_failed++;
1342                         break;
1343                 }
1344
1345                 rxd = *rxdp;
1346                 nb_hold++;
1347                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1348                 rx_id++;
1349                 if (rx_id == rxq->nb_rx_desc)
1350                         rx_id = 0;
1351
1352                 /* Prefetch next mbuf */
1353                 rte_prefetch0(rxq->sw_ring[rx_id]);
1354
1355                 /* When next RX descriptor is on a cache line boundary,
1356                  * prefetch the next 4 RX descriptors and next 8 pointers
1357                  * to mbufs.
1358                  */
1359                 if ((rx_id & 0x3) == 0) {
1360                         rte_prefetch0(&rx_ring[rx_id]);
1361                         rte_prefetch0(rxq->sw_ring[rx_id]);
1362                 }
1363
1364                 rxm = rxe;
1365                 dma_addr =
1366                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1367
1368                 /* Set data buffer address and data length of the mbuf */
1369                 rxdp->read.hdr_addr = 0;
1370                 rxdp->read.pkt_addr = dma_addr;
1371                 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1372                                 IAVF_RX_FLX_DESC_PKT_LEN_M;
1373                 rxm->data_len = rx_packet_len;
1374                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1375
1376                 /* If this is the first buffer of the received packet, set the
1377                  * pointer to the first mbuf of the packet and initialize its
1378                  * context. Otherwise, update the total length and the number
1379                  * of segments of the current scattered packet, and update the
1380                  * pointer to the last mbuf of the current packet.
1381                  */
1382                 if (!first_seg) {
1383                         first_seg = rxm;
1384                         first_seg->nb_segs = 1;
1385                         first_seg->pkt_len = rx_packet_len;
1386                 } else {
1387                         first_seg->pkt_len =
1388                                 (uint16_t)(first_seg->pkt_len +
1389                                                 rx_packet_len);
1390                         first_seg->nb_segs++;
1391                         last_seg->next = rxm;
1392                 }
1393
1394                 /* If this is not the last buffer of the received packet,
1395                  * update the pointer to the last mbuf of the current scattered
1396                  * packet and continue to parse the RX ring.
1397                  */
1398                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1399                         last_seg = rxm;
1400                         continue;
1401                 }
1402
1403                 /* This is the last buffer of the received packet. If the CRC
1404                  * is not stripped by the hardware:
1405                  *  - Subtract the CRC length from the total packet length.
1406                  *  - If the last buffer only contains the whole CRC or a part
1407                  *  of it, free the mbuf associated to the last buffer. If part
1408                  *  of the CRC is also contained in the previous mbuf, subtract
1409                  *  the length of that CRC part from the data length of the
1410                  *  previous mbuf.
1411                  */
1412                 rxm->next = NULL;
1413                 if (unlikely(rxq->crc_len > 0)) {
1414                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1415                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1416                                 rte_pktmbuf_free_seg(rxm);
1417                                 first_seg->nb_segs--;
1418                                 last_seg->data_len =
1419                                         (uint16_t)(last_seg->data_len -
1420                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1421                                 last_seg->next = NULL;
1422                         } else {
1423                                 rxm->data_len = (uint16_t)(rx_packet_len -
1424                                                         RTE_ETHER_CRC_LEN);
1425                         }
1426                 }
1427
1428                 first_seg->port = rxq->port_id;
1429                 first_seg->ol_flags = 0;
1430                 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1431                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1432                 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1433                 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1434                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1435
1436                 first_seg->ol_flags |= pkt_flags;
1437
1438                 /* Prefetch data of first segment, if configured to do so. */
1439                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1440                                           first_seg->data_off));
1441                 rx_pkts[nb_rx++] = first_seg;
1442                 first_seg = NULL;
1443         }
1444
1445         /* Record index of the next RX descriptor to probe. */
1446         rxq->rx_tail = rx_id;
1447         rxq->pkt_first_seg = first_seg;
1448         rxq->pkt_last_seg = last_seg;
1449
1450         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1451
1452         return nb_rx;
1453 }
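
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * how the CRC handling above splits RTE_ETHER_CRC_LEN between the last two
 * segments when the hardware does not strip the CRC. If the final buffer
 * holds only (part of) the CRC, it is freed and the remainder is trimmed
 * from the previous segment; otherwise the CRC is cut from the final buffer.
 */
static void
iavf_crc_trim_sketch(uint16_t last_buf_len, uint16_t *prev_data_len,
		     uint16_t *last_data_len, int *free_last_buf)
{
	if (last_buf_len <= RTE_ETHER_CRC_LEN) {
		/* e.g. last_buf_len = 2: free the last buffer and trim the
		 * remaining 2 CRC bytes from the previous segment.
		 */
		*free_last_buf = 1;
		*prev_data_len -= (uint16_t)(RTE_ETHER_CRC_LEN - last_buf_len);
	} else {
		/* The CRC is fully contained in the last buffer. */
		*free_last_buf = 0;
		*last_data_len = (uint16_t)(last_buf_len - RTE_ETHER_CRC_LEN);
	}
}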
1454
1455 /* Implement recv_scattered_pkts using the basic Rx descriptor format. */
1456 uint16_t
1457 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1458                         uint16_t nb_pkts)
1459 {
1460         struct iavf_rx_queue *rxq = rx_queue;
1461         union iavf_rx_desc rxd;
1462         struct rte_mbuf *rxe;
1463         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1464         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1465         struct rte_mbuf *nmb, *rxm;
1466         uint16_t rx_id = rxq->rx_tail;
1467         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1468         struct rte_eth_dev *dev;
1469         uint32_t rx_status;
1470         uint64_t qword1;
1471         uint64_t dma_addr;
1472         uint64_t pkt_flags;
1473
1474         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1475         volatile union iavf_rx_desc *rxdp;
1476         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1477
1478         while (nb_rx < nb_pkts) {
1479                 rxdp = &rx_ring[rx_id];
1480                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1481                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1482                             IAVF_RXD_QW1_STATUS_SHIFT;
1483
1484                 /* Check the DD bit */
1485                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1486                         break;
1487                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1488
1489                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1490                 if (unlikely(!nmb)) {
1491                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1492                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1493                         dev = &rte_eth_devices[rxq->port_id];
1494                         dev->data->rx_mbuf_alloc_failed++;
1495                         break;
1496                 }
1497
1498                 rxd = *rxdp;
1499                 nb_hold++;
1500                 rxe = rxq->sw_ring[rx_id];
1501                 rx_id++;
1502                 if (rx_id == rxq->nb_rx_desc)
1503                         rx_id = 0;
1504
1505                 /* Prefetch next mbuf */
1506                 rte_prefetch0(rxq->sw_ring[rx_id]);
1507
1508                 /* When the next RX descriptor is on a cache line boundary,
1509                  * prefetch the next 4 RX descriptors and the next 8 pointers
1510                  * to mbufs.
1511                  */
1512                 if ((rx_id & 0x3) == 0) {
1513                         rte_prefetch0(&rx_ring[rx_id]);
1514                         rte_prefetch0(rxq->sw_ring[rx_id]);
1515                 }
1516
1517                 rxm = rxe;
1518                 dma_addr =
1519                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1520
1521                 /* Set data buffer address and data length of the mbuf */
1522                 rxdp->read.hdr_addr = 0;
1523                 rxdp->read.pkt_addr = dma_addr;
1524                 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1525                                  IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1526                 rxm->data_len = rx_packet_len;
1527                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1528
1529                 /* If this is the first buffer of the received packet, set the
1530                  * pointer to the first mbuf of the packet and initialize its
1531                  * context. Otherwise, update the total length and the number
1532                  * of segments of the current scattered packet, and update the
1533                  * pointer to the last mbuf of the current packet.
1534                  */
1535                 if (!first_seg) {
1536                         first_seg = rxm;
1537                         first_seg->nb_segs = 1;
1538                         first_seg->pkt_len = rx_packet_len;
1539                 } else {
1540                         first_seg->pkt_len =
1541                                 (uint16_t)(first_seg->pkt_len +
1542                                                 rx_packet_len);
1543                         first_seg->nb_segs++;
1544                         last_seg->next = rxm;
1545                 }
1546
1547                 /* If this is not the last buffer of the received packet,
1548                  * update the pointer to the last mbuf of the current scattered
1549                  * packet and continue to parse the RX ring.
1550                  */
1551                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1552                         last_seg = rxm;
1553                         continue;
1554                 }
1555
1556                 /* This is the last buffer of the received packet. If the CRC
1557                  * is not stripped by the hardware:
1558                  *  - Subtract the CRC length from the total packet length.
1559                  *  - If the last buffer only contains the whole CRC or a part
1560                  *  of it, free the mbuf associated with the last buffer. If part
1561                  *  of the CRC is also contained in the previous mbuf, subtract
1562                  *  the length of that CRC part from the data length of the
1563                  *  previous mbuf.
1564                  */
1565                 rxm->next = NULL;
1566                 if (unlikely(rxq->crc_len > 0)) {
1567                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1568                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1569                                 rte_pktmbuf_free_seg(rxm);
1570                                 first_seg->nb_segs--;
1571                                 last_seg->data_len =
1572                                         (uint16_t)(last_seg->data_len -
1573                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1574                                 last_seg->next = NULL;
1575                         } else
1576                                 rxm->data_len = (uint16_t)(rx_packet_len -
1577                                                         RTE_ETHER_CRC_LEN);
1578                 }
1579
1580                 first_seg->port = rxq->port_id;
1581                 first_seg->ol_flags = 0;
1582                 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1583                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1584                 first_seg->packet_type =
1585                         ptype_tbl[(uint8_t)((qword1 &
1586                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1587
1588                 if (pkt_flags & PKT_RX_RSS_HASH)
1589                         first_seg->hash.rss =
1590                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1591
1592                 if (pkt_flags & PKT_RX_FDIR)
1593                         pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1594
1595                 first_seg->ol_flags |= pkt_flags;
1596
1597                 /* Prefetch data of first segment, if configured to do so. */
1598                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1599                                           first_seg->data_off));
1600                 rx_pkts[nb_rx++] = first_seg;
1601                 first_seg = NULL;
1602         }
1603
1604         /* Record index of the next RX descriptor to probe. */
1605         rxq->rx_tail = rx_id;
1606         rxq->pkt_first_seg = first_seg;
1607         rxq->pkt_last_seg = last_seg;
1608
1609         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1610
1611         return nb_rx;
1612 }
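
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * how the scattered paths above stitch completed buffers into one packet.
 * E.g. three buffers of 1500, 1500 and 64 bytes end up as a single mbuf
 * chain with nb_segs = 3 and pkt_len = 3064.
 */
static void
iavf_chain_seg_sketch(struct rte_mbuf **first_seg, struct rte_mbuf **last_seg,
		      struct rte_mbuf *rxm)
{
	if (*first_seg == NULL) {
		/* First buffer: it starts the chain and owns the totals. */
		*first_seg = rxm;
		rxm->nb_segs = 1;
		rxm->pkt_len = rxm->data_len;
	} else {
		/* Later buffer: update the totals and link it at the tail. */
		(*first_seg)->pkt_len += rxm->data_len;
		(*first_seg)->nb_segs++;
		(*last_seg)->next = rxm;
	}
	*last_seg = rxm;
}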
1613
1614 #define IAVF_LOOK_AHEAD 8
1615 static inline int
1616 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1617 {
1618         volatile union iavf_rx_flex_desc *rxdp;
1619         struct rte_mbuf **rxep;
1620         struct rte_mbuf *mb;
1621         uint16_t stat_err0;
1622         uint16_t pkt_len;
1623         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1624         int32_t i, j, nb_rx = 0;
1625         uint64_t pkt_flags;
1626         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1627
1628         rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1629         rxep = &rxq->sw_ring[rxq->rx_tail];
1630
1631         stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1632
1633         /* Make sure there is at least 1 packet to receive */
1634         if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1635                 return 0;
1636
1637         /* Scan LOOK_AHEAD descriptors at a time to determine which
1638          * descriptors reference packets that are ready to be received.
1639          */
1640         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1641              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1642                 /* Read desc statuses backwards to avoid race condition */
1643                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1644                         s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1645
1646                 rte_smp_rmb();
1647
1648                 /* Compute how many status bits were set */
1649                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1650                         nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1651
1652                 nb_rx += nb_dd;
1653
1654                 /* Translate descriptor info to mbuf parameters */
1655                 for (j = 0; j < nb_dd; j++) {
1656                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1657                                           rxq->rx_tail +
1658                                           i * IAVF_LOOK_AHEAD + j);
1659
1660                         mb = rxep[j];
1661                         pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1662                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1663                         mb->data_len = pkt_len;
1664                         mb->pkt_len = pkt_len;
1665                         mb->ol_flags = 0;
1666
1667                         mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1668                                 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1669                         iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1670                         rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1671                         stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1672                         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1673
1674                         mb->ol_flags |= pkt_flags;
1675                 }
1676
1677                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1678                         rxq->rx_stage[i + j] = rxep[j];
1679
1680                 if (nb_dd != IAVF_LOOK_AHEAD)
1681                         break;
1682         }
1683
1684         /* Clear software ring entries */
1685         for (i = 0; i < nb_rx; i++)
1686                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1687
1688         return nb_rx;
1689 }
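
/* Illustrative sketch (not part of the driver; helper name and dd_bit
 * parameter are made up for generality): counting how many of the
 * IAVF_LOOK_AHEAD descriptors scanned above are complete. Descriptors are
 * written back in order, so summing the DD bits gives the number of leading
 * done descriptors in the group; the scan loop stops at the first group
 * that is not fully done.
 */
static int
iavf_count_dd_sketch(const int32_t s[IAVF_LOOK_AHEAD], uint16_t dd_bit)
{
	int j, nb_dd = 0;

	for (j = 0; j < IAVF_LOOK_AHEAD; j++)
		nb_dd += !!(s[j] & (1 << dd_bit));

	return nb_dd;
}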
1690
1691 static inline int
1692 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1693 {
1694         volatile union iavf_rx_desc *rxdp;
1695         struct rte_mbuf **rxep;
1696         struct rte_mbuf *mb;
1697         uint16_t pkt_len;
1698         uint64_t qword1;
1699         uint32_t rx_status;
1700         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1701         int32_t i, j, nb_rx = 0;
1702         uint64_t pkt_flags;
1703         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1704
1705         rxdp = &rxq->rx_ring[rxq->rx_tail];
1706         rxep = &rxq->sw_ring[rxq->rx_tail];
1707
1708         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1709         rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1710                     IAVF_RXD_QW1_STATUS_SHIFT;
1711
1712         /* Make sure there is at least 1 packet to receive */
1713         if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1714                 return 0;
1715
1716         /* Scan LOOK_AHEAD descriptors at a time to determine which
1717          * descriptors reference packets that are ready to be received.
1718          */
1719         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1720              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1721                 /* Read desc statuses backwards to avoid race condition */
1722                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1723                         qword1 = rte_le_to_cpu_64(
1724                                 rxdp[j].wb.qword1.status_error_len);
1725                         s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1726                                IAVF_RXD_QW1_STATUS_SHIFT;
1727                 }
1728
1729                 rte_smp_rmb();
1730
1731                 /* Compute how many status bits were set */
1732                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1733                         nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1734
1735                 nb_rx += nb_dd;
1736
1737                 /* Translate descriptor info to mbuf parameters */
1738                 for (j = 0; j < nb_dd; j++) {
1739                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1740                                          rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1741
1742                         mb = rxep[j];
1743                         qword1 = rte_le_to_cpu_64
1744                                         (rxdp[j].wb.qword1.status_error_len);
1745                         pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1746                                   IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1747                         mb->data_len = pkt_len;
1748                         mb->pkt_len = pkt_len;
1749                         mb->ol_flags = 0;
1750                         iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1751                         pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1752                         mb->packet_type =
1753                                 ptype_tbl[(uint8_t)((qword1 &
1754                                 IAVF_RXD_QW1_PTYPE_MASK) >>
1755                                 IAVF_RXD_QW1_PTYPE_SHIFT)];
1756
1757                         if (pkt_flags & PKT_RX_RSS_HASH)
1758                                 mb->hash.rss = rte_le_to_cpu_32(
1759                                         rxdp[j].wb.qword0.hi_dword.rss);
1760
1761                         if (pkt_flags & PKT_RX_FDIR)
1762                                 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1763
1764                         mb->ol_flags |= pkt_flags;
1765                 }
1766
1767                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1768                         rxq->rx_stage[i + j] = rxep[j];
1769
1770                 if (nb_dd != IAVF_LOOK_AHEAD)
1771                         break;
1772         }
1773
1774         /* Clear software ring entries */
1775         for (i = 0; i < nb_rx; i++)
1776                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1777
1778         return nb_rx;
1779 }
1780
1781 static inline uint16_t
1782 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1783                        struct rte_mbuf **rx_pkts,
1784                        uint16_t nb_pkts)
1785 {
1786         uint16_t i;
1787         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1788
1789         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1790
1791         for (i = 0; i < nb_pkts; i++)
1792                 rx_pkts[i] = stage[i];
1793
1794         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1795         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1796
1797         return nb_pkts;
1798 }
1799
1800 static inline int
1801 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1802 {
1803         volatile union iavf_rx_desc *rxdp;
1804         struct rte_mbuf **rxep;
1805         struct rte_mbuf *mb;
1806         uint16_t alloc_idx, i;
1807         uint64_t dma_addr;
1808         int diag;
1809
1810         /* Allocate buffers in bulk */
1811         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1812                                 (rxq->rx_free_thresh - 1));
1813         rxep = &rxq->sw_ring[alloc_idx];
1814         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1815                                     rxq->rx_free_thresh);
1816         if (unlikely(diag != 0)) {
1817                 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1818                 return -ENOMEM;
1819         }
1820
1821         rxdp = &rxq->rx_ring[alloc_idx];
1822         for (i = 0; i < rxq->rx_free_thresh; i++) {
1823                 if (likely(i < (rxq->rx_free_thresh - 1)))
1824                         /* Prefetch next mbuf */
1825                         rte_prefetch0(rxep[i + 1]);
1826
1827                 mb = rxep[i];
1828                 rte_mbuf_refcnt_set(mb, 1);
1829                 mb->next = NULL;
1830                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1831                 mb->nb_segs = 1;
1832                 mb->port = rxq->port_id;
1833                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1834                 rxdp[i].read.hdr_addr = 0;
1835                 rxdp[i].read.pkt_addr = dma_addr;
1836         }
1837
1838         /* Update rx tail register */
1839         rte_wmb();
1840         IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1841
1842         rxq->rx_free_trigger =
1843                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1844         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1845                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1846
1847         return 0;
1848 }
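
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * how rx_free_trigger advances after each bulk refill above. It moves
 * forward by rx_free_thresh descriptors and wraps to (rx_free_thresh - 1)
 * once it runs past the end of the ring, so the next refill starts at
 * index 0 again.
 */
static uint16_t
iavf_next_free_trigger_sketch(uint16_t trigger, uint16_t free_thresh,
			      uint16_t nb_desc)
{
	trigger = (uint16_t)(trigger + free_thresh);
	if (trigger >= nb_desc)
		trigger = (uint16_t)(free_thresh - 1);

	return trigger;
}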
1849
1850 static inline uint16_t
1851 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1852 {
1853         struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1854         uint16_t nb_rx = 0;
1855
1856         if (!nb_pkts)
1857                 return 0;
1858
1859         if (rxq->rx_nb_avail)
1860                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1861
1862         if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1863                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1864         else
1865                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1866         rxq->rx_next_avail = 0;
1867         rxq->rx_nb_avail = nb_rx;
1868         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1869
1870         if (rxq->rx_tail > rxq->rx_free_trigger) {
1871                 if (iavf_rx_alloc_bufs(rxq) != 0) {
1872                         uint16_t i, j;
1873
1874                         /* TODO: count rx_mbuf_alloc_failed here */
1875
1876                         rxq->rx_nb_avail = 0;
1877                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1878                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1879                                 rxq->sw_ring[j] = rxq->rx_stage[i];
1880
1881                         return 0;
1882                 }
1883         }
1884
1885         if (rxq->rx_tail >= rxq->nb_rx_desc)
1886                 rxq->rx_tail = 0;
1887
1888         PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1889                    rxq->port_id, rxq->queue_id,
1890                    rxq->rx_tail, nb_rx);
1891
1892         if (rxq->rx_nb_avail)
1893                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1894
1895         return 0;
1896 }
1897
1898 static uint16_t
1899 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1900                          struct rte_mbuf **rx_pkts,
1901                          uint16_t nb_pkts)
1902 {
1903         uint16_t nb_rx = 0, n, count;
1904
1905         if (unlikely(nb_pkts == 0))
1906                 return 0;
1907
1908         if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1909                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1910
1911         while (nb_pkts) {
1912                 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1913                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1914                 nb_rx = (uint16_t)(nb_rx + count);
1915                 nb_pkts = (uint16_t)(nb_pkts - count);
1916                 if (count < n)
1917                         break;
1918         }
1919
1920         return nb_rx;
1921 }
1922
1923 static inline int
1924 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1925 {
1926         struct iavf_tx_entry *sw_ring = txq->sw_ring;
1927         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1928         uint16_t nb_tx_desc = txq->nb_tx_desc;
1929         uint16_t desc_to_clean_to;
1930         uint16_t nb_tx_to_clean;
1931
1932         volatile struct iavf_tx_desc *txd = txq->tx_ring;
1933
1934         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1935         if (desc_to_clean_to >= nb_tx_desc)
1936                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1937
1938         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1939         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1940                         rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1941                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1942                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1943                                 "(port=%d queue=%d)", desc_to_clean_to,
1944                                 txq->port_id, txq->queue_id);
1945                 return -1;
1946         }
1947
1948         if (last_desc_cleaned > desc_to_clean_to)
1949                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1950                                                         desc_to_clean_to);
1951         else
1952                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1953                                         last_desc_cleaned);
1954
1955         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1956
1957         txq->last_desc_cleaned = desc_to_clean_to;
1958         txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1959
1960         return 0;
1961 }
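
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * computing how many descriptors the cleanup above reclaims, accounting for
 * the circular ring. E.g. with 512 descriptors, last cleaned index 500 and
 * cleaning up to index 10 reclaims (512 - 500) + 10 = 22 descriptors.
 */
static uint16_t
iavf_clean_count_sketch(uint16_t last_cleaned, uint16_t clean_to,
			uint16_t nb_desc)
{
	if (last_cleaned > clean_to)
		return (uint16_t)((nb_desc - last_cleaned) + clean_to);

	return (uint16_t)(clean_to - last_cleaned);
}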
1962
1963 /* Check if the context descriptor is needed for TX offloading */
1964 static inline uint16_t
1965 iavf_calc_context_desc(uint64_t flags)
1966 {
1967         static uint64_t mask = PKT_TX_TCP_SEG;
1968
1969         return (flags & mask) ? 1 : 0;
1970 }
1971
1972 static inline void
1973 iavf_txd_enable_checksum(uint64_t ol_flags,
1974                         uint32_t *td_cmd,
1975                         uint32_t *td_offset,
1976                         union iavf_tx_offload tx_offload)
1977 {
1978         /* Set MACLEN */
1979         *td_offset |= (tx_offload.l2_len >> 1) <<
1980                       IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1981
1982         /* Enable L3 checksum offloads */
1983         if (ol_flags & PKT_TX_IP_CKSUM) {
1984                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1985                 *td_offset |= (tx_offload.l3_len >> 2) <<
1986                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1987         } else if (ol_flags & PKT_TX_IPV4) {
1988                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1989                 *td_offset |= (tx_offload.l3_len >> 2) <<
1990                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1991         } else if (ol_flags & PKT_TX_IPV6) {
1992                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1993                 *td_offset |= (tx_offload.l3_len >> 2) <<
1994                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1995         }
1996
1997         if (ol_flags & PKT_TX_TCP_SEG) {
1998                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1999                 *td_offset |= (tx_offload.l4_len >> 2) <<
2000                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2001                 return;
2002         }
2003
2004         /* Enable L4 checksum offloads */
2005         switch (ol_flags & PKT_TX_L4_MASK) {
2006         case PKT_TX_TCP_CKSUM:
2007                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2008                 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2009                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2010                 break;
2011         case PKT_TX_SCTP_CKSUM:
2012                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2013                 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2014                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2015                 break;
2016         case PKT_TX_UDP_CKSUM:
2017                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2018                 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2019                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2020                 break;
2021         default:
2022                 break;
2023         }
2024 }
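
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * how the header lengths are packed into td_offset above. Lengths are
 * expressed in 2-byte units for the MAC header and 4-byte units for the IP
 * and L4 headers, e.g. a 14-byte Ethernet header, 20-byte IPv4 header and
 * 20-byte TCP header become 7, 5 and 5 respectively.
 */
static uint32_t
iavf_pack_td_offset_sketch(uint32_t l2_len, uint32_t l3_len, uint32_t l4_len)
{
	return ((l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) |
	       ((l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) |
	       ((l4_len >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
}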
2025
2026 /* Set the TSO context descriptor;
2027  * supports IP -> L4 and IP -> IP -> L4.
2028  */
2029 static inline uint64_t
2030 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2031 {
2032         uint64_t ctx_desc = 0;
2033         uint32_t cd_cmd, hdr_len, cd_tso_len;
2034
2035         if (!tx_offload.l4_len) {
2036                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2037                 return ctx_desc;
2038         }
2039
2040         hdr_len = tx_offload.l2_len +
2041                   tx_offload.l3_len +
2042                   tx_offload.l4_len;
2043
2044         cd_cmd = IAVF_TX_CTX_DESC_TSO;
2045         cd_tso_len = mbuf->pkt_len - hdr_len;
2046         ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2047                      ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2048                      ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
2049
2050         return ctx_desc;
2051 }
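
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * the TSO payload length programmed above is the packet length minus all
 * headers. E.g. a 9014-byte mbuf with 14 + 20 + 20 bytes of
 * Ethernet/IPv4/TCP headers yields a TSO length of 8960, which the HW then
 * splits into MSS-sized segments.
 */
static uint32_t
iavf_tso_payload_len_sketch(uint32_t pkt_len, uint32_t l2_len,
			    uint32_t l3_len, uint32_t l4_len)
{
	return pkt_len - (l2_len + l3_len + l4_len);
}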
2052
2053 /* Build the TX data descriptor's cmd_type_offset_bsz qword */
2054 static inline uint64_t
2055 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2056                uint32_t td_tag)
2057 {
2058         return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2059                                 ((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
2060                                 ((uint64_t)td_offset <<
2061                                  IAVF_TXD_QW1_OFFSET_SHIFT) |
2062                                 ((uint64_t)size  <<
2063                                  IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2064                                 ((uint64_t)td_tag  <<
2065                                  IAVF_TXD_QW1_L2TAG1_SHIFT));
2066 }
2067
2068 /* TX function */
2069 uint16_t
2070 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2071 {
2072         volatile struct iavf_tx_desc *txd;
2073         volatile struct iavf_tx_desc *txr;
2074         struct iavf_tx_queue *txq;
2075         struct iavf_tx_entry *sw_ring;
2076         struct iavf_tx_entry *txe, *txn;
2077         struct rte_mbuf *tx_pkt;
2078         struct rte_mbuf *m_seg;
2079         uint16_t tx_id;
2080         uint16_t nb_tx;
2081         uint32_t td_cmd;
2082         uint32_t td_offset;
2083         uint32_t td_tag;
2084         uint64_t ol_flags;
2085         uint16_t nb_used;
2086         uint16_t nb_ctx;
2087         uint16_t tx_last;
2088         uint16_t slen;
2089         uint64_t buf_dma_addr;
2090         union iavf_tx_offload tx_offload = {0};
2091
2092         txq = tx_queue;
2093         sw_ring = txq->sw_ring;
2094         txr = txq->tx_ring;
2095         tx_id = txq->tx_tail;
2096         txe = &sw_ring[tx_id];
2097
2098         /* Check if the descriptor ring needs to be cleaned. */
2099         if (txq->nb_free < txq->free_thresh)
2100                 (void)iavf_xmit_cleanup(txq);
2101
2102         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2103                 td_cmd = 0;
2104                 td_tag = 0;
2105                 td_offset = 0;
2106
2107                 tx_pkt = *tx_pkts++;
2108                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2109
2110                 ol_flags = tx_pkt->ol_flags;
2111                 tx_offload.l2_len = tx_pkt->l2_len;
2112                 tx_offload.l3_len = tx_pkt->l3_len;
2113                 tx_offload.l4_len = tx_pkt->l4_len;
2114                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2115                 /* Calculate the number of context descriptors needed. */
2116                 nb_ctx = iavf_calc_context_desc(ol_flags);
2117
2118                 /* The number of descriptors that must be allocated for
2119                  * a packet equals the number of segments of that packet,
2120                  * plus one context descriptor if needed.
2121                  */
2122                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2123                 tx_last = (uint16_t)(tx_id + nb_used - 1);
2124
2125                 /* Circular ring */
2126                 if (tx_last >= txq->nb_tx_desc)
2127                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2128
2129                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2130                            " tx_first=%u tx_last=%u",
2131                            txq->port_id, txq->queue_id, tx_id, tx_last);
2132
2133                 if (nb_used > txq->nb_free) {
2134                         if (iavf_xmit_cleanup(txq)) {
2135                                 if (nb_tx == 0)
2136                                         return 0;
2137                                 goto end_of_tx;
2138                         }
2139                         if (unlikely(nb_used > txq->rs_thresh)) {
2140                                 while (nb_used > txq->nb_free) {
2141                                         if (iavf_xmit_cleanup(txq)) {
2142                                                 if (nb_tx == 0)
2143                                                         return 0;
2144                                                 goto end_of_tx;
2145                                         }
2146                                 }
2147                         }
2148                 }
2149
2150                 /* Descriptor based VLAN insertion */
2151                 if (ol_flags & PKT_TX_VLAN_PKT) {
2152                         td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2153                         td_tag = tx_pkt->vlan_tci;
2154                 }
2155
2156                 /* According to the datasheet, bit 2 is reserved and must be
2157                  * set to 1.
2158                  */
2159                 td_cmd |= 0x04;
2160
2161                 /* Enable checksum offloading */
2162                 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2163                         iavf_txd_enable_checksum(ol_flags, &td_cmd,
2164                                                 &td_offset, tx_offload);
2165
2166                 if (nb_ctx) {
2167                         /* Setup TX context descriptor if required */
2168                         uint64_t cd_type_cmd_tso_mss =
2169                                 IAVF_TX_DESC_DTYPE_CONTEXT;
2170                         volatile struct iavf_tx_context_desc *ctx_txd =
2171                                 (volatile struct iavf_tx_context_desc *)
2172                                                         &txr[tx_id];
2173
2174                         txn = &sw_ring[txe->next_id];
2175                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2176                         if (txe->mbuf) {
2177                                 rte_pktmbuf_free_seg(txe->mbuf);
2178                                 txe->mbuf = NULL;
2179                         }
2180
2181                         /* TSO enabled */
2182                         if (ol_flags & PKT_TX_TCP_SEG)
2183                                 cd_type_cmd_tso_mss |=
2184                                         iavf_set_tso_ctx(tx_pkt, tx_offload);
2185
2186                         ctx_txd->type_cmd_tso_mss =
2187                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2188
2189                         IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2190                         txe->last_id = tx_last;
2191                         tx_id = txe->next_id;
2192                         txe = txn;
2193                 }
2194
2195                 m_seg = tx_pkt;
2196                 do {
2197                         txd = &txr[tx_id];
2198                         txn = &sw_ring[txe->next_id];
2199
2200                         if (txe->mbuf)
2201                                 rte_pktmbuf_free_seg(txe->mbuf);
2202                         txe->mbuf = m_seg;
2203
2204                         /* Setup TX Descriptor */
2205                         slen = m_seg->data_len;
2206                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
2207                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2208                         txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2209                                                                   td_offset,
2210                                                                   slen,
2211                                                                   td_tag);
2212
2213                         IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2214                         txe->last_id = tx_last;
2215                         tx_id = txe->next_id;
2216                         txe = txn;
2217                         m_seg = m_seg->next;
2218                 } while (m_seg);
2219
2220                 /* The last packet data descriptor needs End Of Packet (EOP) */
2221                 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2222                 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2223                 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2224
2225                 if (txq->nb_used >= txq->rs_thresh) {
2226                         PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2227                                    "%4u (port=%d queue=%d)",
2228                                    tx_last, txq->port_id, txq->queue_id);
2229
2230                         td_cmd |= IAVF_TX_DESC_CMD_RS;
2231
2232                         /* Update txq RS bit counters */
2233                         txq->nb_used = 0;
2234                 }
2235
2236                 txd->cmd_type_offset_bsz |=
2237                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2238                                          IAVF_TXD_QW1_CMD_SHIFT);
2239                 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2240         }
2241
2242 end_of_tx:
2243         rte_wmb();
2244
2245         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2246                    txq->port_id, txq->queue_id, tx_id, nb_tx);
2247
2248         IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2249         txq->tx_tail = tx_id;
2250
2251         return nb_tx;
2252 }
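
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * the descriptor budget used above. A packet consumes one data descriptor
 * per mbuf segment plus one optional context descriptor (TSO), and the
 * index of its last descriptor wraps around the ring. E.g. a 3-segment TSO
 * packet starting at tx_id = 1022 on a 1024-entry ring uses 4 descriptors
 * and ends at index 1.
 */
static uint16_t
iavf_tx_last_desc_sketch(uint16_t tx_id, uint16_t nb_segs, uint16_t nb_ctx,
			 uint16_t nb_tx_desc)
{
	uint16_t nb_used = (uint16_t)(nb_segs + nb_ctx);
	uint16_t tx_last = (uint16_t)(tx_id + nb_used - 1);

	if (tx_last >= nb_tx_desc)
		tx_last = (uint16_t)(tx_last - nb_tx_desc);

	return tx_last;
}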
2253
2254 /* TX prepare function */
2255 uint16_t
2256 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2257               uint16_t nb_pkts)
2258 {
2259         int i, ret;
2260         uint64_t ol_flags;
2261         struct rte_mbuf *m;
2262
2263         for (i = 0; i < nb_pkts; i++) {
2264                 m = tx_pkts[i];
2265                 ol_flags = m->ol_flags;
2266
2267                 /* For non-TSO packets, nb_segs must not exceed IAVF_TX_MAX_MTU_SEG. */
2268                 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2269                         if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2270                                 rte_errno = EINVAL;
2271                                 return i;
2272                         }
2273                 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2274                            (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2275                         /* MSS values outside this range are considered malicious */
2276                         rte_errno = EINVAL;
2277                         return i;
2278                 }
2279
2280                 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2281                         rte_errno = ENOTSUP;
2282                         return i;
2283                 }
2284
2285 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2286                 ret = rte_validate_tx_offload(m);
2287                 if (ret != 0) {
2288                         rte_errno = -ret;
2289                         return i;
2290                 }
2291 #endif
2292                 ret = rte_net_intel_cksum_prepare(m);
2293                 if (ret != 0) {
2294                         rte_errno = -ret;
2295                         return i;
2296                 }
2297         }
2298
2299         return i;
2300 }
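
/* Illustrative sketch (not part of the driver; the helper name is made up):
 * the two admission rules enforced above. Non-TSO packets may not exceed
 * IAVF_TX_MAX_MTU_SEG mbuf segments, while TSO packets must advertise an
 * MSS within [IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS]; anything else is
 * rejected with rte_errno = EINVAL.
 */
static int
iavf_tx_pkt_ok_sketch(uint64_t ol_flags, uint16_t nb_segs, uint16_t tso_segsz)
{
	if (!(ol_flags & PKT_TX_TCP_SEG))
		return nb_segs <= IAVF_TX_MAX_MTU_SEG;

	return tso_segsz >= IAVF_MIN_TSO_MSS && tso_segsz <= IAVF_MAX_TSO_MSS;
}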
2301
2302 /* Choose Rx function */
2303 void
2304 iavf_set_rx_function(struct rte_eth_dev *dev)
2305 {
2306         struct iavf_adapter *adapter =
2307                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2308         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2309
2310 #ifdef RTE_ARCH_X86
2311         struct iavf_rx_queue *rxq;
2312         int i;
2313         bool use_avx2 = false;
2314 #ifdef CC_AVX512_SUPPORT
2315         bool use_avx512 = false;
2316 #endif
2317
2318         if (!iavf_rx_vec_dev_check(dev) &&
2319                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2320                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2321                         rxq = dev->data->rx_queues[i];
2322                         (void)iavf_rxq_vec_setup(rxq);
2323                 }
2324
2325                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2326                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2327                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2328                         use_avx2 = true;
2329 #ifdef CC_AVX512_SUPPORT
2330                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2331                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2332                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2333                         use_avx512 = true;
2334 #endif
2335
2336                 if (dev->data->scattered_rx) {
2337                         PMD_DRV_LOG(DEBUG,
2338                                     "Using %sVector Scattered Rx (port %d).",
2339                                     use_avx2 ? "avx2 " : "",
2340                                     dev->data->port_id);
2341                         if (vf->vf_res->vf_cap_flags &
2342                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2343                                 dev->rx_pkt_burst = use_avx2 ?
2344                                         iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2345                                         iavf_recv_scattered_pkts_vec_flex_rxd;
2346 #ifdef CC_AVX512_SUPPORT
2347                                 if (use_avx512)
2348                                         dev->rx_pkt_burst =
2349                                                 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2350 #endif
2351                         } else {
2352                                 dev->rx_pkt_burst = use_avx2 ?
2353                                         iavf_recv_scattered_pkts_vec_avx2 :
2354                                         iavf_recv_scattered_pkts_vec;
2355 #ifdef CC_AVX512_SUPPORT
2356                                 if (use_avx512)
2357                                         dev->rx_pkt_burst =
2358                                                 iavf_recv_scattered_pkts_vec_avx512;
2359 #endif
2360                         }
2361                 } else {
2362                         PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2363                                     use_avx2 ? "avx2 " : "",
2364                                     dev->data->port_id);
2365                         if (vf->vf_res->vf_cap_flags &
2366                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2367                                 dev->rx_pkt_burst = use_avx2 ?
2368                                         iavf_recv_pkts_vec_avx2_flex_rxd :
2369                                         iavf_recv_pkts_vec_flex_rxd;
2370 #ifdef CC_AVX512_SUPPORT
2371                                 if (use_avx512)
2372                                         dev->rx_pkt_burst =
2373                                                 iavf_recv_pkts_vec_avx512_flex_rxd;
2374 #endif
2375                         } else {
2376                                 dev->rx_pkt_burst = use_avx2 ?
2377                                         iavf_recv_pkts_vec_avx2 :
2378                                         iavf_recv_pkts_vec;
2379 #ifdef CC_AVX512_SUPPORT
2380                                 if (use_avx512)
2381                                         dev->rx_pkt_burst =
2382                                                 iavf_recv_pkts_vec_avx512;
2383 #endif
2384                         }
2385                 }
2386
2387                 return;
2388         }
2389 #endif
2390
2391         if (dev->data->scattered_rx) {
2392                 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2393                             dev->data->port_id);
2394                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2395                         dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2396                 else
2397                         dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2398         } else if (adapter->rx_bulk_alloc_allowed) {
2399                 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2400                             dev->data->port_id);
2401                 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2402         } else {
2403                 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2404                             dev->data->port_id);
2405                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2406                         dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2407                 else
2408                         dev->rx_pkt_burst = iavf_recv_pkts;
2409         }
2410 }
2411
2412 /* Choose Tx function */
2413 void
2414 iavf_set_tx_function(struct rte_eth_dev *dev)
2415 {
2416 #ifdef RTE_ARCH_X86
2417         struct iavf_tx_queue *txq;
2418         int i;
2419         bool use_avx2 = false;
2420 #ifdef CC_AVX512_SUPPORT
2421         bool use_avx512 = false;
2422 #endif
2423
2424         if (!iavf_tx_vec_dev_check(dev) &&
2425                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2426                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2427                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2428                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2429                         use_avx2 = true;
2430 #ifdef CC_AVX512_SUPPORT
2431                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2432                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2433                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2434                         use_avx512 = true;
2435 #endif
2436
2437                 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2438                             use_avx2 ? "avx2 " : "",
2439                             dev->data->port_id);
2440                 dev->tx_pkt_burst = use_avx2 ?
2441                                     iavf_xmit_pkts_vec_avx2 :
2442                                     iavf_xmit_pkts_vec;
2443 #ifdef CC_AVX512_SUPPORT
2444                 if (use_avx512)
2445                         dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2446 #endif
2447                 dev->tx_pkt_prepare = NULL;
2448
2449                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2450                         txq = dev->data->tx_queues[i];
2451                         if (!txq)
2452                                 continue;
2453 #ifdef CC_AVX512_SUPPORT
2454                         if (use_avx512)
2455                                 iavf_txq_vec_setup_avx512(txq);
2456                         else
2457                                 iavf_txq_vec_setup(txq);
2458 #else
2459                         iavf_txq_vec_setup(txq);
2460 #endif
2461                 }
2462
2463                 return;
2464         }
2465 #endif
2466
2467         PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2468                     dev->data->port_id);
2469         dev->tx_pkt_burst = iavf_xmit_pkts;
2470         dev->tx_pkt_prepare = iavf_prep_pkts;
2471 }
2472
2473 static int
2474 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2475                         uint32_t free_cnt)
2476 {
2477         struct iavf_tx_entry *swr_ring = txq->sw_ring;
2478         uint16_t i, tx_last, tx_id;
2479         uint16_t nb_tx_free_last;
2480         uint16_t nb_tx_to_clean;
2481         uint32_t pkt_cnt;
2482
2483         /* Start freeing mbufs from the descriptor following tx_tail */
2484         tx_last = txq->tx_tail;
2485         tx_id  = swr_ring[tx_last].next_id;
2486
2487         if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2488                 return 0;
2489
2490         nb_tx_to_clean = txq->nb_free;
2491         nb_tx_free_last = txq->nb_free;
2492         if (!free_cnt)
2493                 free_cnt = txq->nb_tx_desc;
2494
2495         /* Loop through swr_ring to count the number of
2496          * freeable mbufs and packets.
2497          */
2498         for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2499                 for (i = 0; i < nb_tx_to_clean &&
2500                         pkt_cnt < free_cnt &&
2501                         tx_id != tx_last; i++) {
2502                         if (swr_ring[tx_id].mbuf != NULL) {
2503                                 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2504                                 swr_ring[tx_id].mbuf = NULL;
2505
2506                                 /*
2507                                  * last segment in the packet,
2508                                  * increment packet count
2509                                  */
2510                                 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2511                         }
2512
2513                         tx_id = swr_ring[tx_id].next_id;
2514                 }
2515
2516                 if (txq->rs_thresh > txq->nb_tx_desc -
2517                         txq->nb_free || tx_id == tx_last)
2518                         break;
2519
2520                 if (pkt_cnt < free_cnt) {
2521                         if (iavf_xmit_cleanup(txq))
2522                                 break;
2523
2524                         nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2525                         nb_tx_free_last = txq->nb_free;
2526                 }
2527         }
2528
2529         return (int)pkt_cnt;
2530 }
2531
2532 int
2533 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2534 {
2535         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2536
2537         return iavf_tx_done_cleanup_full(q, free_cnt);
2538 }
2539
2540 void
2541 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2542                      struct rte_eth_rxq_info *qinfo)
2543 {
2544         struct iavf_rx_queue *rxq;
2545
2546         rxq = dev->data->rx_queues[queue_id];
2547
2548         qinfo->mp = rxq->mp;
2549         qinfo->scattered_rx = dev->data->scattered_rx;
2550         qinfo->nb_desc = rxq->nb_rx_desc;
2551
2552         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2553         qinfo->conf.rx_drop_en = true;
2554         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2555 }
2556
2557 void
2558 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2559                      struct rte_eth_txq_info *qinfo)
2560 {
2561         struct iavf_tx_queue *txq;
2562
2563         txq = dev->data->tx_queues[queue_id];
2564
2565         qinfo->nb_desc = txq->nb_tx_desc;
2566
2567         qinfo->conf.tx_free_thresh = txq->free_thresh;
2568         qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2569         qinfo->conf.offloads = txq->offloads;
2570         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2571 }
2572
2573 /* Get the number of used descriptors of an Rx queue */
2574 uint32_t
2575 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2576 {
2577 #define IAVF_RXQ_SCAN_INTERVAL 4
2578         volatile union iavf_rx_desc *rxdp;
2579         struct iavf_rx_queue *rxq;
2580         uint16_t desc = 0;
2581
2582         rxq = dev->data->rx_queues[queue_id];
2583         rxdp = &rxq->rx_ring[rxq->rx_tail];
2584
2585         while ((desc < rxq->nb_rx_desc) &&
2586                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2587                  IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2588                (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2589                 /* Check the DD bit of every fourth Rx descriptor to avoid
2590                  * checking too frequently and degrading performance too
2591                  * much.
2592                  */
2593                 desc += IAVF_RXQ_SCAN_INTERVAL;
2594                 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2595                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2596                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2597                                         desc - rxq->nb_rx_desc]);
2598         }
2599
2600         return desc;
2601 }
2602
2603 int
2604 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2605 {
2606         struct iavf_rx_queue *rxq = rx_queue;
2607         volatile uint64_t *status;
2608         uint64_t mask;
2609         uint32_t desc;
2610
2611         if (unlikely(offset >= rxq->nb_rx_desc))
2612                 return -EINVAL;
2613
2614         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2615                 return RTE_ETH_RX_DESC_UNAVAIL;
2616
2617         desc = rxq->rx_tail + offset;
2618         if (desc >= rxq->nb_rx_desc)
2619                 desc -= rxq->nb_rx_desc;
2620
2621         status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2622         mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2623                 << IAVF_RXD_QW1_STATUS_SHIFT);
2624         if (*status & mask)
2625                 return RTE_ETH_RX_DESC_DONE;
2626
2627         return RTE_ETH_RX_DESC_AVAIL;
2628 }
2629
2630 int
2631 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2632 {
2633         struct iavf_tx_queue *txq = tx_queue;
2634         volatile uint64_t *status;
2635         uint64_t mask, expect;
2636         uint32_t desc;
2637
2638         if (unlikely(offset >= txq->nb_tx_desc))
2639                 return -EINVAL;
2640
2641         desc = txq->tx_tail + offset;
2642         /* Go to the next descriptor that has the RS bit set */
2643         desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2644                 txq->rs_thresh;
2645         if (desc >= txq->nb_tx_desc) {
2646                 desc -= txq->nb_tx_desc;
2647                 if (desc >= txq->nb_tx_desc)
2648                         desc -= txq->nb_tx_desc;
2649         }
2650
2651         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2652         mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2653         expect = rte_cpu_to_le_64(
2654                  IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2655         if ((*status & mask) == expect)
2656                 return RTE_ETH_TX_DESC_DONE;
2657
2658         return RTE_ETH_TX_DESC_FULL;
2659 }
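
/* Illustrative sketch (not part of the driver; the helper name is made up,
 * rs_thresh is assumed to be non-zero): how the lookup above rounds a
 * descriptor index up to the next descriptor carrying an RS bit, since only
 * those descriptors report completion. E.g. with rs_thresh = 32, indices
 * 1..32 all map to index 32 and index 33 maps to 64.
 */
static uint32_t
iavf_next_rs_desc_sketch(uint32_t desc, uint16_t rs_thresh)
{
	return ((desc + rs_thresh - 1) / rs_thresh) * rs_thresh;
}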
2660
2661 const uint32_t *
2662 iavf_get_default_ptype_table(void)
2663 {
2664         static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2665                 __rte_cache_aligned = {
2666                 /* L2 types */
2667                 /* [0] reserved */
2668                 [1] = RTE_PTYPE_L2_ETHER,
2669                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2670                 /* [3] - [5] reserved */
2671                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2672                 /* [7] - [10] reserved */
2673                 [11] = RTE_PTYPE_L2_ETHER_ARP,
2674                 /* [12] - [21] reserved */
2675
2676                 /* Non tunneled IPv4 */
2677                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2678                        RTE_PTYPE_L4_FRAG,
2679                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680                        RTE_PTYPE_L4_NONFRAG,
2681                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2682                        RTE_PTYPE_L4_UDP,
2683                 /* [25] reserved */
2684                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2685                        RTE_PTYPE_L4_TCP,
2686                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2687                        RTE_PTYPE_L4_SCTP,
2688                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2689                        RTE_PTYPE_L4_ICMP,
2690
2691                 /* IPv4 --> IPv4 */
2692                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2693                        RTE_PTYPE_TUNNEL_IP |
2694                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2695                        RTE_PTYPE_INNER_L4_FRAG,
2696                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2697                        RTE_PTYPE_TUNNEL_IP |
2698                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2699                        RTE_PTYPE_INNER_L4_NONFRAG,
2700                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2701                        RTE_PTYPE_TUNNEL_IP |
2702                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2703                        RTE_PTYPE_INNER_L4_UDP,
2704                 /* [32] reserved */
2705                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2706                        RTE_PTYPE_TUNNEL_IP |
2707                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2708                        RTE_PTYPE_INNER_L4_TCP,
2709                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2710                        RTE_PTYPE_TUNNEL_IP |
2711                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2712                        RTE_PTYPE_INNER_L4_SCTP,
2713                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2714                        RTE_PTYPE_TUNNEL_IP |
2715                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2716                        RTE_PTYPE_INNER_L4_ICMP,
2717
2718                 /* IPv4 --> IPv6 */
2719                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2720                        RTE_PTYPE_TUNNEL_IP |
2721                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2722                        RTE_PTYPE_INNER_L4_FRAG,
2723                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2724                        RTE_PTYPE_TUNNEL_IP |
2725                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2726                        RTE_PTYPE_INNER_L4_NONFRAG,
2727                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2728                        RTE_PTYPE_TUNNEL_IP |
2729                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2730                        RTE_PTYPE_INNER_L4_UDP,
2731                 /* [39] reserved */
2732                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2733                        RTE_PTYPE_TUNNEL_IP |
2734                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2735                        RTE_PTYPE_INNER_L4_TCP,
2736                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2737                        RTE_PTYPE_TUNNEL_IP |
2738                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2739                        RTE_PTYPE_INNER_L4_SCTP,
2740                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2741                        RTE_PTYPE_TUNNEL_IP |
2742                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2743                        RTE_PTYPE_INNER_L4_ICMP,
2744
2745                 /* IPv4 --> GRE/Teredo/VXLAN */
2746                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2747                        RTE_PTYPE_TUNNEL_GRENAT,
2748
2749                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2750                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2751                        RTE_PTYPE_TUNNEL_GRENAT |
2752                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2753                        RTE_PTYPE_INNER_L4_FRAG,
2754                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2755                        RTE_PTYPE_TUNNEL_GRENAT |
2756                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2757                        RTE_PTYPE_INNER_L4_NONFRAG,
2758                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2759                        RTE_PTYPE_TUNNEL_GRENAT |
2760                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2761                        RTE_PTYPE_INNER_L4_UDP,
2762                 /* [47] reserved */
2763                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2764                        RTE_PTYPE_TUNNEL_GRENAT |
2765                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2766                        RTE_PTYPE_INNER_L4_TCP,
2767                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2768                        RTE_PTYPE_TUNNEL_GRENAT |
2769                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2770                        RTE_PTYPE_INNER_L4_SCTP,
2771                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2772                        RTE_PTYPE_TUNNEL_GRENAT |
2773                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2774                        RTE_PTYPE_INNER_L4_ICMP,
2775
2776                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2777                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2778                        RTE_PTYPE_TUNNEL_GRENAT |
2779                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2780                        RTE_PTYPE_INNER_L4_FRAG,
2781                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2782                        RTE_PTYPE_TUNNEL_GRENAT |
2783                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2784                        RTE_PTYPE_INNER_L4_NONFRAG,
2785                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2786                        RTE_PTYPE_TUNNEL_GRENAT |
2787                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2788                        RTE_PTYPE_INNER_L4_UDP,
2789                 /* [54] reserved */
2790                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2791                        RTE_PTYPE_TUNNEL_GRENAT |
2792                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2793                        RTE_PTYPE_INNER_L4_TCP,
2794                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2795                        RTE_PTYPE_TUNNEL_GRENAT |
2796                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2797                        RTE_PTYPE_INNER_L4_SCTP,
2798                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2799                        RTE_PTYPE_TUNNEL_GRENAT |
2800                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2801                        RTE_PTYPE_INNER_L4_ICMP,
2802
2803                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2804                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2805                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2806
2807                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2808                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2809                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2810                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2811                        RTE_PTYPE_INNER_L4_FRAG,
2812                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2813                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2814                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2815                        RTE_PTYPE_INNER_L4_NONFRAG,
2816                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2817                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2818                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2819                        RTE_PTYPE_INNER_L4_UDP,
2820                 /* [62] reserved */
2821                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2822                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2823                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2824                        RTE_PTYPE_INNER_L4_TCP,
2825                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2826                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2827                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2828                        RTE_PTYPE_INNER_L4_SCTP,
2829                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2830                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2831                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2832                        RTE_PTYPE_INNER_L4_ICMP,
2833
2834                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2835                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2836                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2837                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2838                        RTE_PTYPE_INNER_L4_FRAG,
2839                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2840                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2841                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2842                        RTE_PTYPE_INNER_L4_NONFRAG,
2843                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2844                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2845                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2846                        RTE_PTYPE_INNER_L4_UDP,
2847                 /* [69] reserved */
2848                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2849                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2850                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2851                        RTE_PTYPE_INNER_L4_TCP,
2852                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2853                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2854                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2855                        RTE_PTYPE_INNER_L4_SCTP,
2856                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2857                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2858                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2859                        RTE_PTYPE_INNER_L4_ICMP,
2860                 /* [73] - [87] reserved */
2861
2862                 /* Non tunneled IPv6 */
2863                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2864                        RTE_PTYPE_L4_FRAG,
2865                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2866                        RTE_PTYPE_L4_NONFRAG,
2867                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2868                        RTE_PTYPE_L4_UDP,
2869                 /* [91] reserved */
2870                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2871                        RTE_PTYPE_L4_TCP,
2872                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2873                        RTE_PTYPE_L4_SCTP,
2874                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2875                        RTE_PTYPE_L4_ICMP,
2876
2877                 /* IPv6 --> IPv4 */
2878                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2879                        RTE_PTYPE_TUNNEL_IP |
2880                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2881                        RTE_PTYPE_INNER_L4_FRAG,
2882                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2883                        RTE_PTYPE_TUNNEL_IP |
2884                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2885                        RTE_PTYPE_INNER_L4_NONFRAG,
2886                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2887                        RTE_PTYPE_TUNNEL_IP |
2888                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2889                        RTE_PTYPE_INNER_L4_UDP,
2890                 /* [98] reserved */
2891                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2892                        RTE_PTYPE_TUNNEL_IP |
2893                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2894                        RTE_PTYPE_INNER_L4_TCP,
2895                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2896                         RTE_PTYPE_TUNNEL_IP |
2897                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2898                         RTE_PTYPE_INNER_L4_SCTP,
2899                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2900                         RTE_PTYPE_TUNNEL_IP |
2901                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2902                         RTE_PTYPE_INNER_L4_ICMP,
2903
2904                 /* IPv6 --> IPv6 */
2905                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2906                         RTE_PTYPE_TUNNEL_IP |
2907                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2908                         RTE_PTYPE_INNER_L4_FRAG,
2909                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2910                         RTE_PTYPE_TUNNEL_IP |
2911                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2912                         RTE_PTYPE_INNER_L4_NONFRAG,
2913                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2914                         RTE_PTYPE_TUNNEL_IP |
2915                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2916                         RTE_PTYPE_INNER_L4_UDP,
2917                 /* [105] reserved */
2918                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2919                         RTE_PTYPE_TUNNEL_IP |
2920                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2921                         RTE_PTYPE_INNER_L4_TCP,
2922                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2923                         RTE_PTYPE_TUNNEL_IP |
2924                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2925                         RTE_PTYPE_INNER_L4_SCTP,
2926                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2927                         RTE_PTYPE_TUNNEL_IP |
2928                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2929                         RTE_PTYPE_INNER_L4_ICMP,
2930
2931                 /* IPv6 --> GRE/Teredo/VXLAN */
2932                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2933                         RTE_PTYPE_TUNNEL_GRENAT,
2934
2935                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2936                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2937                         RTE_PTYPE_TUNNEL_GRENAT |
2938                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2939                         RTE_PTYPE_INNER_L4_FRAG,
2940                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2941                         RTE_PTYPE_TUNNEL_GRENAT |
2942                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2943                         RTE_PTYPE_INNER_L4_NONFRAG,
2944                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2945                         RTE_PTYPE_TUNNEL_GRENAT |
2946                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2947                         RTE_PTYPE_INNER_L4_UDP,
2948                 /* [113] reserved */
2949                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2950                         RTE_PTYPE_TUNNEL_GRENAT |
2951                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2952                         RTE_PTYPE_INNER_L4_TCP,
2953                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2954                         RTE_PTYPE_TUNNEL_GRENAT |
2955                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2956                         RTE_PTYPE_INNER_L4_SCTP,
2957                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2958                         RTE_PTYPE_TUNNEL_GRENAT |
2959                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2960                         RTE_PTYPE_INNER_L4_ICMP,
2961
2962                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2963                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2964                         RTE_PTYPE_TUNNEL_GRENAT |
2965                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2966                         RTE_PTYPE_INNER_L4_FRAG,
2967                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2968                         RTE_PTYPE_TUNNEL_GRENAT |
2969                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2970                         RTE_PTYPE_INNER_L4_NONFRAG,
2971                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2972                         RTE_PTYPE_TUNNEL_GRENAT |
2973                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2974                         RTE_PTYPE_INNER_L4_UDP,
2975                 /* [120] reserved */
2976                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2977                         RTE_PTYPE_TUNNEL_GRENAT |
2978                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2979                         RTE_PTYPE_INNER_L4_TCP,
2980                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2981                         RTE_PTYPE_TUNNEL_GRENAT |
2982                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2983                         RTE_PTYPE_INNER_L4_SCTP,
2984                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2985                         RTE_PTYPE_TUNNEL_GRENAT |
2986                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2987                         RTE_PTYPE_INNER_L4_ICMP,
2988
2989                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2990                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2991                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2992
2993                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2994                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2995                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2996                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2997                         RTE_PTYPE_INNER_L4_FRAG,
2998                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2999                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3000                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3001                         RTE_PTYPE_INNER_L4_NONFRAG,
3002                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3003                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3004                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3005                         RTE_PTYPE_INNER_L4_UDP,
3006                 /* [128] reserved */
3007                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3008                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3009                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3010                         RTE_PTYPE_INNER_L4_TCP,
3011                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3012                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3013                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3014                         RTE_PTYPE_INNER_L4_SCTP,
3015                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3016                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3017                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3018                         RTE_PTYPE_INNER_L4_ICMP,
3019
3020                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3021                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3022                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3023                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3024                         RTE_PTYPE_INNER_L4_FRAG,
3025                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3026                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3027                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3028                         RTE_PTYPE_INNER_L4_NONFRAG,
3029                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3030                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3031                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3032                         RTE_PTYPE_INNER_L4_UDP,
3033                 /* [135] reserved */
3034                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3035                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3036                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3037                         RTE_PTYPE_INNER_L4_TCP,
3038                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3039                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3040                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3041                         RTE_PTYPE_INNER_L4_SCTP,
3042                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3043                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3044                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3045                         RTE_PTYPE_INNER_L4_ICMP,
3046                 /* [139] - [299] reserved */
3047
3048                 /* PPPoE */
3049                 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3050                 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3051
3052                 /* PPPoE --> IPv4 */
3053                 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3054                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3055                         RTE_PTYPE_L4_FRAG,
3056                 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3057                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3058                         RTE_PTYPE_L4_NONFRAG,
3059                 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3060                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3061                         RTE_PTYPE_L4_UDP,
3062                 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3063                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3064                         RTE_PTYPE_L4_TCP,
3065                 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3066                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3067                         RTE_PTYPE_L4_SCTP,
3068                 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3069                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3070                         RTE_PTYPE_L4_ICMP,
3071
3072                 /* PPPoE --> IPv6 */
3073                 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3074                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3075                         RTE_PTYPE_L4_FRAG,
3076                 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3077                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3078                         RTE_PTYPE_L4_NONFRAG,
3079                 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3080                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3081                         RTE_PTYPE_L4_UDP,
3082                 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3083                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3084                         RTE_PTYPE_L4_TCP,
3085                 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3086                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3087                         RTE_PTYPE_L4_SCTP,
3088                 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3089                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3090                         RTE_PTYPE_L4_ICMP,
3091                 /* [314] - [324] reserved */
3092
3093                 /* IPv4/IPv6 --> GTPC/GTPU */
3094                 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3095                         RTE_PTYPE_TUNNEL_GTPC,
3096                 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3097                         RTE_PTYPE_TUNNEL_GTPC,
3098                 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3099                         RTE_PTYPE_TUNNEL_GTPC,
3100                 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3101                         RTE_PTYPE_TUNNEL_GTPC,
3102                 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3103                         RTE_PTYPE_TUNNEL_GTPU,
3104                 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3105                         RTE_PTYPE_TUNNEL_GTPU,
3106
3107                 /* IPv4 --> GTPU --> IPv4 */
3108                 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3109                         RTE_PTYPE_TUNNEL_GTPU |
3110                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3111                         RTE_PTYPE_INNER_L4_FRAG,
3112                 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3113                         RTE_PTYPE_TUNNEL_GTPU |
3114                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3115                         RTE_PTYPE_INNER_L4_NONFRAG,
3116                 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3117                         RTE_PTYPE_TUNNEL_GTPU |
3118                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3119                         RTE_PTYPE_INNER_L4_UDP,
3120                 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3121                         RTE_PTYPE_TUNNEL_GTPU |
3122                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3123                         RTE_PTYPE_INNER_L4_TCP,
3124                 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3125                         RTE_PTYPE_TUNNEL_GTPU |
3126                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3127                         RTE_PTYPE_INNER_L4_ICMP,
3128
3129                 /* IPv6 --> GTPU --> IPv4 */
3130                 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3131                         RTE_PTYPE_TUNNEL_GTPU |
3132                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3133                         RTE_PTYPE_INNER_L4_FRAG,
3134                 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3135                         RTE_PTYPE_TUNNEL_GTPU |
3136                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3137                         RTE_PTYPE_INNER_L4_NONFRAG,
3138                 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3139                         RTE_PTYPE_TUNNEL_GTPU |
3140                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3141                         RTE_PTYPE_INNER_L4_UDP,
3142                 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3143                         RTE_PTYPE_TUNNEL_GTPU |
3144                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3145                         RTE_PTYPE_INNER_L4_TCP,
3146                 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3147                         RTE_PTYPE_TUNNEL_GTPU |
3148                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3149                         RTE_PTYPE_INNER_L4_ICMP,
3150
3151                 /* IPv4 --> GTPU --> IPv6 */
3152                 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3153                         RTE_PTYPE_TUNNEL_GTPU |
3154                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3155                         RTE_PTYPE_INNER_L4_FRAG,
3156                 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3157                         RTE_PTYPE_TUNNEL_GTPU |
3158                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3159                         RTE_PTYPE_INNER_L4_NONFRAG,
3160                 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3161                         RTE_PTYPE_TUNNEL_GTPU |
3162                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3163                         RTE_PTYPE_INNER_L4_UDP,
3164                 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3165                         RTE_PTYPE_TUNNEL_GTPU |
3166                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3167                         RTE_PTYPE_INNER_L4_TCP,
3168                 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3169                         RTE_PTYPE_TUNNEL_GTPU |
3170                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3171                         RTE_PTYPE_INNER_L4_ICMP,
3172
3173                 /* IPv6 --> GTPU --> IPv6 */
3174                 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3175                         RTE_PTYPE_TUNNEL_GTPU |
3176                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3177                         RTE_PTYPE_INNER_L4_FRAG,
3178                 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3179                         RTE_PTYPE_TUNNEL_GTPU |
3180                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3181                         RTE_PTYPE_INNER_L4_NONFRAG,
3182                 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3183                         RTE_PTYPE_TUNNEL_GTPU |
3184                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3185                         RTE_PTYPE_INNER_L4_UDP,
3186                 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3187                         RTE_PTYPE_TUNNEL_GTPU |
3188                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3189                         RTE_PTYPE_INNER_L4_TCP,
3190                 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3191                         RTE_PTYPE_TUNNEL_GTPU |
3192                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3193                         RTE_PTYPE_INNER_L4_ICMP,
3194                 /* All others reserved */
3195         };
3196
3197         return ptype_tbl;
3198 }
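/*
 * Illustrative sketch of how the table built above is typically consumed on
 * the Rx side: the hardware packet-type value is used directly as an index,
 * so every reserved or out-of-range index resolves to an all-zero entry,
 * i.e. RTE_PTYPE_UNKNOWN.  In the flex-descriptor Rx paths the 10-bit PTYPE
 * field sits in the low bits of the descriptor's ptype/flex-flags word; the
 * helper below shows that lookup in isolation.  The 0x3FF mask literal and
 * the function name are assumptions made for this sketch, not the driver's
 * exact macros or burst-function code.
 */
static inline void
example_flex_desc_to_packet_type(struct rte_mbuf *mb,
				 uint16_t ptype_flex_flags0,
				 const uint32_t *ptype_tbl)
{
	/* Keep only the 10-bit PTYPE index; with IAVF_MAX_PKT_TYPE (1024)
	 * entries in the table, the masked value is always in bounds.
	 */
	uint16_t ptype = rte_le_to_cpu_16(ptype_flex_flags0) & 0x3FF;

	mb->packet_type = ptype_tbl[ptype];
}

/*
 * An application can then classify the result with the generic masks from
 * rte_mbuf_ptype.h, for example:
 *
 *	if ((mb->packet_type & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_GTPU)
 *		handle_gtpu_packet(mb);   // hypothetical application handler
 */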