drivers/net/iavf/iavf_rxtx.c (dpdk.git, at commit "mbuf: rename outer IP checksum macro")
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
17 #include <rte_mbuf.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_udp.h>
24 #include <rte_ip.h>
25 #include <rte_net.h>
26 #include <rte_vect.h>
27
28 #include "iavf.h"
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
31
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
34
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
42
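/* Map a protocol extraction type to the corresponding flexible Rx
 * descriptor ID (RXDID); unknown types fall back to IAVF_RXDID_COMMS_OVS_1.
 */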
43 uint8_t
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
45 {
46         static uint8_t rxdid_map[] = {
47                 [IAVF_PROTO_XTR_NONE]      = IAVF_RXDID_COMMS_OVS_1,
48                 [IAVF_PROTO_XTR_VLAN]      = IAVF_RXDID_COMMS_AUX_VLAN,
49                 [IAVF_PROTO_XTR_IPV4]      = IAVF_RXDID_COMMS_AUX_IPV4,
50                 [IAVF_PROTO_XTR_IPV6]      = IAVF_RXDID_COMMS_AUX_IPV6,
51                 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52                 [IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
53                 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
54         };
55
56         return flex_type < RTE_DIM(rxdid_map) ?
57                                 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
58 }
59
60 static inline int
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
62 {
63         /* The following constraints must be satisfied:
64          *   thresh < rxq->nb_rx_desc
65          */
66         if (thresh >= nb_desc) {
67                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
68                              thresh, nb_desc);
69                 return -EINVAL;
70         }
71         return 0;
72 }
73
74 static inline int
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76                 uint16_t tx_free_thresh)
77 {
78         /* TX descriptors will have their RS bit set after tx_rs_thresh
79          * descriptors have been used. The TX descriptor ring will be cleaned
80          * after tx_free_thresh descriptors are used or if the number of
81          * descriptors required to transmit a packet is greater than the
82          * number of free TX descriptors.
83          *
84          * The following constraints must be satisfied:
85          *  - tx_rs_thresh must be less than the size of the ring minus 2.
86          *  - tx_free_thresh must be less than the size of the ring minus 3.
87          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
88          *  - tx_rs_thresh must be a divisor of the ring size.
89          *
90          * One descriptor in the TX ring is used as a sentinel to avoid an H/W
91          * race condition, hence the maximum threshold constraints. When set
92          * to zero, the default values are used.
93          */
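            /*
             * For example, a 512-entry ring with tx_rs_thresh = 32 and
             * tx_free_thresh = 64 satisfies all four constraints, while
             * tx_rs_thresh = 48 would fail the divisor check (512 % 48 != 0).
             */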
94         if (tx_rs_thresh >= (nb_desc - 2)) {
95                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96                              "number of TX descriptors (%u) minus 2",
97                              tx_rs_thresh, nb_desc);
98                 return -EINVAL;
99         }
100         if (tx_free_thresh >= (nb_desc - 3)) {
101                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102                              "number of TX descriptors (%u) minus 3.",
103                              tx_free_thresh, nb_desc);
104                 return -EINVAL;
105         }
106         if (tx_rs_thresh > tx_free_thresh) {
107                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108                              "equal to tx_free_thresh (%u).",
109                              tx_rs_thresh, tx_free_thresh);
110                 return -EINVAL;
111         }
112         if ((nb_desc % tx_rs_thresh) != 0) {
113                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114                              "number of TX descriptors (%u).",
115                              tx_rs_thresh, nb_desc);
116                 return -EINVAL;
117         }
118
119         return 0;
120 }
121
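/* Vector Rx is usable only when rx_free_thresh is at least
 * IAVF_VPMD_RX_MAX_BURST and evenly divides the ring size.
 */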
122 static inline bool
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
124 {
125         if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126             rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127                 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
128                 return true;
129         }
130
131         PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
132         return false;
133 }
134
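/* Vector Tx is usable only when none of the offloads in IAVF_NO_VECTOR_FLAGS
 * are requested and rs_thresh lies within the vector path's burst/free limits.
 */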
135 static inline bool
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
137 {
138         if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139             txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140             txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141                 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
142                 return true;
143         }
144         PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
145         return false;
146 }
147
148 static inline bool
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
150 {
151         int ret = true;
152
153         if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155                              "rxq->rx_free_thresh=%d, "
156                              "IAVF_RX_MAX_BURST=%d",
157                              rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
158                 ret = false;
159         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161                              "rxq->nb_rx_desc=%d, "
162                              "rxq->rx_free_thresh=%d",
163                              rxq->nb_rx_desc, rxq->rx_free_thresh);
164                 ret = false;
165         }
166         return ret;
167 }
168
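/* Zero the descriptor ring, point the extra sw_ring slots used by the
 * bulk-allocation path at the fake mbuf and reset the queue's software state.
 */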
169 static inline void
170 reset_rx_queue(struct iavf_rx_queue *rxq)
171 {
172         uint16_t len;
173         uint32_t i;
174
175         if (!rxq)
176                 return;
177
178         len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
179
180         for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181                 ((volatile char *)rxq->rx_ring)[i] = 0;
182
183         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
184
185         for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186                 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
187
188         /* for rx bulk */
189         rxq->rx_nb_avail = 0;
190         rxq->rx_next_avail = 0;
191         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
192
193         rxq->rx_tail = 0;
194         rxq->nb_rx_hold = 0;
195         rxq->pkt_first_seg = NULL;
196         rxq->pkt_last_seg = NULL;
197         rxq->rxrearm_nb = 0;
198         rxq->rxrearm_start = 0;
199 }
200
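/* Zero the Tx ring, mark every descriptor as done, link the sw_ring entries
 * into a circular list and reset the queue's bookkeeping counters.
 */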
201 static inline void
202 reset_tx_queue(struct iavf_tx_queue *txq)
203 {
204         struct iavf_tx_entry *txe;
205         uint32_t i, size;
206         uint16_t prev;
207
208         if (!txq) {
209                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
210                 return;
211         }
212
213         txe = txq->sw_ring;
214         size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
215         for (i = 0; i < size; i++)
216                 ((volatile char *)txq->tx_ring)[i] = 0;
217
218         prev = (uint16_t)(txq->nb_tx_desc - 1);
219         for (i = 0; i < txq->nb_tx_desc; i++) {
220                 txq->tx_ring[i].cmd_type_offset_bsz =
221                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
222                 txe[i].mbuf =  NULL;
223                 txe[i].last_id = i;
224                 txe[prev].next_id = i;
225                 prev = i;
226         }
227
228         txq->tx_tail = 0;
229         txq->nb_used = 0;
230
231         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
232         txq->nb_free = txq->nb_tx_desc - 1;
233
234         txq->next_dd = txq->rs_thresh - 1;
235         txq->next_rs = txq->rs_thresh - 1;
236 }
237
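/* Allocate one mbuf per Rx descriptor, program its buffer address into the
 * descriptor and record it in sw_ring.
 */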
238 static int
239 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
240 {
241         volatile union iavf_rx_desc *rxd;
242         struct rte_mbuf *mbuf = NULL;
243         uint64_t dma_addr;
244         uint16_t i;
245
246         for (i = 0; i < rxq->nb_rx_desc; i++) {
247                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
248                 if (unlikely(!mbuf)) {
249                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
250                         return -ENOMEM;
251                 }
252
253                 rte_mbuf_refcnt_set(mbuf, 1);
254                 mbuf->next = NULL;
255                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256                 mbuf->nb_segs = 1;
257                 mbuf->port = rxq->port_id;
258
259                 dma_addr =
260                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
261
262                 rxd = &rxq->rx_ring[i];
263                 rxd->read.pkt_addr = dma_addr;
264                 rxd->read.hdr_addr = 0;
265 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
266                 rxd->read.rsvd1 = 0;
267                 rxd->read.rsvd2 = 0;
268 #endif
269
270                 rxq->sw_ring[i] = mbuf;
271         }
272
273         return 0;
274 }
275
276 static inline void
277 release_rxq_mbufs(struct iavf_rx_queue *rxq)
278 {
279         uint16_t i;
280
281         if (!rxq->sw_ring)
282                 return;
283
284         for (i = 0; i < rxq->nb_rx_desc; i++) {
285                 if (rxq->sw_ring[i]) {
286                         rte_pktmbuf_free_seg(rxq->sw_ring[i]);
287                         rxq->sw_ring[i] = NULL;
288                 }
289         }
290
291         /* for rx bulk */
292         if (rxq->rx_nb_avail == 0)
293                 return;
294         for (i = 0; i < rxq->rx_nb_avail; i++) {
295                 struct rte_mbuf *mbuf;
296
297                 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
298                 rte_pktmbuf_free_seg(mbuf);
299         }
300         rxq->rx_nb_avail = 0;
301 }
302
303 static inline void
304 release_txq_mbufs(struct iavf_tx_queue *txq)
305 {
306         uint16_t i;
307
308         if (!txq || !txq->sw_ring) {
309                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
310                 return;
311         }
312
313         for (i = 0; i < txq->nb_tx_desc; i++) {
314                 if (txq->sw_ring[i].mbuf) {
315                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
316                         txq->sw_ring[i].mbuf = NULL;
317                 }
318         }
319 }
320
321 static const struct iavf_rxq_ops def_rxq_ops = {
322         .release_mbufs = release_rxq_mbufs,
323 };
324
325 static const struct iavf_txq_ops def_txq_ops = {
326         .release_mbufs = release_txq_mbufs,
327 };
328
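/* Per-RXDID handlers that copy FDIR ID, RSS hash and (for the AUX formats)
 * protocol extraction metadata from the flexible descriptor write-back
 * into the mbuf.
 */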
329 static inline void
330 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
331                                     struct rte_mbuf *mb,
332                                     volatile union iavf_rx_flex_desc *rxdp)
333 {
334         volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
335                         (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
336 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
337         uint16_t stat_err;
338 #endif
339
340         if (desc->flow_id != 0xFFFFFFFF) {
341                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
342                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
343         }
344
345 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
346         stat_err = rte_le_to_cpu_16(desc->status_error0);
347         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
348                 mb->ol_flags |= PKT_RX_RSS_HASH;
349                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
350         }
351 #endif
352 }
353
354 static inline void
355 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
356                                        struct rte_mbuf *mb,
357                                        volatile union iavf_rx_flex_desc *rxdp)
358 {
359         volatile struct iavf_32b_rx_flex_desc_comms *desc =
360                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
361         uint16_t stat_err;
362
363         stat_err = rte_le_to_cpu_16(desc->status_error0);
364         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
365                 mb->ol_flags |= PKT_RX_RSS_HASH;
366                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
367         }
368
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
370         if (desc->flow_id != 0xFFFFFFFF) {
371                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
372                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
373         }
374
375         if (rxq->xtr_ol_flag) {
376                 uint32_t metadata = 0;
377
378                 stat_err = rte_le_to_cpu_16(desc->status_error1);
379
380                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
381                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
382
383                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
384                         metadata |=
385                                 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
386
387                 if (metadata) {
388                         mb->ol_flags |= rxq->xtr_ol_flag;
389
390                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
391                 }
392         }
393 #endif
394 }
395
396 static inline void
397 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
398                                        struct rte_mbuf *mb,
399                                        volatile union iavf_rx_flex_desc *rxdp)
400 {
401         volatile struct iavf_32b_rx_flex_desc_comms *desc =
402                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
403         uint16_t stat_err;
404
405         stat_err = rte_le_to_cpu_16(desc->status_error0);
406         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
407                 mb->ol_flags |= PKT_RX_RSS_HASH;
408                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
409         }
410
411 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
412         if (desc->flow_id != 0xFFFFFFFF) {
413                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
414                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
415         }
416
417         if (rxq->xtr_ol_flag) {
418                 uint32_t metadata = 0;
419
420                 if (desc->flex_ts.flex.aux0 != 0xFFFF)
421                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
422                 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
423                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
424
425                 if (metadata) {
426                         mb->ol_flags |= rxq->xtr_ol_flag;
427
428                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
429                 }
430         }
431 #endif
432 }
433
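/* Select the rxd_to_pkt_fields handler and the protocol extraction ol_flag
 * that match the queue's RXDID; the flag is cleared when the metadata
 * dynamic field has not been registered.
 */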
434 static void
435 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
436 {
437         switch (rxdid) {
438         case IAVF_RXDID_COMMS_AUX_VLAN:
439                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
440                 rxq->rxd_to_pkt_fields =
441                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
442                 break;
443         case IAVF_RXDID_COMMS_AUX_IPV4:
444                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
445                 rxq->rxd_to_pkt_fields =
446                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
447                 break;
448         case IAVF_RXDID_COMMS_AUX_IPV6:
449                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
450                 rxq->rxd_to_pkt_fields =
451                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
452                 break;
453         case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
454                 rxq->xtr_ol_flag =
455                         rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
456                 rxq->rxd_to_pkt_fields =
457                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
458                 break;
459         case IAVF_RXDID_COMMS_AUX_TCP:
460                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
461                 rxq->rxd_to_pkt_fields =
462                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
463                 break;
464         case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
465                 rxq->xtr_ol_flag =
466                         rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
467                 rxq->rxd_to_pkt_fields =
468                         iavf_rxd_to_pkt_fields_by_comms_aux_v2;
469                 break;
470         case IAVF_RXDID_COMMS_OVS_1:
471                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
472                 break;
473         default:
474                 /* update this according to the RXDID for FLEX_DESC_NONE */
475                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
476                 break;
477         }
478
479         if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
480                 rxq->xtr_ol_flag = 0;
481 }
482
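/* Ethdev rx_queue_setup callback: validate the descriptor count and free
 * threshold, allocate the queue structure, software ring and DMA descriptor
 * ring, and record whether the bulk-allocation and vector Rx paths are
 * still allowed.
 */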
483 int
484 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
485                        uint16_t nb_desc, unsigned int socket_id,
486                        const struct rte_eth_rxconf *rx_conf,
487                        struct rte_mempool *mp)
488 {
489         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490         struct iavf_adapter *ad =
491                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
492         struct iavf_info *vf =
493                 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
494         struct iavf_vsi *vsi = &vf->vsi;
495         struct iavf_rx_queue *rxq;
496         const struct rte_memzone *mz;
497         uint32_t ring_size;
498         uint8_t proto_xtr;
499         uint16_t len;
500         uint16_t rx_free_thresh;
501
502         PMD_INIT_FUNC_TRACE();
503
504         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
505             nb_desc > IAVF_MAX_RING_DESC ||
506             nb_desc < IAVF_MIN_RING_DESC) {
507                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
508                              "invalid", nb_desc);
509                 return -EINVAL;
510         }
511
512         /* Check free threshold */
513         rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
514                          IAVF_DEFAULT_RX_FREE_THRESH :
515                          rx_conf->rx_free_thresh;
516         if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
517                 return -EINVAL;
518
519         /* Free memory if needed */
520         if (dev->data->rx_queues[queue_idx]) {
521                 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
522                 dev->data->rx_queues[queue_idx] = NULL;
523         }
524
525         /* Allocate the rx queue data structure */
526         rxq = rte_zmalloc_socket("iavf rxq",
527                                  sizeof(struct iavf_rx_queue),
528                                  RTE_CACHE_LINE_SIZE,
529                                  socket_id);
530         if (!rxq) {
531                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
532                              "rx queue data structure");
533                 return -ENOMEM;
534         }
535
536         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
537                 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
538                                 IAVF_PROTO_XTR_NONE;
539                 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
540                 rxq->proto_xtr = proto_xtr;
541         } else {
542                 rxq->rxdid = IAVF_RXDID_LEGACY_1;
543                 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
544         }
545
546         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
547                 struct virtchnl_vlan_supported_caps *stripping_support =
548                                 &vf->vlan_v2_caps.offloads.stripping_support;
549                 uint32_t stripping_cap;
550
551                 if (stripping_support->outer)
552                         stripping_cap = stripping_support->outer;
553                 else
554                         stripping_cap = stripping_support->inner;
555
556                 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
557                         rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
558                 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
559                         rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
560         } else {
561                 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
562         }
563
564         iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
565
566         rxq->mp = mp;
567         rxq->nb_rx_desc = nb_desc;
568         rxq->rx_free_thresh = rx_free_thresh;
569         rxq->queue_id = queue_idx;
570         rxq->port_id = dev->data->port_id;
571         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
572         rxq->rx_hdr_len = 0;
573         rxq->vsi = vsi;
574
575         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
576                 rxq->crc_len = RTE_ETHER_CRC_LEN;
577         else
578                 rxq->crc_len = 0;
579
580         len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
581         rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
582
583         /* Allocate the software ring. */
584         len = nb_desc + IAVF_RX_MAX_BURST;
585         rxq->sw_ring =
586                 rte_zmalloc_socket("iavf rx sw ring",
587                                    sizeof(struct rte_mbuf *) * len,
588                                    RTE_CACHE_LINE_SIZE,
589                                    socket_id);
590         if (!rxq->sw_ring) {
591                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
592                 rte_free(rxq);
593                 return -ENOMEM;
594         }
595
596         /* Allocate the maximum number of RX ring hardware descriptors with
597          * a little more to support bulk allocation.
598          */
599         len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
600         ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
601                               IAVF_DMA_MEM_ALIGN);
602         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
603                                       ring_size, IAVF_RING_BASE_ALIGN,
604                                       socket_id);
605         if (!mz) {
606                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
607                 rte_free(rxq->sw_ring);
608                 rte_free(rxq);
609                 return -ENOMEM;
610         }
611         /* Zero all the descriptors in the ring. */
612         memset(mz->addr, 0, ring_size);
613         rxq->rx_ring_phys_addr = mz->iova;
614         rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
615
616         rxq->mz = mz;
617         reset_rx_queue(rxq);
618         rxq->q_set = true;
619         dev->data->rx_queues[queue_idx] = rxq;
620         rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
621         rxq->ops = &def_rxq_ops;
622
623         if (check_rx_bulk_allow(rxq) == true) {
624                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
625                              "satisfied. Rx Burst Bulk Alloc function will be "
626                              "used on port=%d, queue=%d.",
627                              rxq->port_id, rxq->queue_id);
628         } else {
629                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
630                              "not satisfied, Scattered Rx is requested "
631                              "on port=%d, queue=%d.",
632                              rxq->port_id, rxq->queue_id);
633                 ad->rx_bulk_alloc_allowed = false;
634         }
635
636         if (check_rx_vec_allow(rxq) == false)
637                 ad->rx_vec_allowed = false;
638
639         return 0;
640 }
641
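/* Ethdev tx_queue_setup callback: validate the descriptor count and
 * thresholds, select the VLAN insertion location, allocate the queue
 * structure, software ring and DMA descriptor ring, and reset the queue.
 */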
642 int
643 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
644                        uint16_t queue_idx,
645                        uint16_t nb_desc,
646                        unsigned int socket_id,
647                        const struct rte_eth_txconf *tx_conf)
648 {
649         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
650         struct iavf_info *vf =
651                 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
652         struct iavf_tx_queue *txq;
653         const struct rte_memzone *mz;
654         uint32_t ring_size;
655         uint16_t tx_rs_thresh, tx_free_thresh;
656         uint64_t offloads;
657
658         PMD_INIT_FUNC_TRACE();
659
660         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
661
662         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
663             nb_desc > IAVF_MAX_RING_DESC ||
664             nb_desc < IAVF_MIN_RING_DESC) {
665                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
666                             "invalid", nb_desc);
667                 return -EINVAL;
668         }
669
670         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
671                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
672         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
673                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
674         if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                    return -EINVAL;
675
676         /* Free memory if needed. */
677         if (dev->data->tx_queues[queue_idx]) {
678                 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
679                 dev->data->tx_queues[queue_idx] = NULL;
680         }
681
682         /* Allocate the TX queue data structure. */
683         txq = rte_zmalloc_socket("iavf txq",
684                                  sizeof(struct iavf_tx_queue),
685                                  RTE_CACHE_LINE_SIZE,
686                                  socket_id);
687         if (!txq) {
688                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
689                              "tx queue structure");
690                 return -ENOMEM;
691         }
692
693         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
694                 struct virtchnl_vlan_supported_caps *insertion_support =
695                         &vf->vlan_v2_caps.offloads.insertion_support;
696                 uint32_t insertion_cap;
697
698                 if (insertion_support->outer)
699                         insertion_cap = insertion_support->outer;
700                 else
701                         insertion_cap = insertion_support->inner;
702
703                 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
704                         txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
705                 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
706                         txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
707         } else {
708                 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
709         }
710
711         txq->nb_tx_desc = nb_desc;
712         txq->rs_thresh = tx_rs_thresh;
713         txq->free_thresh = tx_free_thresh;
714         txq->queue_id = queue_idx;
715         txq->port_id = dev->data->port_id;
716         txq->offloads = offloads;
717         txq->tx_deferred_start = tx_conf->tx_deferred_start;
718
719         /* Allocate software ring */
720         txq->sw_ring =
721                 rte_zmalloc_socket("iavf tx sw ring",
722                                    sizeof(struct iavf_tx_entry) * nb_desc,
723                                    RTE_CACHE_LINE_SIZE,
724                                    socket_id);
725         if (!txq->sw_ring) {
726                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
727                 rte_free(txq);
728                 return -ENOMEM;
729         }
730
731         /* Allocate TX hardware ring descriptors. */
732         ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
733         ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
734         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
735                                       ring_size, IAVF_RING_BASE_ALIGN,
736                                       socket_id);
737         if (!mz) {
738                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
739                 rte_free(txq->sw_ring);
740                 rte_free(txq);
741                 return -ENOMEM;
742         }
743         txq->tx_ring_phys_addr = mz->iova;
744         txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
745
746         txq->mz = mz;
747         reset_tx_queue(txq);
748         txq->q_set = true;
749         dev->data->tx_queues[queue_idx] = txq;
750         txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
751         txq->ops = &def_txq_ops;
752
753         if (check_tx_vec_allow(txq) == false) {
754                 struct iavf_adapter *ad =
755                         IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
756                 ad->tx_vec_allowed = false;
757         }
758
759         return 0;
760 }
761
762 int
763 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
764 {
765         struct iavf_adapter *adapter =
766                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
767         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
768         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
769         struct iavf_rx_queue *rxq;
770         int err = 0;
771
772         PMD_DRV_FUNC_TRACE();
773
774         if (rx_queue_id >= dev->data->nb_rx_queues)
775                 return -EINVAL;
776
777         rxq = dev->data->rx_queues[rx_queue_id];
778
779         err = alloc_rxq_mbufs(rxq);
780         if (err) {
781                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
782                 return err;
783         }
784
785         rte_wmb();
786
787         /* Init the RX tail register. */
788         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
789         IAVF_WRITE_FLUSH(hw);
790
791         /* Ready to switch the queue on */
792         if (!vf->lv_enabled)
793                 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
794         else
795                 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
796
797         if (err)
798                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
799                             rx_queue_id);
800         else
801                 dev->data->rx_queue_state[rx_queue_id] =
802                         RTE_ETH_QUEUE_STATE_STARTED;
803
804         return err;
805 }
806
807 int
808 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
809 {
810         struct iavf_adapter *adapter =
811                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
812         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
813         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
814         struct iavf_tx_queue *txq;
815         int err = 0;
816
817         PMD_DRV_FUNC_TRACE();
818
819         if (tx_queue_id >= dev->data->nb_tx_queues)
820                 return -EINVAL;
821
822         txq = dev->data->tx_queues[tx_queue_id];
823
824         /* Init the TX tail register. */
825         IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
826         IAVF_WRITE_FLUSH(hw);
827
828         /* Ready to switch the queue on */
829         if (!vf->lv_enabled)
830                 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
831         else
832                 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
833
834         if (err)
835                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
836                             tx_queue_id);
837         else
838                 dev->data->tx_queue_state[tx_queue_id] =
839                         RTE_ETH_QUEUE_STATE_STARTED;
840
841         return err;
842 }
843
844 int
845 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
846 {
847         struct iavf_adapter *adapter =
848                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
849         struct iavf_rx_queue *rxq;
850         int err;
851
852         PMD_DRV_FUNC_TRACE();
853
854         if (rx_queue_id >= dev->data->nb_rx_queues)
855                 return -EINVAL;
856
857         err = iavf_switch_queue(adapter, rx_queue_id, true, false);
858         if (err) {
859                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
860                             rx_queue_id);
861                 return err;
862         }
863
864         rxq = dev->data->rx_queues[rx_queue_id];
865         rxq->ops->release_mbufs(rxq);
866         reset_rx_queue(rxq);
867         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
868
869         return 0;
870 }
871
872 int
873 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
874 {
875         struct iavf_adapter *adapter =
876                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
877         struct iavf_tx_queue *txq;
878         int err;
879
880         PMD_DRV_FUNC_TRACE();
881
882         if (tx_queue_id >= dev->data->nb_tx_queues)
883                 return -EINVAL;
884
885         err = iavf_switch_queue(adapter, tx_queue_id, false, false);
886         if (err) {
887                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
888                             tx_queue_id);
889                 return err;
890         }
891
892         txq = dev->data->tx_queues[tx_queue_id];
893         txq->ops->release_mbufs(txq);
894         reset_tx_queue(txq);
895         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
896
897         return 0;
898 }
899
900 void
901 iavf_dev_rx_queue_release(void *rxq)
902 {
903         struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
904
905         if (!q)
906                 return;
907
908         q->ops->release_mbufs(q);
909         rte_free(q->sw_ring);
910         rte_memzone_free(q->mz);
911         rte_free(q);
912 }
913
914 void
915 iavf_dev_tx_queue_release(void *txq)
916 {
917         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
918
919         if (!q)
920                 return;
921
922         q->ops->release_mbufs(q);
923         rte_free(q->sw_ring);
924         rte_memzone_free(q->mz);
925         rte_free(q);
926 }
927
928 void
929 iavf_stop_queues(struct rte_eth_dev *dev)
930 {
931         struct iavf_adapter *adapter =
932                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
933         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
934         struct iavf_rx_queue *rxq;
935         struct iavf_tx_queue *txq;
936         int ret, i;
937
938         /* Stop All queues */
939         if (!vf->lv_enabled) {
940                 ret = iavf_disable_queues(adapter);
941                 if (ret)
942                         PMD_DRV_LOG(WARNING, "Fail to stop queues");
943         } else {
944                 ret = iavf_disable_queues_lv(adapter);
945                 if (ret)
946                         PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
947         }
948
952         for (i = 0; i < dev->data->nb_tx_queues; i++) {
953                 txq = dev->data->tx_queues[i];
954                 if (!txq)
955                         continue;
956                 txq->ops->release_mbufs(txq);
957                 reset_tx_queue(txq);
958                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
959         }
960         for (i = 0; i < dev->data->nb_rx_queues; i++) {
961                 rxq = dev->data->rx_queues[i];
962                 if (!rxq)
963                         continue;
964                 rxq->ops->release_mbufs(rxq);
965                 reset_rx_queue(rxq);
966                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
967         }
968 }
969
978 static inline void
979 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
980 {
981         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
982                 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
983                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
984                 mb->vlan_tci =
985                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
986         } else {
987                 mb->vlan_tci = 0;
988         }
989 }
990
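/* Extract the stripped VLAN tag from L2TAG1 or the second L2TAG2 field of
 * the flexible descriptor, depending on where rx_flags says it is reported.
 */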
991 static inline void
992 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
993                           volatile union iavf_rx_flex_desc *rxdp,
994                           uint8_t rx_flags)
995 {
996         uint16_t vlan_tci = 0;
997
998         if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
999             rte_le_to_cpu_16(rxdp->wb.status_error0) &
1000             (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
1001                 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
1002
1003 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1004         if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
1005             rte_le_to_cpu_16(rxdp->wb.status_error1) &
1006             (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
1007                 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1008 #endif
1009
1010         if (vlan_tci) {
1011                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1012                 mb->vlan_tci = vlan_tci;
1013         }
1014 }
1015
1016 /* Translate the rx descriptor status and error fields to pkt flags */
1017 static inline uint64_t
1018 iavf_rxd_to_pkt_flags(uint64_t qword)
1019 {
1020         uint64_t flags;
1021         uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1022
1023 #define IAVF_RX_ERR_BITS 0x3f
1024
1025         /* Check if RSS_HASH */
1026         flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1027                                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1028                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1029
1030         /* Check if FDIR Match */
1031         flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1032                                 PKT_RX_FDIR : 0);
1033
1034         if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1035                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1036                 return flags;
1037         }
1038
1039         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1040                 flags |= PKT_RX_IP_CKSUM_BAD;
1041         else
1042                 flags |= PKT_RX_IP_CKSUM_GOOD;
1043
1044         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1045                 flags |= PKT_RX_L4_CKSUM_BAD;
1046         else
1047                 flags |= PKT_RX_L4_CKSUM_GOOD;
1048
1049         /* TODO: Oversize error bit is not processed here */
1050
1051         return flags;
1052 }
1053
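/* Fill mb->hash.fdir.hi with the flow director ID from the descriptor and
 * return PKT_RX_FDIR_ID when an ID is present.
 */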
1054 static inline uint64_t
1055 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1056 {
1057         uint64_t flags = 0;
1058 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1059         uint16_t flexbh;
1060
1061         flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1062                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1063                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1064
1065         if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1066                 mb->hash.fdir.hi =
1067                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1068                 flags |= PKT_RX_FDIR_ID;
1069         }
1070 #else
1071         mb->hash.fdir.hi =
1072                 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1073         flags |= PKT_RX_FDIR_ID;
1074 #endif
1075         return flags;
1076 }
1077
1078 #define IAVF_RX_FLEX_ERR0_BITS  \
1079         ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |       \
1080          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |  \
1081          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |  \
1082          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1083          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |        \
1084          (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1085
1086 /* Rx L3/L4 checksum */
1087 static inline uint64_t
1088 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1089 {
1090         uint64_t flags = 0;
1091
1092         /* check if HW has decoded the packet and checksum */
1093         if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1094                 return 0;
1095
1096         if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1097                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1098                 return flags;
1099         }
1100
1101         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1102                 flags |= PKT_RX_IP_CKSUM_BAD;
1103         else
1104                 flags |= PKT_RX_IP_CKSUM_GOOD;
1105
1106         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1107                 flags |= PKT_RX_L4_CKSUM_BAD;
1108         else
1109                 flags |= PKT_RX_L4_CKSUM_GOOD;
1110
1111         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1112                 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1113
1114         return flags;
1115 }
1116
1117 /* If the number of free RX descriptors is greater than the RX free
1118  * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1119  * register. Update the RDT with the value of the last processed RX
1120  * descriptor minus 1, to guarantee that the RDT register is never
1121  * equal to the RDH register, which creates a "full" ring situation
1122  * from the hardware point of view.
1123  */
1124 static inline void
1125 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1126 {
1127         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1128
1129         if (nb_hold > rxq->rx_free_thresh) {
1130                 PMD_RX_LOG(DEBUG,
1131                            "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1132                            rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1133                 rx_id = (uint16_t)((rx_id == 0) ?
1134                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1135                 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1136                 nb_hold = 0;
1137         }
1138         rxq->nb_rx_hold = nb_hold;
1139 }
1140
1141 /* implement recv_pkts */
1142 uint16_t
1143 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1144 {
1145         volatile union iavf_rx_desc *rx_ring;
1146         volatile union iavf_rx_desc *rxdp;
1147         struct iavf_rx_queue *rxq;
1148         union iavf_rx_desc rxd;
1149         struct rte_mbuf *rxe;
1150         struct rte_eth_dev *dev;
1151         struct rte_mbuf *rxm;
1152         struct rte_mbuf *nmb;
1153         uint16_t nb_rx;
1154         uint32_t rx_status;
1155         uint64_t qword1;
1156         uint16_t rx_packet_len;
1157         uint16_t rx_id, nb_hold;
1158         uint64_t dma_addr;
1159         uint64_t pkt_flags;
1160         const uint32_t *ptype_tbl;
1161
1162         nb_rx = 0;
1163         nb_hold = 0;
1164         rxq = rx_queue;
1165         rx_id = rxq->rx_tail;
1166         rx_ring = rxq->rx_ring;
1167         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1168
1169         while (nb_rx < nb_pkts) {
1170                 rxdp = &rx_ring[rx_id];
1171                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1172                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1173                             IAVF_RXD_QW1_STATUS_SHIFT;
1174
1175                 /* Check the DD bit first */
1176                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1177                         break;
1178                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1179
1180                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1181                 if (unlikely(!nmb)) {
1182                         dev = &rte_eth_devices[rxq->port_id];
1183                         dev->data->rx_mbuf_alloc_failed++;
1184                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1185                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1186                         break;
1187                 }
1188
1189                 rxd = *rxdp;
1190                 nb_hold++;
1191                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1192                 rx_id++;
1193                 if (unlikely(rx_id == rxq->nb_rx_desc))
1194                         rx_id = 0;
1195
1196                 /* Prefetch next mbuf */
1197                 rte_prefetch0(rxq->sw_ring[rx_id]);
1198
1199                 /* When next RX descriptor is on a cache line boundary,
1200                  * prefetch the next 4 RX descriptors and next 8 pointers
1201                  * to mbufs.
1202                  */
1203                 if ((rx_id & 0x3) == 0) {
1204                         rte_prefetch0(&rx_ring[rx_id]);
1205                         rte_prefetch0(rxq->sw_ring[rx_id]);
1206                 }
1207                 rxm = rxe;
1208                 dma_addr =
1209                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1210                 rxdp->read.hdr_addr = 0;
1211                 rxdp->read.pkt_addr = dma_addr;
1212
1213                 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1214                                 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1215
1216                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1217                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1218                 rxm->nb_segs = 1;
1219                 rxm->next = NULL;
1220                 rxm->pkt_len = rx_packet_len;
1221                 rxm->data_len = rx_packet_len;
1222                 rxm->port = rxq->port_id;
1223                 rxm->ol_flags = 0;
1224                 iavf_rxd_to_vlan_tci(rxm, &rxd);
1225                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1226                 rxm->packet_type =
1227                         ptype_tbl[(uint8_t)((qword1 &
1228                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1229
1230                 if (pkt_flags & PKT_RX_RSS_HASH)
1231                         rxm->hash.rss =
1232                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1233
1234                 if (pkt_flags & PKT_RX_FDIR)
1235                         pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1236
1237                 rxm->ol_flags |= pkt_flags;
1238
1239                 rx_pkts[nb_rx++] = rxm;
1240         }
1241         rxq->rx_tail = rx_id;
1242
1243         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1244
1245         return nb_rx;
1246 }
1247
1248 /* implement recv_pkts for flexible Rx descriptor */
1249 uint16_t
1250 iavf_recv_pkts_flex_rxd(void *rx_queue,
1251                         struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1252 {
1253         volatile union iavf_rx_desc *rx_ring;
1254         volatile union iavf_rx_flex_desc *rxdp;
1255         struct iavf_rx_queue *rxq;
1256         union iavf_rx_flex_desc rxd;
1257         struct rte_mbuf *rxe;
1258         struct rte_eth_dev *dev;
1259         struct rte_mbuf *rxm;
1260         struct rte_mbuf *nmb;
1261         uint16_t nb_rx;
1262         uint16_t rx_stat_err0;
1263         uint16_t rx_packet_len;
1264         uint16_t rx_id, nb_hold;
1265         uint64_t dma_addr;
1266         uint64_t pkt_flags;
1267         const uint32_t *ptype_tbl;
1268
1269         nb_rx = 0;
1270         nb_hold = 0;
1271         rxq = rx_queue;
1272         rx_id = rxq->rx_tail;
1273         rx_ring = rxq->rx_ring;
1274         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1275
1276         while (nb_rx < nb_pkts) {
1277                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1278                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1279
1280                 /* Check the DD bit first */
1281                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1282                         break;
1283                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1284
1285                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1286                 if (unlikely(!nmb)) {
1287                         dev = &rte_eth_devices[rxq->port_id];
1288                         dev->data->rx_mbuf_alloc_failed++;
1289                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1290                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1291                         break;
1292                 }
1293
1294                 rxd = *rxdp;
1295                 nb_hold++;
1296                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1297                 rx_id++;
1298                 if (unlikely(rx_id == rxq->nb_rx_desc))
1299                         rx_id = 0;
1300
1301                 /* Prefetch next mbuf */
1302                 rte_prefetch0(rxq->sw_ring[rx_id]);
1303
1304                 /* When next RX descriptor is on a cache line boundary,
1305                  * prefetch the next 4 RX descriptors and next 8 pointers
1306                  * to mbufs.
1307                  */
1308                 if ((rx_id & 0x3) == 0) {
1309                         rte_prefetch0(&rx_ring[rx_id]);
1310                         rte_prefetch0(rxq->sw_ring[rx_id]);
1311                 }
1312                 rxm = rxe;
1313                 dma_addr =
1314                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1315                 rxdp->read.hdr_addr = 0;
1316                 rxdp->read.pkt_addr = dma_addr;
1317
1318                 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1319                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1320
1321                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1322                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1323                 rxm->nb_segs = 1;
1324                 rxm->next = NULL;
1325                 rxm->pkt_len = rx_packet_len;
1326                 rxm->data_len = rx_packet_len;
1327                 rxm->port = rxq->port_id;
1328                 rxm->ol_flags = 0;
1329                 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1330                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1331                 iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
1332                 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1333                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1334                 rxm->ol_flags |= pkt_flags;
1335
1336                 rx_pkts[nb_rx++] = rxm;
1337         }
1338         rxq->rx_tail = rx_id;
1339
1340         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1341
1342         return nb_rx;
1343 }
1344
1345 /* implement recv_scattered_pkts for flexible Rx descriptor */
1346 uint16_t
1347 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1348                                   uint16_t nb_pkts)
1349 {
1350         struct iavf_rx_queue *rxq = rx_queue;
1351         union iavf_rx_flex_desc rxd;
1352         struct rte_mbuf *rxe;
1353         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1354         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1355         struct rte_mbuf *nmb, *rxm;
1356         uint16_t rx_id = rxq->rx_tail;
1357         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1358         struct rte_eth_dev *dev;
1359         uint16_t rx_stat_err0;
1360         uint64_t dma_addr;
1361         uint64_t pkt_flags;
1362
1363         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1364         volatile union iavf_rx_flex_desc *rxdp;
1365         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1366
1367         while (nb_rx < nb_pkts) {
1368                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1369                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1370
1371                 /* Check the DD bit */
1372                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1373                         break;
1374                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1375
1376                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1377                 if (unlikely(!nmb)) {
1378                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1379                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1380                         dev = &rte_eth_devices[rxq->port_id];
1381                         dev->data->rx_mbuf_alloc_failed++;
1382                         break;
1383                 }
1384
1385                 rxd = *rxdp;
1386                 nb_hold++;
1387                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1388                 rx_id++;
1389                 if (rx_id == rxq->nb_rx_desc)
1390                         rx_id = 0;
1391
1392                 /* Prefetch next mbuf */
1393                 rte_prefetch0(rxq->sw_ring[rx_id]);
1394
1395                 /* When next RX descriptor is on a cache line boundary,
1396                  * prefetch the next 4 RX descriptors and next 8 pointers
1397                  * to mbufs.
1398                  */
1399                 if ((rx_id & 0x3) == 0) {
1400                         rte_prefetch0(&rx_ring[rx_id]);
1401                         rte_prefetch0(rxq->sw_ring[rx_id]);
1402                 }
1403
1404                 rxm = rxe;
1405                 dma_addr =
1406                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1407
1408                 /* Set data buffer address and data length of the mbuf */
1409                 rxdp->read.hdr_addr = 0;
1410                 rxdp->read.pkt_addr = dma_addr;
1411                 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1412                                 IAVF_RX_FLX_DESC_PKT_LEN_M;
1413                 rxm->data_len = rx_packet_len;
1414                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1415
1416                 /* If this is the first buffer of the received packet, set the
1417                  * pointer to the first mbuf of the packet and initialize its
1418                  * context. Otherwise, update the total length and the number
1419                  * of segments of the current scattered packet, and update the
1420                  * pointer to the last mbuf of the current packet.
1421                  */
1422                 if (!first_seg) {
1423                         first_seg = rxm;
1424                         first_seg->nb_segs = 1;
1425                         first_seg->pkt_len = rx_packet_len;
1426                 } else {
1427                         first_seg->pkt_len =
1428                                 (uint16_t)(first_seg->pkt_len +
1429                                                 rx_packet_len);
1430                         first_seg->nb_segs++;
1431                         last_seg->next = rxm;
1432                 }
1433
1434                 /* If this is not the last buffer of the received packet,
1435                  * update the pointer to the last mbuf of the current scattered
1436                  * packet and continue to parse the RX ring.
1437                  */
1438                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1439                         last_seg = rxm;
1440                         continue;
1441                 }
1442
1443                 /* This is the last buffer of the received packet. If the CRC
1444                  * is not stripped by the hardware:
1445                  *  - Subtract the CRC length from the total packet length.
1446                  *  - If the last buffer only contains the whole CRC or a part
1447                  *  of it, free the mbuf associated with the last buffer. If part
1448                  *  of the CRC is also contained in the previous mbuf, subtract
1449                  *  the length of that CRC part from the data length of the
1450                  *  previous mbuf.
1451                  */
1452                 rxm->next = NULL;
1453                 if (unlikely(rxq->crc_len > 0)) {
1454                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1455                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1456                                 rte_pktmbuf_free_seg(rxm);
1457                                 first_seg->nb_segs--;
1458                                 last_seg->data_len =
1459                                         (uint16_t)(last_seg->data_len -
1460                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1461                                 last_seg->next = NULL;
1462                         } else {
1463                                 rxm->data_len = (uint16_t)(rx_packet_len -
1464                                                         RTE_ETHER_CRC_LEN);
1465                         }
1466                 }
1467
1468                 first_seg->port = rxq->port_id;
1469                 first_seg->ol_flags = 0;
1470                 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1471                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1472                 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
1473                 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1474                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1475
1476                 first_seg->ol_flags |= pkt_flags;
1477
1478                 /* Prefetch data of first segment, if configured to do so. */
1479                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1480                                           first_seg->data_off));
1481                 rx_pkts[nb_rx++] = first_seg;
1482                 first_seg = NULL;
1483         }
1484
1485         /* Record index of the next RX descriptor to probe. */
1486         rxq->rx_tail = rx_id;
1487         rxq->pkt_first_seg = first_seg;
1488         rxq->pkt_last_seg = last_seg;
1489
1490         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1491
1492         return nb_rx;
1493 }
1494
1495 /* Receive scattered packets using the legacy Rx descriptor format */
1496 uint16_t
1497 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1498                         uint16_t nb_pkts)
1499 {
1500         struct iavf_rx_queue *rxq = rx_queue;
1501         union iavf_rx_desc rxd;
1502         struct rte_mbuf *rxe;
1503         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1504         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1505         struct rte_mbuf *nmb, *rxm;
1506         uint16_t rx_id = rxq->rx_tail;
1507         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1508         struct rte_eth_dev *dev;
1509         uint32_t rx_status;
1510         uint64_t qword1;
1511         uint64_t dma_addr;
1512         uint64_t pkt_flags;
1513
1514         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1515         volatile union iavf_rx_desc *rxdp;
1516         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1517
1518         while (nb_rx < nb_pkts) {
1519                 rxdp = &rx_ring[rx_id];
1520                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1521                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1522                             IAVF_RXD_QW1_STATUS_SHIFT;
1523
1524                 /* Check the DD bit */
1525                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1526                         break;
1527                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1528
1529                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1530                 if (unlikely(!nmb)) {
1531                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1532                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1533                         dev = &rte_eth_devices[rxq->port_id];
1534                         dev->data->rx_mbuf_alloc_failed++;
1535                         break;
1536                 }
1537
1538                 rxd = *rxdp;
1539                 nb_hold++;
1540                 rxe = rxq->sw_ring[rx_id];
                     rxq->sw_ring[rx_id] = nmb;
1541                 rx_id++;
1542                 if (rx_id == rxq->nb_rx_desc)
1543                         rx_id = 0;
1544
1545                 /* Prefetch next mbuf */
1546                 rte_prefetch0(rxq->sw_ring[rx_id]);
1547
1548                 /* When next RX descriptor is on a cache line boundary,
1549                  * prefetch the next 4 RX descriptors and next 8 pointers
1550                  * to mbufs.
1551                  */
1552                 if ((rx_id & 0x3) == 0) {
1553                         rte_prefetch0(&rx_ring[rx_id]);
1554                         rte_prefetch0(rxq->sw_ring[rx_id]);
1555                 }
1556
1557                 rxm = rxe;
1558                 dma_addr =
1559                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1560
1561                 /* Set data buffer address and data length of the mbuf */
1562                 rxdp->read.hdr_addr = 0;
1563                 rxdp->read.pkt_addr = dma_addr;
1564                 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1565                                  IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1566                 rxm->data_len = rx_packet_len;
1567                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1568
1569                 /* If this is the first buffer of the received packet, set the
1570                  * pointer to the first mbuf of the packet and initialize its
1571                  * context. Otherwise, update the total length and the number
1572                  * of segments of the current scattered packet, and update the
1573                  * pointer to the last mbuf of the current packet.
1574                  */
1575                 if (!first_seg) {
1576                         first_seg = rxm;
1577                         first_seg->nb_segs = 1;
1578                         first_seg->pkt_len = rx_packet_len;
1579                 } else {
1580                         first_seg->pkt_len =
1581                                 (uint16_t)(first_seg->pkt_len +
1582                                                 rx_packet_len);
1583                         first_seg->nb_segs++;
1584                         last_seg->next = rxm;
1585                 }
1586
1587                 /* If this is not the last buffer of the received packet,
1588                  * update the pointer to the last mbuf of the current scattered
1589                  * packet and continue to parse the RX ring.
1590                  */
1591                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1592                         last_seg = rxm;
1593                         continue;
1594                 }
1595
1596                 /* This is the last buffer of the received packet. If the CRC
1597                  * is not stripped by the hardware:
1598                  *  - Subtract the CRC length from the total packet length.
1599                  *  - If the last buffer only contains the whole CRC or a part
1600                  *  of it, free the mbuf associated with the last buffer. If part
1601                  *  of the CRC is also contained in the previous mbuf, subtract
1602                  *  the length of that CRC part from the data length of the
1603                  *  previous mbuf.
1604                  */
1605                 rxm->next = NULL;
1606                 if (unlikely(rxq->crc_len > 0)) {
1607                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1608                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1609                                 rte_pktmbuf_free_seg(rxm);
1610                                 first_seg->nb_segs--;
1611                                 last_seg->data_len =
1612                                         (uint16_t)(last_seg->data_len -
1613                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1614                                 last_seg->next = NULL;
1615                         } else
1616                                 rxm->data_len = (uint16_t)(rx_packet_len -
1617                                                         RTE_ETHER_CRC_LEN);
1618                 }
1619
1620                 first_seg->port = rxq->port_id;
1621                 first_seg->ol_flags = 0;
1622                 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1623                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1624                 first_seg->packet_type =
1625                         ptype_tbl[(uint8_t)((qword1 &
1626                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1627
1628                 if (pkt_flags & PKT_RX_RSS_HASH)
1629                         first_seg->hash.rss =
1630                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1631
1632                 if (pkt_flags & PKT_RX_FDIR)
1633                         pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1634
1635                 first_seg->ol_flags |= pkt_flags;
1636
1637                 /* Prefetch data of first segment, if configured to do so. */
1638                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1639                                           first_seg->data_off));
1640                 rx_pkts[nb_rx++] = first_seg;
1641                 first_seg = NULL;
1642         }
1643
1644         /* Record index of the next RX descriptor to probe. */
1645         rxq->rx_tail = rx_id;
1646         rxq->pkt_first_seg = first_seg;
1647         rxq->pkt_last_seg = last_seg;
1648
1649         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1650
1651         return nb_rx;
1652 }
1653
1654 #define IAVF_LOOK_AHEAD 8
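/* Scan the Rx ring in groups of IAVF_LOOK_AHEAD flexible descriptors.
 * Completed descriptors are translated into their mbufs (length, packet
 * type, VLAN and offload flags) and staged in rxq->rx_stage; the scan
 * stops at the first group that is not fully done and the number of
 * staged packets is returned.
 */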
1655 static inline int
1656 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1657 {
1658         volatile union iavf_rx_flex_desc *rxdp;
1659         struct rte_mbuf **rxep;
1660         struct rte_mbuf *mb;
1661         uint16_t stat_err0;
1662         uint16_t pkt_len;
1663         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1664         int32_t i, j, nb_rx = 0;
1665         uint64_t pkt_flags;
1666         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1667
1668         rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1669         rxep = &rxq->sw_ring[rxq->rx_tail];
1670
1671         stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1672
1673         /* Make sure there is at least 1 packet to receive */
1674         if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1675                 return 0;
1676
1677         /* Scan LOOK_AHEAD descriptors at a time to determine which
1678          * descriptors reference packets that are ready to be received.
1679          */
1680         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1681              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1682                 /* Read desc statuses backwards to avoid race condition */
1683                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1684                         s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1685
1686                 rte_smp_rmb();
1687
1688                 /* Compute how many status bits were set */
1689                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1690                         nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1691
1692                 nb_rx += nb_dd;
1693
1694                 /* Translate descriptor info to mbuf parameters */
1695                 for (j = 0; j < nb_dd; j++) {
1696                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1697                                           rxq->rx_tail +
1698                                           i * IAVF_LOOK_AHEAD + j);
1699
1700                         mb = rxep[j];
1701                         pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1702                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1703                         mb->data_len = pkt_len;
1704                         mb->pkt_len = pkt_len;
1705                         mb->ol_flags = 0;
1706
1707                         mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1708                                 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1709                         iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
1710                         rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1711                         stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1712                         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1713
1714                         mb->ol_flags |= pkt_flags;
1715                 }
1716
1717                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1718                         rxq->rx_stage[i + j] = rxep[j];
1719
1720                 if (nb_dd != IAVF_LOOK_AHEAD)
1721                         break;
1722         }
1723
1724         /* Clear software ring entries */
1725         for (i = 0; i < nb_rx; i++)
1726                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1727
1728         return nb_rx;
1729 }
1730
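/* Same look-ahead scan as above, but for the legacy Rx descriptor format:
 * status, length and packet type come from qword1, while the RSS hash and
 * flow director ID are taken from qword0 when the matching flags are set.
 */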
1731 static inline int
1732 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1733 {
1734         volatile union iavf_rx_desc *rxdp;
1735         struct rte_mbuf **rxep;
1736         struct rte_mbuf *mb;
1737         uint16_t pkt_len;
1738         uint64_t qword1;
1739         uint32_t rx_status;
1740         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1741         int32_t i, j, nb_rx = 0;
1742         uint64_t pkt_flags;
1743         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1744
1745         rxdp = &rxq->rx_ring[rxq->rx_tail];
1746         rxep = &rxq->sw_ring[rxq->rx_tail];
1747
1748         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1749         rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1750                     IAVF_RXD_QW1_STATUS_SHIFT;
1751
1752         /* Make sure there is at least 1 packet to receive */
1753         if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1754                 return 0;
1755
1756         /* Scan LOOK_AHEAD descriptors at a time to determine which
1757          * descriptors reference packets that are ready to be received.
1758          */
1759         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1760              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1761                 /* Read desc statuses backwards to avoid race condition */
1762                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1763                         qword1 = rte_le_to_cpu_64(
1764                                 rxdp[j].wb.qword1.status_error_len);
1765                         s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1766                                IAVF_RXD_QW1_STATUS_SHIFT;
1767                 }
1768
1769                 rte_smp_rmb();
1770
1771                 /* Compute how many status bits were set */
1772                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1773                         nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1774
1775                 nb_rx += nb_dd;
1776
1777                 /* Translate descriptor info to mbuf parameters */
1778                 for (j = 0; j < nb_dd; j++) {
1779                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1780                                          rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1781
1782                         mb = rxep[j];
1783                         qword1 = rte_le_to_cpu_64
1784                                         (rxdp[j].wb.qword1.status_error_len);
1785                         pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1786                                   IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1787                         mb->data_len = pkt_len;
1788                         mb->pkt_len = pkt_len;
1789                         mb->ol_flags = 0;
1790                         iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1791                         pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1792                         mb->packet_type =
1793                                 ptype_tbl[(uint8_t)((qword1 &
1794                                 IAVF_RXD_QW1_PTYPE_MASK) >>
1795                                 IAVF_RXD_QW1_PTYPE_SHIFT)];
1796
1797                         if (pkt_flags & PKT_RX_RSS_HASH)
1798                                 mb->hash.rss = rte_le_to_cpu_32(
1799                                         rxdp[j].wb.qword0.hi_dword.rss);
1800
1801                         if (pkt_flags & PKT_RX_FDIR)
1802                                 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1803
1804                         mb->ol_flags |= pkt_flags;
1805                 }
1806
1807                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1808                         rxq->rx_stage[i + j] = rxep[j];
1809
1810                 if (nb_dd != IAVF_LOOK_AHEAD)
1811                         break;
1812         }
1813
1814         /* Clear software ring entries */
1815         for (i = 0; i < nb_rx; i++)
1816                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1817
1818         return nb_rx;
1819 }
1820
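/* Copy up to nb_pkts mbufs from the staging area (rxq->rx_stage) into
 * rx_pkts and advance the staging indexes accordingly.
 */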
1821 static inline uint16_t
1822 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1823                        struct rte_mbuf **rx_pkts,
1824                        uint16_t nb_pkts)
1825 {
1826         uint16_t i;
1827         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1828
1829         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1830
1831         for (i = 0; i < nb_pkts; i++)
1832                 rx_pkts[i] = stage[i];
1833
1834         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1835         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1836
1837         return nb_pkts;
1838 }
1839
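/* Refill rx_free_thresh descriptors with mbufs taken from the mempool in
 * one bulk operation, then advance the Rx tail register and the free
 * trigger. Returns -ENOMEM if the bulk allocation fails.
 */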
1840 static inline int
1841 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1842 {
1843         volatile union iavf_rx_desc *rxdp;
1844         struct rte_mbuf **rxep;
1845         struct rte_mbuf *mb;
1846         uint16_t alloc_idx, i;
1847         uint64_t dma_addr;
1848         int diag;
1849
1850         /* Allocate buffers in bulk */
1851         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1852                                 (rxq->rx_free_thresh - 1));
1853         rxep = &rxq->sw_ring[alloc_idx];
1854         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1855                                     rxq->rx_free_thresh);
1856         if (unlikely(diag != 0)) {
1857                 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1858                 return -ENOMEM;
1859         }
1860
1861         rxdp = &rxq->rx_ring[alloc_idx];
1862         for (i = 0; i < rxq->rx_free_thresh; i++) {
1863                 if (likely(i < (rxq->rx_free_thresh - 1)))
1864                         /* Prefetch next mbuf */
1865                         rte_prefetch0(rxep[i + 1]);
1866
1867                 mb = rxep[i];
1868                 rte_mbuf_refcnt_set(mb, 1);
1869                 mb->next = NULL;
1870                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1871                 mb->nb_segs = 1;
1872                 mb->port = rxq->port_id;
1873                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1874                 rxdp[i].read.hdr_addr = 0;
1875                 rxdp[i].read.pkt_addr = dma_addr;
1876         }
1877
1878         /* Update rx tail register */
1879         rte_wmb();
1880         IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1881
1882         rxq->rx_free_trigger =
1883                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1884         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1885                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1886
1887         return 0;
1888 }
1889
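/* Core of the bulk-allocation Rx path: serve pending packets from the
 * staging area first; otherwise scan the hardware ring (flexible or legacy
 * descriptors depending on rxq->rxdid), refill descriptors once the free
 * trigger is crossed, and roll the scan back if the refill fails.
 */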
1890 static inline uint16_t
1891 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1892 {
1893         struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1894         uint16_t nb_rx = 0;
1895
1896         if (!nb_pkts)
1897                 return 0;
1898
1899         if (rxq->rx_nb_avail)
1900                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1901
1902         if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1903                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1904         else
1905                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1906         rxq->rx_next_avail = 0;
1907         rxq->rx_nb_avail = nb_rx;
1908         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1909
1910         if (rxq->rx_tail > rxq->rx_free_trigger) {
1911                 if (iavf_rx_alloc_bufs(rxq) != 0) {
1912                         uint16_t i, j;
1913
1914                         /* TODO: count rx_mbuf_alloc_failed here */
1915
1916                         rxq->rx_nb_avail = 0;
1917                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1918                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1919                                 rxq->sw_ring[j] = rxq->rx_stage[i];
1920
1921                         return 0;
1922                 }
1923         }
1924
1925         if (rxq->rx_tail >= rxq->nb_rx_desc)
1926                 rxq->rx_tail = 0;
1927
1928         PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1929                    rxq->port_id, rxq->queue_id,
1930                    rxq->rx_tail, nb_rx);
1931
1932         if (rxq->rx_nb_avail)
1933                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1934
1935         return 0;
1936 }
1937
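/* Bulk-allocation Rx burst entry point: requests larger than
 * IAVF_RX_MAX_BURST are split into smaller chunks handled by rx_recv_pkts().
 */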
1938 static uint16_t
1939 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1940                          struct rte_mbuf **rx_pkts,
1941                          uint16_t nb_pkts)
1942 {
1943         uint16_t nb_rx = 0, n, count;
1944
1945         if (unlikely(nb_pkts == 0))
1946                 return 0;
1947
1948         if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1949                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1950
1951         while (nb_pkts) {
1952                 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1953                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1954                 nb_rx = (uint16_t)(nb_rx + count);
1955                 nb_pkts = (uint16_t)(nb_pkts - count);
1956                 if (count < n)
1957                         break;
1958         }
1959
1960         return nb_rx;
1961 }
1962
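/* Free Tx descriptors up to the next RS threshold boundary once hardware
 * reports the boundary descriptor as done; returns -1 if it is not done yet.
 */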
1963 static inline int
1964 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1965 {
1966         struct iavf_tx_entry *sw_ring = txq->sw_ring;
1967         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1968         uint16_t nb_tx_desc = txq->nb_tx_desc;
1969         uint16_t desc_to_clean_to;
1970         uint16_t nb_tx_to_clean;
1971
1972         volatile struct iavf_tx_desc *txd = txq->tx_ring;
1973
1974         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1975         if (desc_to_clean_to >= nb_tx_desc)
1976                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1977
1978         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1979         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1980                         rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1981                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1982                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1983                                 "(port=%d queue=%d)", desc_to_clean_to,
1984                                 txq->port_id, txq->queue_id);
1985                 return -1;
1986         }
1987
1988         if (last_desc_cleaned > desc_to_clean_to)
1989                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1990                                                         desc_to_clean_to);
1991         else
1992                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1993                                         last_desc_cleaned);
1994
1995         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1996
1997         txq->last_desc_cleaned = desc_to_clean_to;
1998         txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1999
2000         return 0;
2001 }
2002
2003 /* Check if the context descriptor is needed for TX offloading */
2004 static inline uint16_t
2005 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2006 {
2007         if (flags & PKT_TX_TCP_SEG)
2008                 return 1;
2009         if (flags & PKT_TX_VLAN_PKT &&
2010             vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2011                 return 1;
2012         return 0;
2013 }
2014
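/* Fill in the MACLEN/IPLEN/L4LEN offsets and the checksum commands of a Tx
 * data descriptor according to the packet's offload flags.
 */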
2015 static inline void
2016 iavf_txd_enable_checksum(uint64_t ol_flags,
2017                         uint32_t *td_cmd,
2018                         uint32_t *td_offset,
2019                         union iavf_tx_offload tx_offload)
2020 {
2021         /* Set MACLEN */
2022         *td_offset |= (tx_offload.l2_len >> 1) <<
2023                       IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2024
2025         /* Enable L3 checksum offloads */
2026         if (ol_flags & PKT_TX_IP_CKSUM) {
2027                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2028                 *td_offset |= (tx_offload.l3_len >> 2) <<
2029                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2030         } else if (ol_flags & PKT_TX_IPV4) {
2031                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2032                 *td_offset |= (tx_offload.l3_len >> 2) <<
2033                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2034         } else if (ol_flags & PKT_TX_IPV6) {
2035                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2036                 *td_offset |= (tx_offload.l3_len >> 2) <<
2037                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2038         }
2039
2040         if (ol_flags & PKT_TX_TCP_SEG) {
2041                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2042                 *td_offset |= (tx_offload.l4_len >> 2) <<
2043                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2044                 return;
2045         }
2046
2047         /* Enable L4 checksum offloads */
2048         switch (ol_flags & PKT_TX_L4_MASK) {
2049         case PKT_TX_TCP_CKSUM:
2050                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2051                 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2052                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2053                 break;
2054         case PKT_TX_SCTP_CKSUM:
2055                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2056                 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2057                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2058                 break;
2059         case PKT_TX_UDP_CKSUM:
2060                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2061                 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2062                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2063                 break;
2064         default:
2065                 break;
2066         }
2067 }
2068
2069 /* Set the TSO context descriptor.
2070  * Supports IP -> L4 and IP -> IP -> L4 packets.
2071  */
2072 static inline uint64_t
2073 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2074 {
2075         uint64_t ctx_desc = 0;
2076         uint32_t cd_cmd, hdr_len, cd_tso_len;
2077
2078         if (!tx_offload.l4_len) {
2079                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2080                 return ctx_desc;
2081         }
2082
2083         hdr_len = tx_offload.l2_len +
2084                   tx_offload.l3_len +
2085                   tx_offload.l4_len;
2086
2087         cd_cmd = IAVF_TX_CTX_DESC_TSO;
2088         cd_tso_len = mbuf->pkt_len - hdr_len;
2089         ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2090                      ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2091                      ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
2092
2093         return ctx_desc;
2094 }
2095
2096 /* Build the command/type/offset/size quad word of a Tx data descriptor */
2097 static inline uint64_t
2098 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2099                uint32_t td_tag)
2100 {
2101         return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2102                                 ((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
2103                                 ((uint64_t)td_offset <<
2104                                  IAVF_TXD_QW1_OFFSET_SHIFT) |
2105                                 ((uint64_t)size  <<
2106                                  IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2107                                 ((uint64_t)td_tag  <<
2108                                  IAVF_TXD_QW1_L2TAG1_SHIFT));
2109 }
2110
2111 /* TX function */
2112 uint16_t
2113 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2114 {
2115         volatile struct iavf_tx_desc *txd;
2116         volatile struct iavf_tx_desc *txr;
2117         struct iavf_tx_queue *txq;
2118         struct iavf_tx_entry *sw_ring;
2119         struct iavf_tx_entry *txe, *txn;
2120         struct rte_mbuf *tx_pkt;
2121         struct rte_mbuf *m_seg;
2122         uint16_t tx_id;
2123         uint16_t nb_tx;
2124         uint32_t td_cmd;
2125         uint32_t td_offset;
2126         uint32_t td_tag;
2127         uint64_t ol_flags;
2128         uint16_t nb_used;
2129         uint16_t nb_ctx;
2130         uint16_t tx_last;
2131         uint16_t slen;
2132         uint64_t buf_dma_addr;
2133         uint16_t cd_l2tag2 = 0;
2134         union iavf_tx_offload tx_offload = {0};
2135
2136         txq = tx_queue;
2137         sw_ring = txq->sw_ring;
2138         txr = txq->tx_ring;
2139         tx_id = txq->tx_tail;
2140         txe = &sw_ring[tx_id];
2141
2142         /* Check if the descriptor ring needs to be cleaned. */
2143         if (txq->nb_free < txq->free_thresh)
2144                 (void)iavf_xmit_cleanup(txq);
2145
2146         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2147                 td_cmd = 0;
2148                 td_tag = 0;
2149                 td_offset = 0;
2150
2151                 tx_pkt = *tx_pkts++;
2152                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2153
2154                 ol_flags = tx_pkt->ol_flags;
2155                 tx_offload.l2_len = tx_pkt->l2_len;
2156                 tx_offload.l3_len = tx_pkt->l3_len;
2157                 tx_offload.l4_len = tx_pkt->l4_len;
2158                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2159                 /* Calculate the number of context descriptors needed. */
2160                 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2161
2162                 /* The number of descriptors that must be allocated for
2163                  * a packet equals the number of segments of that packet,
2164                  * plus one context descriptor if needed.
2165                  */
2166                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2167                 tx_last = (uint16_t)(tx_id + nb_used - 1);
2168
2169                 /* Circular ring */
2170                 if (tx_last >= txq->nb_tx_desc)
2171                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2172
2173                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2174                            " tx_first=%u tx_last=%u",
2175                            txq->port_id, txq->queue_id, tx_id, tx_last);
2176
2177                 if (nb_used > txq->nb_free) {
2178                         if (iavf_xmit_cleanup(txq)) {
2179                                 if (nb_tx == 0)
2180                                         return 0;
2181                                 goto end_of_tx;
2182                         }
2183                         if (unlikely(nb_used > txq->rs_thresh)) {
2184                                 while (nb_used > txq->nb_free) {
2185                                         if (iavf_xmit_cleanup(txq)) {
2186                                                 if (nb_tx == 0)
2187                                                         return 0;
2188                                                 goto end_of_tx;
2189                                         }
2190                                 }
2191                         }
2192                 }
2193
2194                 /* Descriptor based VLAN insertion */
2195                 if (ol_flags & PKT_TX_VLAN_PKT &&
2196                     txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2197                         td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2198                         td_tag = tx_pkt->vlan_tci;
2199                 }
2200
2201                 /* According to the datasheet, bit 2 is reserved and must
2202                  * be set to 1.
2203                  */
2204                 td_cmd |= 0x04;
2205
2206                 /* Enable checksum offloading */
2207                 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2208                         iavf_txd_enable_checksum(ol_flags, &td_cmd,
2209                                                 &td_offset, tx_offload);
2210
2211                 if (nb_ctx) {
2212                         /* Setup TX context descriptor if required */
2213                         uint64_t cd_type_cmd_tso_mss =
2214                                 IAVF_TX_DESC_DTYPE_CONTEXT;
2215                         volatile struct iavf_tx_context_desc *ctx_txd =
2216                                 (volatile struct iavf_tx_context_desc *)
2217                                                         &txr[tx_id];
2218
2219                         txn = &sw_ring[txe->next_id];
2220                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2221                         if (txe->mbuf) {
2222                                 rte_pktmbuf_free_seg(txe->mbuf);
2223                                 txe->mbuf = NULL;
2224                         }
2225
2226                         /* TSO enabled */
2227                         if (ol_flags & PKT_TX_TCP_SEG)
2228                                 cd_type_cmd_tso_mss |=
2229                                         iavf_set_tso_ctx(tx_pkt, tx_offload);
2230
2231                         if (ol_flags & PKT_TX_VLAN_PKT &&
2232                            txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2233                                 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2234                                         << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2235                                 cd_l2tag2 = tx_pkt->vlan_tci;
2236                         }
2237
2238                         ctx_txd->type_cmd_tso_mss =
2239                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2240                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2241
2242                         IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2243                         txe->last_id = tx_last;
2244                         tx_id = txe->next_id;
2245                         txe = txn;
2246                 }
2247
2248                 m_seg = tx_pkt;
2249                 do {
2250                         txd = &txr[tx_id];
2251                         txn = &sw_ring[txe->next_id];
2252
2253                         if (txe->mbuf)
2254                                 rte_pktmbuf_free_seg(txe->mbuf);
2255                         txe->mbuf = m_seg;
2256
2257                         /* Setup TX Descriptor */
2258                         slen = m_seg->data_len;
2259                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
2260                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2261                         txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2262                                                                   td_offset,
2263                                                                   slen,
2264                                                                   td_tag);
2265
2266                         IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2267                         txe->last_id = tx_last;
2268                         tx_id = txe->next_id;
2269                         txe = txn;
2270                         m_seg = m_seg->next;
2271                 } while (m_seg);
2272
2273                 /* The last packet data descriptor needs End Of Packet (EOP) */
2274                 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2275                 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2276                 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2277
2278                 if (txq->nb_used >= txq->rs_thresh) {
2279                         PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2280                                    "%4u (port=%d queue=%d)",
2281                                    tx_last, txq->port_id, txq->queue_id);
2282
2283                         td_cmd |= IAVF_TX_DESC_CMD_RS;
2284
2285                         /* Update txq RS bit counters */
2286                         txq->nb_used = 0;
2287                 }
2288
2289                 txd->cmd_type_offset_bsz |=
2290                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2291                                          IAVF_TXD_QW1_CMD_SHIFT);
2292                 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2293         }
2294
2295 end_of_tx:
2296         rte_wmb();
2297
2298         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2299                    txq->port_id, txq->queue_id, tx_id, nb_tx);
2300
2301         IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2302         txq->tx_tail = tx_id;
2303
2304         return nb_tx;
2305 }
2306
2307 /* TX prepare function */
2308 uint16_t
2309 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2310               uint16_t nb_pkts)
2311 {
2312         int i, ret;
2313         uint64_t ol_flags;
2314         struct rte_mbuf *m;
2315
2316         for (i = 0; i < nb_pkts; i++) {
2317                 m = tx_pkts[i];
2318                 ol_flags = m->ol_flags;
2319
2320                 /* Non-TSO packets must not exceed IAVF_TX_MAX_MTU_SEG segments. */
2321                 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2322                         if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2323                                 rte_errno = EINVAL;
2324                                 return i;
2325                         }
2326                 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2327                            (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2328                         /* MSS values outside this range are considered malicious */
2329                         rte_errno = EINVAL;
2330                         return i;
2331                 }
2332
2333                 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2334                         rte_errno = ENOTSUP;
2335                         return i;
2336                 }
2337
2338 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2339                 ret = rte_validate_tx_offload(m);
2340                 if (ret != 0) {
2341                         rte_errno = -ret;
2342                         return i;
2343                 }
2344 #endif
2345                 ret = rte_net_intel_cksum_prepare(m);
2346                 if (ret != 0) {
2347                         rte_errno = -ret;
2348                         return i;
2349                 }
2350         }
2351
2352         return i;
2353 }
2354
2355 /* Choose the Rx burst function (vector, scattered, bulk-alloc or basic) */
2356 void
2357 iavf_set_rx_function(struct rte_eth_dev *dev)
2358 {
2359         struct iavf_adapter *adapter =
2360                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2361         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2362
2363 #ifdef RTE_ARCH_X86
2364         struct iavf_rx_queue *rxq;
2365         int i;
2366         bool use_avx2 = false;
2367 #ifdef CC_AVX512_SUPPORT
2368         bool use_avx512 = false;
2369 #endif
2370
2371         if (!iavf_rx_vec_dev_check(dev) &&
2372                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2373                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2374                         rxq = dev->data->rx_queues[i];
2375                         (void)iavf_rxq_vec_setup(rxq);
2376                 }
2377
2378                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2379                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2380                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2381                         use_avx2 = true;
2382 #ifdef CC_AVX512_SUPPORT
2383                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2384                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2385                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2386                         use_avx512 = true;
2387 #endif
2388
2389                 if (dev->data->scattered_rx) {
2390                         PMD_DRV_LOG(DEBUG,
2391                                     "Using %sVector Scattered Rx (port %d).",
2392                                     use_avx2 ? "avx2 " : "",
2393                                     dev->data->port_id);
2394                         if (vf->vf_res->vf_cap_flags &
2395                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2396                                 dev->rx_pkt_burst = use_avx2 ?
2397                                         iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2398                                         iavf_recv_scattered_pkts_vec_flex_rxd;
2399 #ifdef CC_AVX512_SUPPORT
2400                                 if (use_avx512)
2401                                         dev->rx_pkt_burst =
2402                                                 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2403 #endif
2404                         } else {
2405                                 dev->rx_pkt_burst = use_avx2 ?
2406                                         iavf_recv_scattered_pkts_vec_avx2 :
2407                                         iavf_recv_scattered_pkts_vec;
2408 #ifdef CC_AVX512_SUPPORT
2409                                 if (use_avx512)
2410                                         dev->rx_pkt_burst =
2411                                                 iavf_recv_scattered_pkts_vec_avx512;
2412 #endif
2413                         }
2414                 } else {
2415                         PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2416                                     use_avx2 ? "avx2 " : "",
2417                                     dev->data->port_id);
2418                         if (vf->vf_res->vf_cap_flags &
2419                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2420                                 dev->rx_pkt_burst = use_avx2 ?
2421                                         iavf_recv_pkts_vec_avx2_flex_rxd :
2422                                         iavf_recv_pkts_vec_flex_rxd;
2423 #ifdef CC_AVX512_SUPPORT
2424                                 if (use_avx512)
2425                                         dev->rx_pkt_burst =
2426                                                 iavf_recv_pkts_vec_avx512_flex_rxd;
2427 #endif
2428                         } else {
2429                                 dev->rx_pkt_burst = use_avx2 ?
2430                                         iavf_recv_pkts_vec_avx2 :
2431                                         iavf_recv_pkts_vec;
2432 #ifdef CC_AVX512_SUPPORT
2433                                 if (use_avx512)
2434                                         dev->rx_pkt_burst =
2435                                                 iavf_recv_pkts_vec_avx512;
2436 #endif
2437                         }
2438                 }
2439
2440                 return;
2441         }
2442 #endif
2443
2444         if (dev->data->scattered_rx) {
2445                 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2446                             dev->data->port_id);
2447                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2448                         dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2449                 else
2450                         dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2451         } else if (adapter->rx_bulk_alloc_allowed) {
2452                 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2453                             dev->data->port_id);
2454                 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2455         } else {
2456                 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2457                             dev->data->port_id);
2458                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2459                         dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2460                 else
2461                         dev->rx_pkt_burst = iavf_recv_pkts;
2462         }
2463 }
2464
2465 /* Choose the Tx burst function (vector or basic) */
2466 void
2467 iavf_set_tx_function(struct rte_eth_dev *dev)
2468 {
2469 #ifdef RTE_ARCH_X86
2470         struct iavf_tx_queue *txq;
2471         int i;
2472         bool use_avx2 = false;
2473 #ifdef CC_AVX512_SUPPORT
2474         bool use_avx512 = false;
2475 #endif
2476
2477         if (!iavf_tx_vec_dev_check(dev) &&
2478                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2479                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2480                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2481                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2482                         use_avx2 = true;
2483 #ifdef CC_AVX512_SUPPORT
2484                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2485                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2486                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2487                         use_avx512 = true;
2488 #endif
2489
2490                 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2491                             use_avx2 ? "avx2 " : "",
2492                             dev->data->port_id);
2493                 dev->tx_pkt_burst = use_avx2 ?
2494                                     iavf_xmit_pkts_vec_avx2 :
2495                                     iavf_xmit_pkts_vec;
2496 #ifdef CC_AVX512_SUPPORT
2497                 if (use_avx512)
2498                         dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2499 #endif
2500                 dev->tx_pkt_prepare = NULL;
2501
2502                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2503                         txq = dev->data->tx_queues[i];
2504                         if (!txq)
2505                                 continue;
2506 #ifdef CC_AVX512_SUPPORT
2507                         if (use_avx512)
2508                                 iavf_txq_vec_setup_avx512(txq);
2509                         else
2510                                 iavf_txq_vec_setup(txq);
2511 #else
2512                         iavf_txq_vec_setup(txq);
2513 #endif
2514                 }
2515
2516                 return;
2517         }
2518 #endif
2519
2520         PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2521                     dev->data->port_id);
2522         dev->tx_pkt_burst = iavf_xmit_pkts;
2523         dev->tx_pkt_prepare = iavf_prep_pkts;
2524 }
2525
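/* Free up to free_cnt transmitted packets from the Tx SW ring (0 means the
 * whole ring), calling iavf_xmit_cleanup() whenever more descriptors need to
 * be released; returns the number of packets actually freed.
 */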
2526 static int
2527 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2528                         uint32_t free_cnt)
2529 {
2530         struct iavf_tx_entry *swr_ring = txq->sw_ring;
2531         uint16_t i, tx_last, tx_id;
2532         uint16_t nb_tx_free_last;
2533         uint16_t nb_tx_to_clean;
2534         uint32_t pkt_cnt;
2535
2536         /* Start freeing mbufs from the entry after tx_tail */
2537         tx_last = txq->tx_tail;
2538         tx_id  = swr_ring[tx_last].next_id;
2539
2540         if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2541                 return 0;
2542
2543         nb_tx_to_clean = txq->nb_free;
2544         nb_tx_free_last = txq->nb_free;
2545         if (!free_cnt)
2546                 free_cnt = txq->nb_tx_desc;
2547
2548         /* Loop through swr_ring to count the number of
2549          * freeable mbufs and packets.
2550          */
2551         for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2552                 for (i = 0; i < nb_tx_to_clean &&
2553                         pkt_cnt < free_cnt &&
2554                         tx_id != tx_last; i++) {
2555                         if (swr_ring[tx_id].mbuf != NULL) {
2556                                 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2557                                 swr_ring[tx_id].mbuf = NULL;
2558
2559                                 /*
2560                                  * last segment in the packet,
2561                                  * increment packet count
2562                                  */
2563                                 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2564                         }
2565
2566                         tx_id = swr_ring[tx_id].next_id;
2567                 }
2568
2569                 if (txq->rs_thresh > txq->nb_tx_desc -
2570                         txq->nb_free || tx_id == tx_last)
2571                         break;
2572
2573                 if (pkt_cnt < free_cnt) {
2574                         if (iavf_xmit_cleanup(txq))
2575                                 break;
2576
2577                         nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2578                         nb_tx_free_last = txq->nb_free;
2579                 }
2580         }
2581
2582         return (int)pkt_cnt;
2583 }
2584
2585 int
2586 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2587 {
2588         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2589
2590         return iavf_tx_done_cleanup_full(q, free_cnt);
2591 }
2592
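/* ethdev callback: report the configuration of an Rx queue */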
2593 void
2594 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2595                      struct rte_eth_rxq_info *qinfo)
2596 {
2597         struct iavf_rx_queue *rxq;
2598
2599         rxq = dev->data->rx_queues[queue_id];
2600
2601         qinfo->mp = rxq->mp;
2602         qinfo->scattered_rx = dev->data->scattered_rx;
2603         qinfo->nb_desc = rxq->nb_rx_desc;
2604
2605         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2606         qinfo->conf.rx_drop_en = true;
2607         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2608 }
2609
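/* ethdev callback: report the configuration of a Tx queue */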
2610 void
2611 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2612                      struct rte_eth_txq_info *qinfo)
2613 {
2614         struct iavf_tx_queue *txq;
2615
2616         txq = dev->data->tx_queues[queue_id];
2617
2618         qinfo->nb_desc = txq->nb_tx_desc;
2619
2620         qinfo->conf.tx_free_thresh = txq->free_thresh;
2621         qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2622         qinfo->conf.offloads = txq->offloads;
2623         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2624 }
2625
2626 /* Get the number of used descriptors of an Rx queue */
2627 uint32_t
2628 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2629 {
2630 #define IAVF_RXQ_SCAN_INTERVAL 4
2631         volatile union iavf_rx_desc *rxdp;
2632         struct iavf_rx_queue *rxq;
2633         uint16_t desc = 0;
2634
2635         rxq = dev->data->rx_queues[queue_id];
2636         rxdp = &rxq->rx_ring[rxq->rx_tail];
2637
2638         while ((desc < rxq->nb_rx_desc) &&
2639                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2640                  IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2641                (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2642                 /* Check the DD bit of only one Rx descriptor in every group
2643                  * of four, to avoid checking too frequently and degrading
2644                  * performance too much.
2645                  */
2646                 desc += IAVF_RXQ_SCAN_INTERVAL;
2647                 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2648                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2649                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2650                                         desc - rxq->nb_rx_desc]);
2651         }
2652
2653         return desc;
2654 }
2655
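/* ethdev callback: report the state of the Rx descriptor at the given
 * offset from the current tail, based on its DD bit.
 */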
2656 int
2657 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2658 {
2659         struct iavf_rx_queue *rxq = rx_queue;
2660         volatile uint64_t *status;
2661         uint64_t mask;
2662         uint32_t desc;
2663
2664         if (unlikely(offset >= rxq->nb_rx_desc))
2665                 return -EINVAL;
2666
2667         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2668                 return RTE_ETH_RX_DESC_UNAVAIL;
2669
2670         desc = rxq->rx_tail + offset;
2671         if (desc >= rxq->nb_rx_desc)
2672                 desc -= rxq->nb_rx_desc;
2673
2674         status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2675         mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2676                 << IAVF_RXD_QW1_STATUS_SHIFT);
2677         if (*status & mask)
2678                 return RTE_ETH_RX_DESC_DONE;
2679
2680         return RTE_ETH_RX_DESC_AVAIL;
2681 }
2682
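/* ethdev callback: report whether the Tx descriptor at the given offset has
 * completed, by checking the DTYPE field of the descriptor at the next RS
 * threshold boundary, where completions are reported.
 */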
2683 int
2684 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2685 {
2686         struct iavf_tx_queue *txq = tx_queue;
2687         volatile uint64_t *status;
2688         uint64_t mask, expect;
2689         uint32_t desc;
2690
2691         if (unlikely(offset >= txq->nb_tx_desc))
2692                 return -EINVAL;
2693
2694         desc = txq->tx_tail + offset;
2695         /* go to next desc that has the RS bit */
2696         desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2697                 txq->rs_thresh;
2698         if (desc >= txq->nb_tx_desc) {
2699                 desc -= txq->nb_tx_desc;
2700                 if (desc >= txq->nb_tx_desc)
2701                         desc -= txq->nb_tx_desc;
2702         }
2703
2704         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2705         mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2706         expect = rte_cpu_to_le_64(
2707                  IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2708         if ((*status & mask) == expect)
2709                 return RTE_ETH_TX_DESC_DONE;
2710
2711         return RTE_ETH_TX_DESC_FULL;
2712 }
2713
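/* Return the static table translating hardware packet type indexes into
 * RTE_PTYPE_* values; unlisted entries remain RTE_PTYPE_UNKNOWN.
 */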
2714 const uint32_t *
2715 iavf_get_default_ptype_table(void)
2716 {
2717         static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2718                 __rte_cache_aligned = {
2719                 /* L2 types */
2720                 /* [0] reserved */
2721                 [1] = RTE_PTYPE_L2_ETHER,
2722                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2723                 /* [3] - [5] reserved */
2724                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2725                 /* [7] - [10] reserved */
2726                 [11] = RTE_PTYPE_L2_ETHER_ARP,
2727                 /* [12] - [21] reserved */
2728
2729                 /* Non tunneled IPv4 */
2730                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2731                        RTE_PTYPE_L4_FRAG,
2732                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2733                        RTE_PTYPE_L4_NONFRAG,
2734                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2735                        RTE_PTYPE_L4_UDP,
2736                 /* [25] reserved */
2737                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2738                        RTE_PTYPE_L4_TCP,
2739                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2740                        RTE_PTYPE_L4_SCTP,
2741                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2742                        RTE_PTYPE_L4_ICMP,
2743
2744                 /* IPv4 --> IPv4 */
2745                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2746                        RTE_PTYPE_TUNNEL_IP |
2747                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2748                        RTE_PTYPE_INNER_L4_FRAG,
2749                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2750                        RTE_PTYPE_TUNNEL_IP |
2751                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2752                        RTE_PTYPE_INNER_L4_NONFRAG,
2753                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2754                        RTE_PTYPE_TUNNEL_IP |
2755                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2756                        RTE_PTYPE_INNER_L4_UDP,
2757                 /* [32] reserved */
2758                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2759                        RTE_PTYPE_TUNNEL_IP |
2760                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2761                        RTE_PTYPE_INNER_L4_TCP,
2762                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2763                        RTE_PTYPE_TUNNEL_IP |
2764                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2765                        RTE_PTYPE_INNER_L4_SCTP,
2766                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2767                        RTE_PTYPE_TUNNEL_IP |
2768                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2769                        RTE_PTYPE_INNER_L4_ICMP,
2770
2771                 /* IPv4 --> IPv6 */
2772                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2773                        RTE_PTYPE_TUNNEL_IP |
2774                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2775                        RTE_PTYPE_INNER_L4_FRAG,
2776                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2777                        RTE_PTYPE_TUNNEL_IP |
2778                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2779                        RTE_PTYPE_INNER_L4_NONFRAG,
2780                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2781                        RTE_PTYPE_TUNNEL_IP |
2782                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2783                        RTE_PTYPE_INNER_L4_UDP,
2784                 /* [39] reserved */
2785                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2786                        RTE_PTYPE_TUNNEL_IP |
2787                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2788                        RTE_PTYPE_INNER_L4_TCP,
2789                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2790                        RTE_PTYPE_TUNNEL_IP |
2791                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2792                        RTE_PTYPE_INNER_L4_SCTP,
2793                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2794                        RTE_PTYPE_TUNNEL_IP |
2795                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2796                        RTE_PTYPE_INNER_L4_ICMP,
2797
2798                 /* IPv4 --> GRE/Teredo/VXLAN */
2799                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2800                        RTE_PTYPE_TUNNEL_GRENAT,
2801
2802                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2803                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2804                        RTE_PTYPE_TUNNEL_GRENAT |
2805                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2806                        RTE_PTYPE_INNER_L4_FRAG,
2807                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2808                        RTE_PTYPE_TUNNEL_GRENAT |
2809                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2810                        RTE_PTYPE_INNER_L4_NONFRAG,
2811                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2812                        RTE_PTYPE_TUNNEL_GRENAT |
2813                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2814                        RTE_PTYPE_INNER_L4_UDP,
2815                 /* [47] reserved */
2816                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2817                        RTE_PTYPE_TUNNEL_GRENAT |
2818                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2819                        RTE_PTYPE_INNER_L4_TCP,
2820                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2821                        RTE_PTYPE_TUNNEL_GRENAT |
2822                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2823                        RTE_PTYPE_INNER_L4_SCTP,
2824                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2825                        RTE_PTYPE_TUNNEL_GRENAT |
2826                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2827                        RTE_PTYPE_INNER_L4_ICMP,
2828
2829                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2830                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2831                        RTE_PTYPE_TUNNEL_GRENAT |
2832                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2833                        RTE_PTYPE_INNER_L4_FRAG,
2834                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2835                        RTE_PTYPE_TUNNEL_GRENAT |
2836                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2837                        RTE_PTYPE_INNER_L4_NONFRAG,
2838                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2839                        RTE_PTYPE_TUNNEL_GRENAT |
2840                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2841                        RTE_PTYPE_INNER_L4_UDP,
2842                 /* [54] reserved */
2843                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2844                        RTE_PTYPE_TUNNEL_GRENAT |
2845                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2846                        RTE_PTYPE_INNER_L4_TCP,
2847                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2848                        RTE_PTYPE_TUNNEL_GRENAT |
2849                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2850                        RTE_PTYPE_INNER_L4_SCTP,
2851                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2852                        RTE_PTYPE_TUNNEL_GRENAT |
2853                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2854                        RTE_PTYPE_INNER_L4_ICMP,
2855
2856                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2857                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2858                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2859
2860                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2861                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2862                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2863                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2864                        RTE_PTYPE_INNER_L4_FRAG,
2865                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2866                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2867                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2868                        RTE_PTYPE_INNER_L4_NONFRAG,
2869                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2870                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2871                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2872                        RTE_PTYPE_INNER_L4_UDP,
2873                 /* [62] reserved */
2874                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2875                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2876                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2877                        RTE_PTYPE_INNER_L4_TCP,
2878                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2879                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2880                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2881                        RTE_PTYPE_INNER_L4_SCTP,
2882                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2883                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2884                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2885                        RTE_PTYPE_INNER_L4_ICMP,
2886
2887                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2888                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2889                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2890                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2891                        RTE_PTYPE_INNER_L4_FRAG,
2892                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2893                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2894                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2895                        RTE_PTYPE_INNER_L4_NONFRAG,
2896                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2897                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2898                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2899                        RTE_PTYPE_INNER_L4_UDP,
2900                 /* [69] reserved */
2901                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2902                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2903                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2904                        RTE_PTYPE_INNER_L4_TCP,
2905                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2906                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2907                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2908                        RTE_PTYPE_INNER_L4_SCTP,
2909                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2910                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2911                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2912                        RTE_PTYPE_INNER_L4_ICMP,
2913                 /* [73] - [87] reserved */
2914
2915                 /* Non-tunneled IPv6 */
2916                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2917                        RTE_PTYPE_L4_FRAG,
2918                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2919                        RTE_PTYPE_L4_NONFRAG,
2920                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2921                        RTE_PTYPE_L4_UDP,
2922                 /* [91] reserved */
2923                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2924                        RTE_PTYPE_L4_TCP,
2925                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2926                        RTE_PTYPE_L4_SCTP,
2927                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2928                        RTE_PTYPE_L4_ICMP,
2929
2930                 /* IPv6 --> IPv4 */
2931                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2932                        RTE_PTYPE_TUNNEL_IP |
2933                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2934                        RTE_PTYPE_INNER_L4_FRAG,
2935                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2936                        RTE_PTYPE_TUNNEL_IP |
2937                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2938                        RTE_PTYPE_INNER_L4_NONFRAG,
2939                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2940                        RTE_PTYPE_TUNNEL_IP |
2941                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2942                        RTE_PTYPE_INNER_L4_UDP,
2943                 /* [98] reserved */
2944                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2945                        RTE_PTYPE_TUNNEL_IP |
2946                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2947                        RTE_PTYPE_INNER_L4_TCP,
2948                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2949                         RTE_PTYPE_TUNNEL_IP |
2950                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2951                         RTE_PTYPE_INNER_L4_SCTP,
2952                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2953                         RTE_PTYPE_TUNNEL_IP |
2954                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2955                         RTE_PTYPE_INNER_L4_ICMP,
2956
2957                 /* IPv6 --> IPv6 */
2958                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2959                         RTE_PTYPE_TUNNEL_IP |
2960                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2961                         RTE_PTYPE_INNER_L4_FRAG,
2962                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2963                         RTE_PTYPE_TUNNEL_IP |
2964                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2965                         RTE_PTYPE_INNER_L4_NONFRAG,
2966                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2967                         RTE_PTYPE_TUNNEL_IP |
2968                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2969                         RTE_PTYPE_INNER_L4_UDP,
2970                 /* [105] reserved */
2971                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2972                         RTE_PTYPE_TUNNEL_IP |
2973                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2974                         RTE_PTYPE_INNER_L4_TCP,
2975                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2976                         RTE_PTYPE_TUNNEL_IP |
2977                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2978                         RTE_PTYPE_INNER_L4_SCTP,
2979                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2980                         RTE_PTYPE_TUNNEL_IP |
2981                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2982                         RTE_PTYPE_INNER_L4_ICMP,
2983
2984                 /* IPv6 --> GRE/Teredo/VXLAN */
2985                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2986                         RTE_PTYPE_TUNNEL_GRENAT,
2987
2988                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2989                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2990                         RTE_PTYPE_TUNNEL_GRENAT |
2991                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2992                         RTE_PTYPE_INNER_L4_FRAG,
2993                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2994                         RTE_PTYPE_TUNNEL_GRENAT |
2995                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2996                         RTE_PTYPE_INNER_L4_NONFRAG,
2997                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2998                         RTE_PTYPE_TUNNEL_GRENAT |
2999                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3000                         RTE_PTYPE_INNER_L4_UDP,
3001                 /* [113] reserved */
3002                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3003                         RTE_PTYPE_TUNNEL_GRENAT |
3004                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3005                         RTE_PTYPE_INNER_L4_TCP,
3006                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3007                         RTE_PTYPE_TUNNEL_GRENAT |
3008                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3009                         RTE_PTYPE_INNER_L4_SCTP,
3010                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3011                         RTE_PTYPE_TUNNEL_GRENAT |
3012                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3013                         RTE_PTYPE_INNER_L4_ICMP,
3014
3015                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3016                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3017                         RTE_PTYPE_TUNNEL_GRENAT |
3018                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3019                         RTE_PTYPE_INNER_L4_FRAG,
3020                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3021                         RTE_PTYPE_TUNNEL_GRENAT |
3022                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3023                         RTE_PTYPE_INNER_L4_NONFRAG,
3024                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3025                         RTE_PTYPE_TUNNEL_GRENAT |
3026                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3027                         RTE_PTYPE_INNER_L4_UDP,
3028                 /* [120] reserved */
3029                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3030                         RTE_PTYPE_TUNNEL_GRENAT |
3031                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3032                         RTE_PTYPE_INNER_L4_TCP,
3033                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3034                         RTE_PTYPE_TUNNEL_GRENAT |
3035                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3036                         RTE_PTYPE_INNER_L4_SCTP,
3037                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3038                         RTE_PTYPE_TUNNEL_GRENAT |
3039                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3040                         RTE_PTYPE_INNER_L4_ICMP,
3041
3042                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3043                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3044                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3045
3046                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3047                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3048                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3049                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3050                         RTE_PTYPE_INNER_L4_FRAG,
3051                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3052                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3053                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3054                         RTE_PTYPE_INNER_L4_NONFRAG,
3055                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3056                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3057                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3058                         RTE_PTYPE_INNER_L4_UDP,
3059                 /* [128] reserved */
3060                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3061                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3062                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3063                         RTE_PTYPE_INNER_L4_TCP,
3064                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3065                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3066                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3067                         RTE_PTYPE_INNER_L4_SCTP,
3068                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3069                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3070                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3071                         RTE_PTYPE_INNER_L4_ICMP,
3072
3073                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3074                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3075                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3076                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3077                         RTE_PTYPE_INNER_L4_FRAG,
3078                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3079                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3080                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3081                         RTE_PTYPE_INNER_L4_NONFRAG,
3082                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3083                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3084                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3085                         RTE_PTYPE_INNER_L4_UDP,
3086                 /* [135] reserved */
3087                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3088                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3089                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3090                         RTE_PTYPE_INNER_L4_TCP,
3091                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3092                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3093                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3094                         RTE_PTYPE_INNER_L4_SCTP,
3095                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3096                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3097                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3098                         RTE_PTYPE_INNER_L4_ICMP,
3099                 /* [139] - [299] reserved */
3100
3101                 /* PPPoE */
3102                 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3103                 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3104
3105                 /* PPPoE --> IPv4 */
3106                 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3107                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3108                         RTE_PTYPE_L4_FRAG,
3109                 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3110                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3111                         RTE_PTYPE_L4_NONFRAG,
3112                 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3113                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3114                         RTE_PTYPE_L4_UDP,
3115                 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3116                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3117                         RTE_PTYPE_L4_TCP,
3118                 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3119                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3120                         RTE_PTYPE_L4_SCTP,
3121                 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3122                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3123                         RTE_PTYPE_L4_ICMP,
3124
3125                 /* PPPoE --> IPv6 */
3126                 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3127                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3128                         RTE_PTYPE_L4_FRAG,
3129                 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3130                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3131                         RTE_PTYPE_L4_NONFRAG,
3132                 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3133                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3134                         RTE_PTYPE_L4_UDP,
3135                 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3136                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3137                         RTE_PTYPE_L4_TCP,
3138                 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3139                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3140                         RTE_PTYPE_L4_SCTP,
3141                 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3142                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3143                         RTE_PTYPE_L4_ICMP,
3144                 /* [314] - [324] reserved */
3145
3146                 /* IPv4/IPv6 --> GTPC/GTPU */
3147                 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3148                         RTE_PTYPE_TUNNEL_GTPC,
3149                 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3150                         RTE_PTYPE_TUNNEL_GTPC,
3151                 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3152                         RTE_PTYPE_TUNNEL_GTPC,
3153                 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3154                         RTE_PTYPE_TUNNEL_GTPC,
3155                 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3156                         RTE_PTYPE_TUNNEL_GTPU,
3157                 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3158                         RTE_PTYPE_TUNNEL_GTPU,
3159
3160                 /* IPv4 --> GTPU --> IPv4 */
3161                 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3162                         RTE_PTYPE_TUNNEL_GTPU |
3163                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3164                         RTE_PTYPE_INNER_L4_FRAG,
3165                 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3166                         RTE_PTYPE_TUNNEL_GTPU |
3167                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3168                         RTE_PTYPE_INNER_L4_NONFRAG,
3169                 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3170                         RTE_PTYPE_TUNNEL_GTPU |
3171                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3172                         RTE_PTYPE_INNER_L4_UDP,
3173                 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3174                         RTE_PTYPE_TUNNEL_GTPU |
3175                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3176                         RTE_PTYPE_INNER_L4_TCP,
3177                 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3178                         RTE_PTYPE_TUNNEL_GTPU |
3179                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3180                         RTE_PTYPE_INNER_L4_ICMP,
3181
3182                 /* IPv6 --> GTPU --> IPv4 */
3183                 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3184                         RTE_PTYPE_TUNNEL_GTPU |
3185                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3186                         RTE_PTYPE_INNER_L4_FRAG,
3187                 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3188                         RTE_PTYPE_TUNNEL_GTPU |
3189                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3190                         RTE_PTYPE_INNER_L4_NONFRAG,
3191                 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3192                         RTE_PTYPE_TUNNEL_GTPU |
3193                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3194                         RTE_PTYPE_INNER_L4_UDP,
3195                 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3196                         RTE_PTYPE_TUNNEL_GTPU |
3197                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3198                         RTE_PTYPE_INNER_L4_TCP,
3199                 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3200                         RTE_PTYPE_TUNNEL_GTPU |
3201                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3202                         RTE_PTYPE_INNER_L4_ICMP,
3203
3204                 /* IPv4 --> GTPU --> IPv6 */
3205                 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3206                         RTE_PTYPE_TUNNEL_GTPU |
3207                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3208                         RTE_PTYPE_INNER_L4_FRAG,
3209                 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3210                         RTE_PTYPE_TUNNEL_GTPU |
3211                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3212                         RTE_PTYPE_INNER_L4_NONFRAG,
3213                 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3214                         RTE_PTYPE_TUNNEL_GTPU |
3215                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3216                         RTE_PTYPE_INNER_L4_UDP,
3217                 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3218                         RTE_PTYPE_TUNNEL_GTPU |
3219                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3220                         RTE_PTYPE_INNER_L4_TCP,
3221                 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3222                         RTE_PTYPE_TUNNEL_GTPU |
3223                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3224                         RTE_PTYPE_INNER_L4_ICMP,
3225
3226                 /* IPv6 --> GTPU --> IPv6 */
3227                 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3228                         RTE_PTYPE_TUNNEL_GTPU |
3229                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3230                         RTE_PTYPE_INNER_L4_FRAG,
3231                 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3232                         RTE_PTYPE_TUNNEL_GTPU |
3233                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3234                         RTE_PTYPE_INNER_L4_NONFRAG,
3235                 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3236                         RTE_PTYPE_TUNNEL_GTPU |
3237                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3238                         RTE_PTYPE_INNER_L4_UDP,
3239                 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3240                         RTE_PTYPE_TUNNEL_GTPU |
3241                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3242                         RTE_PTYPE_INNER_L4_TCP,
3243                 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3244                         RTE_PTYPE_TUNNEL_GTPU |
3245                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3246                         RTE_PTYPE_INNER_L4_ICMP,
3247
3248                 /* IPv4 --> UDP ECPRI */
3249                 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3250                         RTE_PTYPE_L4_UDP,
3251                 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3252                         RTE_PTYPE_L4_UDP,
3253                 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3254                         RTE_PTYPE_L4_UDP,
3255                 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3256                         RTE_PTYPE_L4_UDP,
3257                 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3258                         RTE_PTYPE_L4_UDP,
3259                 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3260                         RTE_PTYPE_L4_UDP,
3261                 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3262                         RTE_PTYPE_L4_UDP,
3263                 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3264                         RTE_PTYPE_L4_UDP,
3265                 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3266                         RTE_PTYPE_L4_UDP,
3267                 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3268                         RTE_PTYPE_L4_UDP,
3269
3270                 /* IPv6 --> UDP ECPRI */
3271                 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3272                         RTE_PTYPE_L4_UDP,
3273                 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3274                         RTE_PTYPE_L4_UDP,
3275                 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3276                         RTE_PTYPE_L4_UDP,
3277                 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3278                         RTE_PTYPE_L4_UDP,
3279                 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3280                         RTE_PTYPE_L4_UDP,
3281                 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3282                         RTE_PTYPE_L4_UDP,
3283                 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3284                         RTE_PTYPE_L4_UDP,
3285                 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3286                         RTE_PTYPE_L4_UDP,
3287                 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3288                         RTE_PTYPE_L4_UDP,
3289                 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3290                         RTE_PTYPE_L4_UDP,
3291                 /* All others reserved */
3292         };
3293
3294         return ptype_tbl;
3295 }