net/iavf: fix VLAN insert
dpdk.git: drivers/net/iavf/iavf_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
17 #include <rte_mbuf.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_udp.h>
24 #include <rte_ip.h>
25 #include <rte_net.h>
26 #include <rte_vect.h>
27
28 #include "iavf.h"
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
31
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
34
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
42
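/* Map a protocol extraction type to the RXDID programmed for the Rx queue.
 * Unknown types fall back to the OVS comms descriptor format.
 */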
43 uint8_t
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
45 {
46         static uint8_t rxdid_map[] = {
47                 [IAVF_PROTO_XTR_NONE]      = IAVF_RXDID_COMMS_OVS_1,
48                 [IAVF_PROTO_XTR_VLAN]      = IAVF_RXDID_COMMS_AUX_VLAN,
49                 [IAVF_PROTO_XTR_IPV4]      = IAVF_RXDID_COMMS_AUX_IPV4,
50                 [IAVF_PROTO_XTR_IPV6]      = IAVF_RXDID_COMMS_AUX_IPV6,
51                 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52                 [IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
53                 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
54         };
55
56         return flex_type < RTE_DIM(rxdid_map) ?
57                                 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
58 }
59
60 static inline int
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
62 {
63         /* The following constraints must be satisfied:
64          *   thresh < rxq->nb_rx_desc
65          */
66         if (thresh >= nb_desc) {
67                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
68                              thresh, nb_desc);
69                 return -EINVAL;
70         }
71         return 0;
72 }
73
74 static inline int
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76                 uint16_t tx_free_thresh)
77 {
78         /* TX descriptors will have their RS bit set after tx_rs_thresh
79          * descriptors have been used. The TX descriptor ring will be cleaned
80          * after tx_free_thresh descriptors are used or if the number of
81          * descriptors required to transmit a packet is greater than the
82          * number of free TX descriptors.
83          *
84          * The following constraints must be satisfied:
85          *  - tx_rs_thresh must be less than the size of the ring minus 2.
86          *  - tx_free_thresh must be less than the size of the ring minus 3.
87          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
88          *  - tx_rs_thresh must be a divisor of the ring size.
89          *
90          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91          * race condition, hence the maximum threshold constraints. When set
92          * to zero use default values.
93          */
94         if (tx_rs_thresh >= (nb_desc - 2)) {
95                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96                              "number of TX descriptors (%u) minus 2",
97                              tx_rs_thresh, nb_desc);
98                 return -EINVAL;
99         }
100         if (tx_free_thresh >= (nb_desc - 3)) {
101                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102                              "number of TX descriptors (%u) minus 3.",
103                              tx_free_thresh, nb_desc);
104                 return -EINVAL;
105         }
106         if (tx_rs_thresh > tx_free_thresh) {
107                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108                              "equal to tx_free_thresh (%u).",
109                              tx_rs_thresh, tx_free_thresh);
110                 return -EINVAL;
111         }
112         if ((nb_desc % tx_rs_thresh) != 0) {
113                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114                              "number of TX descriptors (%u).",
115                              tx_rs_thresh, nb_desc);
116                 return -EINVAL;
117         }
118
119         return 0;
120 }
121
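/* Check whether the queue configuration allows the vector Rx path:
 * rx_free_thresh must be large enough and a divisor of the ring size.
 */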
122 static inline bool
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
124 {
125         if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126             rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127                 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
128                 return true;
129         }
130
131         PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
132         return false;
133 }
134
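/* Check whether the configured offloads and rs_thresh allow the vector Tx path. */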
135 static inline bool
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
137 {
138         if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139             txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140             txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141                 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
142                 return true;
143         }
144         PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
145         return false;
146 }
147
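/* Check the preconditions for using the bulk allocation Rx path. */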
148 static inline bool
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
150 {
151         int ret = true;
152
153         if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155                              "rxq->rx_free_thresh=%d, "
156                              "IAVF_RX_MAX_BURST=%d",
157                              rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
158                 ret = false;
159         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161                              "rxq->nb_rx_desc=%d, "
162                              "rxq->rx_free_thresh=%d",
163                              rxq->nb_rx_desc, rxq->rx_free_thresh);
164                 ret = false;
165         }
166         return ret;
167 }
168
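/* Zero the Rx descriptor ring and reset the software state of the queue. */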
169 static inline void
170 reset_rx_queue(struct iavf_rx_queue *rxq)
171 {
172         uint16_t len;
173         uint32_t i;
174
175         if (!rxq)
176                 return;
177
178         len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
179
180         for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181                 ((volatile char *)rxq->rx_ring)[i] = 0;
182
183         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
184
185         for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186                 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
187
188         /* for rx bulk */
189         rxq->rx_nb_avail = 0;
190         rxq->rx_next_avail = 0;
191         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
192
193         rxq->rx_tail = 0;
194         rxq->nb_rx_hold = 0;
195         rxq->pkt_first_seg = NULL;
196         rxq->pkt_last_seg = NULL;
197         rxq->rxrearm_nb = 0;
198         rxq->rxrearm_start = 0;
199 }
200
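/* Mark all Tx descriptors as done, relink the software ring entries and
 * reset the queue bookkeeping.
 */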
201 static inline void
202 reset_tx_queue(struct iavf_tx_queue *txq)
203 {
204         struct iavf_tx_entry *txe;
205         uint32_t i, size;
206         uint16_t prev;
207
208         if (!txq) {
209                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
210                 return;
211         }
212
213         txe = txq->sw_ring;
214         size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
215         for (i = 0; i < size; i++)
216                 ((volatile char *)txq->tx_ring)[i] = 0;
217
218         prev = (uint16_t)(txq->nb_tx_desc - 1);
219         for (i = 0; i < txq->nb_tx_desc; i++) {
220                 txq->tx_ring[i].cmd_type_offset_bsz =
221                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
222                 txe[i].mbuf = NULL;
223                 txe[i].last_id = i;
224                 txe[prev].next_id = i;
225                 prev = i;
226         }
227
228         txq->tx_tail = 0;
229         txq->nb_used = 0;
230
231         txq->last_desc_cleaned = txq->nb_tx_desc - 1;
232         txq->nb_free = txq->nb_tx_desc - 1;
233
234         txq->next_dd = txq->rs_thresh - 1;
235         txq->next_rs = txq->rs_thresh - 1;
236 }
237
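/* Allocate one mbuf per Rx descriptor and program its DMA address into the ring. */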
238 static int
239 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
240 {
241         volatile union iavf_rx_desc *rxd;
242         struct rte_mbuf *mbuf = NULL;
243         uint64_t dma_addr;
244         uint16_t i;
245
246         for (i = 0; i < rxq->nb_rx_desc; i++) {
247                 mbuf = rte_mbuf_raw_alloc(rxq->mp);
248                 if (unlikely(!mbuf)) {
249                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
250                         return -ENOMEM;
251                 }
252
253                 rte_mbuf_refcnt_set(mbuf, 1);
254                 mbuf->next = NULL;
255                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256                 mbuf->nb_segs = 1;
257                 mbuf->port = rxq->port_id;
258
259                 dma_addr =
260                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
261
262                 rxd = &rxq->rx_ring[i];
263                 rxd->read.pkt_addr = dma_addr;
264                 rxd->read.hdr_addr = 0;
265 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
266                 rxd->read.rsvd1 = 0;
267                 rxd->read.rsvd2 = 0;
268 #endif
269
270                 rxq->sw_ring[i] = mbuf;
271         }
272
273         return 0;
274 }
275
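/* Free all mbufs held by the Rx queue, including those staged for bulk receive. */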
276 static inline void
277 release_rxq_mbufs(struct iavf_rx_queue *rxq)
278 {
279         uint16_t i;
280
281         if (!rxq->sw_ring)
282                 return;
283
284         for (i = 0; i < rxq->nb_rx_desc; i++) {
285                 if (rxq->sw_ring[i]) {
286                         rte_pktmbuf_free_seg(rxq->sw_ring[i]);
287                         rxq->sw_ring[i] = NULL;
288                 }
289         }
290
291         /* for rx bulk */
292         if (rxq->rx_nb_avail == 0)
293                 return;
294         for (i = 0; i < rxq->rx_nb_avail; i++) {
295                 struct rte_mbuf *mbuf;
296
297                 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
298                 rte_pktmbuf_free_seg(mbuf);
299         }
300         rxq->rx_nb_avail = 0;
301 }
302
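/* Free all mbufs still attached to the Tx software ring. */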
303 static inline void
304 release_txq_mbufs(struct iavf_tx_queue *txq)
305 {
306         uint16_t i;
307
308         if (!txq || !txq->sw_ring) {
309                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
310                 return;
311         }
312
313         for (i = 0; i < txq->nb_tx_desc; i++) {
314                 if (txq->sw_ring[i].mbuf) {
315                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
316                         txq->sw_ring[i].mbuf = NULL;
317                 }
318         }
319 }
320
321 static const struct iavf_rxq_ops def_rxq_ops = {
322         .release_mbufs = release_rxq_mbufs,
323 };
324
325 static const struct iavf_txq_ops def_txq_ops = {
326         .release_mbufs = release_txq_mbufs,
327 };
328
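/* Fill FDIR and RSS hash mbuf fields from an OVS flexible Rx descriptor. */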
329 static inline void
330 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
331                                     struct rte_mbuf *mb,
332                                     volatile union iavf_rx_flex_desc *rxdp)
333 {
334         volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
335                         (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
336 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
337         uint16_t stat_err;
338 #endif
339
340         if (desc->flow_id != 0xFFFFFFFF) {
341                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
342                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
343         }
344
345 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
346         stat_err = rte_le_to_cpu_16(desc->status_error0);
347         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
348                 mb->ol_flags |= PKT_RX_RSS_HASH;
349                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
350         }
351 #endif
352 }
353
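/* Fill RSS/FDIR mbuf fields and extract protocol metadata from an AUX v1
 * flexible Rx descriptor; metadata validity is signalled in status_error1.
 */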
354 static inline void
355 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
356                                        struct rte_mbuf *mb,
357                                        volatile union iavf_rx_flex_desc *rxdp)
358 {
359         volatile struct iavf_32b_rx_flex_desc_comms *desc =
360                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
361         uint16_t stat_err;
362
363         stat_err = rte_le_to_cpu_16(desc->status_error0);
364         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
365                 mb->ol_flags |= PKT_RX_RSS_HASH;
366                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
367         }
368
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
370         if (desc->flow_id != 0xFFFFFFFF) {
371                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
372                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
373         }
374
375         if (rxq->xtr_ol_flag) {
376                 uint32_t metadata = 0;
377
378                 stat_err = rte_le_to_cpu_16(desc->status_error1);
379
380                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
381                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
382
383                 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
384                         metadata |=
385                                 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
386
387                 if (metadata) {
388                         mb->ol_flags |= rxq->xtr_ol_flag;
389
390                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
391                 }
392         }
393 #endif
394 }
395
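/* Same as the AUX v1 handler, except that AUX v2 descriptors mark invalid
 * metadata with an 0xFFFF sentinel in the aux0/aux1 fields.
 */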
396 static inline void
397 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
398                                        struct rte_mbuf *mb,
399                                        volatile union iavf_rx_flex_desc *rxdp)
400 {
401         volatile struct iavf_32b_rx_flex_desc_comms *desc =
402                         (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
403         uint16_t stat_err;
404
405         stat_err = rte_le_to_cpu_16(desc->status_error0);
406         if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
407                 mb->ol_flags |= PKT_RX_RSS_HASH;
408                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
409         }
410
411 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
412         if (desc->flow_id != 0xFFFFFFFF) {
413                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
414                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
415         }
416
417         if (rxq->xtr_ol_flag) {
418                 uint32_t metadata = 0;
419
420                 if (desc->flex_ts.flex.aux0 != 0xFFFF)
421                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
422                 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
423                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
424
425                 if (metadata) {
426                         mb->ol_flags |= rxq->xtr_ol_flag;
427
428                         *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
429                 }
430         }
431 #endif
432 }
433
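/* Select the descriptor-to-mbuf conversion handler and the dynamic mbuf flag
 * for protocol extraction according to the queue's RXDID.
 */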
434 static void
435 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
436 {
437         switch (rxdid) {
438         case IAVF_RXDID_COMMS_AUX_VLAN:
439                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
440                 rxq->rxd_to_pkt_fields =
441                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
442                 break;
443         case IAVF_RXDID_COMMS_AUX_IPV4:
444                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
445                 rxq->rxd_to_pkt_fields =
446                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
447                 break;
448         case IAVF_RXDID_COMMS_AUX_IPV6:
449                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
450                 rxq->rxd_to_pkt_fields =
451                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
452                 break;
453         case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
454                 rxq->xtr_ol_flag =
455                         rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
456                 rxq->rxd_to_pkt_fields =
457                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
458                 break;
459         case IAVF_RXDID_COMMS_AUX_TCP:
460                 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
461                 rxq->rxd_to_pkt_fields =
462                         iavf_rxd_to_pkt_fields_by_comms_aux_v1;
463                 break;
464         case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
465                 rxq->xtr_ol_flag =
466                         rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
467                 rxq->rxd_to_pkt_fields =
468                         iavf_rxd_to_pkt_fields_by_comms_aux_v2;
469                 break;
470         case IAVF_RXDID_COMMS_OVS_1:
471                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
472                 break;
473         default:
474                 /* update this according to the RXDID for FLEX_DESC_NONE */
475                 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
476                 break;
477         }
478
479         if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
480                 rxq->xtr_ol_flag = 0;
481 }
482
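/* Set up an Rx queue: validate parameters, allocate the queue structure,
 * software ring and DMA descriptor ring, then check which Rx paths are usable.
 */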
483 int
484 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
485                        uint16_t nb_desc, unsigned int socket_id,
486                        const struct rte_eth_rxconf *rx_conf,
487                        struct rte_mempool *mp)
488 {
489         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490         struct iavf_adapter *ad =
491                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
492         struct iavf_info *vf =
493                 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
494         struct iavf_vsi *vsi = &vf->vsi;
495         struct iavf_rx_queue *rxq;
496         const struct rte_memzone *mz;
497         uint32_t ring_size;
498         uint8_t proto_xtr;
499         uint16_t len;
500         uint16_t rx_free_thresh;
501
502         PMD_INIT_FUNC_TRACE();
503
504         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
505             nb_desc > IAVF_MAX_RING_DESC ||
506             nb_desc < IAVF_MIN_RING_DESC) {
507                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
508                              "invalid", nb_desc);
509                 return -EINVAL;
510         }
511
512         /* Check free threshold */
513         rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
514                          IAVF_DEFAULT_RX_FREE_THRESH :
515                          rx_conf->rx_free_thresh;
516         if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
517                 return -EINVAL;
518
519         /* Free memory if needed */
520         if (dev->data->rx_queues[queue_idx]) {
521                 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
522                 dev->data->rx_queues[queue_idx] = NULL;
523         }
524
525         /* Allocate the rx queue data structure */
526         rxq = rte_zmalloc_socket("iavf rxq",
527                                  sizeof(struct iavf_rx_queue),
528                                  RTE_CACHE_LINE_SIZE,
529                                  socket_id);
530         if (!rxq) {
531                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
532                              "rx queue data structure");
533                 return -ENOMEM;
534         }
535
536         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
537                 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
538                                 IAVF_PROTO_XTR_NONE;
539                 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
540                 rxq->proto_xtr = proto_xtr;
541         } else {
542                 rxq->rxdid = IAVF_RXDID_LEGACY_1;
543                 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
544         }
545
546         iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
547
548         rxq->mp = mp;
549         rxq->nb_rx_desc = nb_desc;
550         rxq->rx_free_thresh = rx_free_thresh;
551         rxq->queue_id = queue_idx;
552         rxq->port_id = dev->data->port_id;
553         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
554         rxq->rx_hdr_len = 0;
555         rxq->vsi = vsi;
556
557         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
558                 rxq->crc_len = RTE_ETHER_CRC_LEN;
559         else
560                 rxq->crc_len = 0;
561
562         len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
563         rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
564
565         /* Allocate the software ring. */
566         len = nb_desc + IAVF_RX_MAX_BURST;
567         rxq->sw_ring =
568                 rte_zmalloc_socket("iavf rx sw ring",
569                                    sizeof(struct rte_mbuf *) * len,
570                                    RTE_CACHE_LINE_SIZE,
571                                    socket_id);
572         if (!rxq->sw_ring) {
573                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
574                 rte_free(rxq);
575                 return -ENOMEM;
576         }
577
578         /* Allocate the maximum number of RX ring hardware descriptors,
579          * with a little more to support bulk allocation.
580          */
581         len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
582         ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
583                               IAVF_DMA_MEM_ALIGN);
584         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
585                                       ring_size, IAVF_RING_BASE_ALIGN,
586                                       socket_id);
587         if (!mz) {
588                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
589                 rte_free(rxq->sw_ring);
590                 rte_free(rxq);
591                 return -ENOMEM;
592         }
593         /* Zero all the descriptors in the ring. */
594         memset(mz->addr, 0, ring_size);
595         rxq->rx_ring_phys_addr = mz->iova;
596         rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
597
598         rxq->mz = mz;
599         reset_rx_queue(rxq);
600         rxq->q_set = true;
601         dev->data->rx_queues[queue_idx] = rxq;
602         rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
603         rxq->ops = &def_rxq_ops;
604
605         if (check_rx_bulk_allow(rxq) == true) {
606                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
607                              "satisfied. Rx Burst Bulk Alloc function will be "
608                              "used on port=%d, queue=%d.",
609                              rxq->port_id, rxq->queue_id);
610         } else {
611                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
612                              "not satisfied, Scattered Rx is requested "
613                              "on port=%d, queue=%d.",
614                              rxq->port_id, rxq->queue_id);
615                 ad->rx_bulk_alloc_allowed = false;
616         }
617
618         if (check_rx_vec_allow(rxq) == false)
619                 ad->rx_vec_allowed = false;
620
621         return 0;
622 }
623
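/* Set up a Tx queue: validate thresholds, pick the VLAN insertion location,
 * and allocate the software ring and DMA descriptor ring.
 */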
624 int
625 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
626                        uint16_t queue_idx,
627                        uint16_t nb_desc,
628                        unsigned int socket_id,
629                        const struct rte_eth_txconf *tx_conf)
630 {
631         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
632         struct iavf_info *vf =
633                 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
634         struct iavf_tx_queue *txq;
635         const struct rte_memzone *mz;
636         uint32_t ring_size;
637         uint16_t tx_rs_thresh, tx_free_thresh;
638         uint64_t offloads;
639
640         PMD_INIT_FUNC_TRACE();
641
642         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
643
644         if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
645             nb_desc > IAVF_MAX_RING_DESC ||
646             nb_desc < IAVF_MIN_RING_DESC) {
647                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
648                             "invalid", nb_desc);
649                 return -EINVAL;
650         }
651
652         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
653                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
654         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
655                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
656         if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                    return -EINVAL;
657
658         /* Free memory if needed. */
659         if (dev->data->tx_queues[queue_idx]) {
660                 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
661                 dev->data->tx_queues[queue_idx] = NULL;
662         }
663
664         /* Allocate the TX queue data structure. */
665         txq = rte_zmalloc_socket("iavf txq",
666                                  sizeof(struct iavf_tx_queue),
667                                  RTE_CACHE_LINE_SIZE,
668                                  socket_id);
669         if (!txq) {
670                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
671                              "tx queue structure");
672                 return -ENOMEM;
673         }
674
675         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
676                 struct virtchnl_vlan_supported_caps *insertion_support =
677                         &vf->vlan_v2_caps.offloads.insertion_support;
678                 uint32_t insertion_cap;
679
680                 if (insertion_support->outer)
681                         insertion_cap = insertion_support->outer;
682                 else
683                         insertion_cap = insertion_support->inner;
684
685                 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
686                         txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
687                 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
688                         txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
689         } else {
690                 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
691         }
692
693         txq->nb_tx_desc = nb_desc;
694         txq->rs_thresh = tx_rs_thresh;
695         txq->free_thresh = tx_free_thresh;
696         txq->queue_id = queue_idx;
697         txq->port_id = dev->data->port_id;
698         txq->offloads = offloads;
699         txq->tx_deferred_start = tx_conf->tx_deferred_start;
700
701         /* Allocate software ring */
702         txq->sw_ring =
703                 rte_zmalloc_socket("iavf tx sw ring",
704                                    sizeof(struct iavf_tx_entry) * nb_desc,
705                                    RTE_CACHE_LINE_SIZE,
706                                    socket_id);
707         if (!txq->sw_ring) {
708                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
709                 rte_free(txq);
710                 return -ENOMEM;
711         }
712
713         /* Allocate TX hardware ring descriptors. */
714         ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
715         ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
716         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
717                                       ring_size, IAVF_RING_BASE_ALIGN,
718                                       socket_id);
719         if (!mz) {
720                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
721                 rte_free(txq->sw_ring);
722                 rte_free(txq);
723                 return -ENOMEM;
724         }
725         txq->tx_ring_phys_addr = mz->iova;
726         txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
727
728         txq->mz = mz;
729         reset_tx_queue(txq);
730         txq->q_set = true;
731         dev->data->tx_queues[queue_idx] = txq;
732         txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
733         txq->ops = &def_txq_ops;
734
735         if (check_tx_vec_allow(txq) == false) {
736                 struct iavf_adapter *ad =
737                         IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
738                 ad->tx_vec_allowed = false;
739         }
740
741         return 0;
742 }
743
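/* Start an Rx queue: fill it with mbufs, program the tail register and ask
 * the PF to switch the queue on.
 */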
744 int
745 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
746 {
747         struct iavf_adapter *adapter =
748                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
749         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
750         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
751         struct iavf_rx_queue *rxq;
752         int err = 0;
753
754         PMD_DRV_FUNC_TRACE();
755
756         if (rx_queue_id >= dev->data->nb_rx_queues)
757                 return -EINVAL;
758
759         rxq = dev->data->rx_queues[rx_queue_id];
760
761         err = alloc_rxq_mbufs(rxq);
762         if (err) {
763                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
764                 return err;
765         }
766
767         rte_wmb();
768
769         /* Init the RX tail register. */
770         IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
771         IAVF_WRITE_FLUSH(hw);
772
773         /* Ready to switch the queue on */
774         if (!vf->lv_enabled)
775                 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
776         else
777                 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
778
779         if (err)
780                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
781                             rx_queue_id);
782         else
783                 dev->data->rx_queue_state[rx_queue_id] =
784                         RTE_ETH_QUEUE_STATE_STARTED;
785
786         return err;
787 }
788
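/* Start a Tx queue: program the tail register and ask the PF to switch the
 * queue on.
 */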
789 int
790 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
791 {
792         struct iavf_adapter *adapter =
793                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
794         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
795         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
796         struct iavf_tx_queue *txq;
797         int err = 0;
798
799         PMD_DRV_FUNC_TRACE();
800
801         if (tx_queue_id >= dev->data->nb_tx_queues)
802                 return -EINVAL;
803
804         txq = dev->data->tx_queues[tx_queue_id];
805
806         /* Init the TX tail register. */
807         IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
808         IAVF_WRITE_FLUSH(hw);
809
810         /* Ready to switch the queue on */
811         if (!vf->lv_enabled)
812                 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
813         else
814                 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
815
816         if (err)
817                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
818                             tx_queue_id);
819         else
820                 dev->data->tx_queue_state[tx_queue_id] =
821                         RTE_ETH_QUEUE_STATE_STARTED;
822
823         return err;
824 }
825
826 int
827 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
828 {
829         struct iavf_adapter *adapter =
830                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
831         struct iavf_rx_queue *rxq;
832         int err;
833
834         PMD_DRV_FUNC_TRACE();
835
836         if (rx_queue_id >= dev->data->nb_rx_queues)
837                 return -EINVAL;
838
839         err = iavf_switch_queue(adapter, rx_queue_id, true, false);
840         if (err) {
841                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
842                             rx_queue_id);
843                 return err;
844         }
845
846         rxq = dev->data->rx_queues[rx_queue_id];
847         rxq->ops->release_mbufs(rxq);
848         reset_rx_queue(rxq);
849         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
850
851         return 0;
852 }
853
854 int
855 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
856 {
857         struct iavf_adapter *adapter =
858                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
859         struct iavf_tx_queue *txq;
860         int err;
861
862         PMD_DRV_FUNC_TRACE();
863
864         if (tx_queue_id >= dev->data->nb_tx_queues)
865                 return -EINVAL;
866
867         err = iavf_switch_queue(adapter, tx_queue_id, false, false);
868         if (err) {
869                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
870                             tx_queue_id);
871                 return err;
872         }
873
874         txq = dev->data->tx_queues[tx_queue_id];
875         txq->ops->release_mbufs(txq);
876         reset_tx_queue(txq);
877         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
878
879         return 0;
880 }
881
882 void
883 iavf_dev_rx_queue_release(void *rxq)
884 {
885         struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
886
887         if (!q)
888                 return;
889
890         q->ops->release_mbufs(q);
891         rte_free(q->sw_ring);
892         rte_memzone_free(q->mz);
893         rte_free(q);
894 }
895
896 void
897 iavf_dev_tx_queue_release(void *txq)
898 {
899         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
900
901         if (!q)
902                 return;
903
904         q->ops->release_mbufs(q);
905         rte_free(q->sw_ring);
906         rte_memzone_free(q->mz);
907         rte_free(q);
908 }
909
910 void
911 iavf_stop_queues(struct rte_eth_dev *dev)
912 {
913         struct iavf_adapter *adapter =
914                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
915         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
916         struct iavf_rx_queue *rxq;
917         struct iavf_tx_queue *txq;
918         int ret, i;
919
920         /* Stop All queues */
921         if (!vf->lv_enabled) {
922                 ret = iavf_disable_queues(adapter);
923                 if (ret)
924                         PMD_DRV_LOG(WARNING, "Fail to stop queues");
925         } else {
926                 ret = iavf_disable_queues_lv(adapter);
927                 if (ret)
928                         PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
929         }
930
934         for (i = 0; i < dev->data->nb_tx_queues; i++) {
935                 txq = dev->data->tx_queues[i];
936                 if (!txq)
937                         continue;
938                 txq->ops->release_mbufs(txq);
939                 reset_tx_queue(txq);
940                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
941         }
942         for (i = 0; i < dev->data->nb_rx_queues; i++) {
943                 rxq = dev->data->rx_queues[i];
944                 if (!rxq)
945                         continue;
946                 rxq->ops->release_mbufs(rxq);
947                 reset_rx_queue(rxq);
948                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
949         }
950 }
959
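/* Extract the stripped VLAN tag (L2TAG1) from a legacy Rx descriptor. */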
960 static inline void
961 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
962 {
963         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
964                 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
965                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
966                 mb->vlan_tci =
967                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
968         } else {
969                 mb->vlan_tci = 0;
970         }
971 }
972
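/* Extract stripped VLAN tags from a flexible Rx descriptor: L2TAG1 and,
 * when the descriptor carries a second tag, L2TAG2 for QinQ.
 */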
973 static inline void
974 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
975                           volatile union iavf_rx_flex_desc *rxdp)
976 {
977         if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
978                 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
979                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
980                 mb->vlan_tci =
981                         rte_le_to_cpu_16(rxdp->wb.l2tag1);
982         } else {
983                 mb->vlan_tci = 0;
984         }
985
986 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
987         if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
988             (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
989                 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
990                                 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
991                 mb->vlan_tci_outer = mb->vlan_tci;
992                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
993                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
994                            rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
995                            rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
996         } else {
997                 mb->vlan_tci_outer = 0;
998         }
999 #endif
1000 }
1001
1002 /* Translate the rx descriptor status and error fields to pkt flags */
1003 static inline uint64_t
1004 iavf_rxd_to_pkt_flags(uint64_t qword)
1005 {
1006         uint64_t flags;
1007         uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1008
1009 #define IAVF_RX_ERR_BITS 0x3f
1010
1011         /* Check if RSS_HASH */
1012         flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1013                                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1014                         IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1015
1016         /* Check if FDIR Match */
1017         flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1018                                 PKT_RX_FDIR : 0);
1019
1020         if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1021                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1022                 return flags;
1023         }
1024
1025         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1026                 flags |= PKT_RX_IP_CKSUM_BAD;
1027         else
1028                 flags |= PKT_RX_IP_CKSUM_GOOD;
1029
1030         if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1031                 flags |= PKT_RX_L4_CKSUM_BAD;
1032         else
1033                 flags |= PKT_RX_L4_CKSUM_GOOD;
1034
1035         /* TODO: Oversize error bit is not processed here */
1036
1037         return flags;
1038 }
1039
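/* Report the flow director ID from the descriptor when an FDIR match was flagged. */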
1040 static inline uint64_t
1041 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1042 {
1043         uint64_t flags = 0;
1044 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1045         uint16_t flexbh;
1046
1047         flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1048                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1049                 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1050
1051         if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1052                 mb->hash.fdir.hi =
1053                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1054                 flags |= PKT_RX_FDIR_ID;
1055         }
1056 #else
1057         mb->hash.fdir.hi =
1058                 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1059         flags |= PKT_RX_FDIR_ID;
1060 #endif
1061         return flags;
1062 }
1063
1064 #define IAVF_RX_FLEX_ERR0_BITS  \
1065         ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |       \
1066          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |  \
1067          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |  \
1068          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1069          (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |        \
1070          (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1071
1072 /* Rx L3/L4 checksum */
1073 static inline uint64_t
1074 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1075 {
1076         uint64_t flags = 0;
1077
1078         /* check if HW has decoded the packet and checksum */
1079         if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1080                 return 0;
1081
1082         if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1083                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1084                 return flags;
1085         }
1086
1087         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1088                 flags |= PKT_RX_IP_CKSUM_BAD;
1089         else
1090                 flags |= PKT_RX_IP_CKSUM_GOOD;
1091
1092         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1093                 flags |= PKT_RX_L4_CKSUM_BAD;
1094         else
1095                 flags |= PKT_RX_L4_CKSUM_GOOD;
1096
1097         if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1098                 flags |= PKT_RX_EIP_CKSUM_BAD;
1099
1100         return flags;
1101 }
1102
1103 /* If the number of free RX descriptors is greater than the RX free
1104  * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1105  * register. Update the RDT with the value of the last processed RX
1106  * descriptor minus 1, to guarantee that the RDT register is never
1107  * equal to the RDH register, which creates a "full" ring situation
1108  * from the hardware point of view.
1109  */
1110 static inline void
1111 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1112 {
1113         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1114
1115         if (nb_hold > rxq->rx_free_thresh) {
1116                 PMD_RX_LOG(DEBUG,
1117                            "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1118                            rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1119                 rx_id = (uint16_t)((rx_id == 0) ?
1120                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1121                 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1122                 nb_hold = 0;
1123         }
1124         rxq->nb_rx_hold = nb_hold;
1125 }
1126
1127 /* implement recv_pkts */
1128 uint16_t
1129 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1130 {
1131         volatile union iavf_rx_desc *rx_ring;
1132         volatile union iavf_rx_desc *rxdp;
1133         struct iavf_rx_queue *rxq;
1134         union iavf_rx_desc rxd;
1135         struct rte_mbuf *rxe;
1136         struct rte_eth_dev *dev;
1137         struct rte_mbuf *rxm;
1138         struct rte_mbuf *nmb;
1139         uint16_t nb_rx;
1140         uint32_t rx_status;
1141         uint64_t qword1;
1142         uint16_t rx_packet_len;
1143         uint16_t rx_id, nb_hold;
1144         uint64_t dma_addr;
1145         uint64_t pkt_flags;
1146         const uint32_t *ptype_tbl;
1147
1148         nb_rx = 0;
1149         nb_hold = 0;
1150         rxq = rx_queue;
1151         rx_id = rxq->rx_tail;
1152         rx_ring = rxq->rx_ring;
1153         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1154
1155         while (nb_rx < nb_pkts) {
1156                 rxdp = &rx_ring[rx_id];
1157                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1158                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1159                             IAVF_RXD_QW1_STATUS_SHIFT;
1160
1161                 /* Check the DD bit first */
1162                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1163                         break;
1164                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1165
1166                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1167                 if (unlikely(!nmb)) {
1168                         dev = &rte_eth_devices[rxq->port_id];
1169                         dev->data->rx_mbuf_alloc_failed++;
1170                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1171                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1172                         break;
1173                 }
1174
1175                 rxd = *rxdp;
1176                 nb_hold++;
1177                 rxe = rxq->sw_ring[rx_id];
1178                 rx_id++;
1179                 if (unlikely(rx_id == rxq->nb_rx_desc))
1180                         rx_id = 0;
1181
1182                 /* Prefetch next mbuf */
1183                 rte_prefetch0(rxq->sw_ring[rx_id]);
1184
1185                 /* When next RX descriptor is on a cache line boundary,
1186                  * prefetch the next 4 RX descriptors and next 8 pointers
1187                  * to mbufs.
1188                  */
1189                 if ((rx_id & 0x3) == 0) {
1190                         rte_prefetch0(&rx_ring[rx_id]);
1191                         rte_prefetch0(rxq->sw_ring[rx_id]);
1192                 }
1193                 rxm = rxe;
1194                 dma_addr =
1195                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1196                 rxdp->read.hdr_addr = 0;
1197                 rxdp->read.pkt_addr = dma_addr;
1198
1199                 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1200                                 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1201
1202                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1203                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1204                 rxm->nb_segs = 1;
1205                 rxm->next = NULL;
1206                 rxm->pkt_len = rx_packet_len;
1207                 rxm->data_len = rx_packet_len;
1208                 rxm->port = rxq->port_id;
1209                 rxm->ol_flags = 0;
1210                 iavf_rxd_to_vlan_tci(rxm, &rxd);
1211                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1212                 rxm->packet_type =
1213                         ptype_tbl[(uint8_t)((qword1 &
1214                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1215
1216                 if (pkt_flags & PKT_RX_RSS_HASH)
1217                         rxm->hash.rss =
1218                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1219
1220                 if (pkt_flags & PKT_RX_FDIR)
1221                         pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1222
1223                 rxm->ol_flags |= pkt_flags;
1224
1225                 rx_pkts[nb_rx++] = rxm;
1226         }
1227         rxq->rx_tail = rx_id;
1228
1229         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1230
1231         return nb_rx;
1232 }
1233
1234 /* implement recv_pkts for flexible Rx descriptor */
1235 uint16_t
1236 iavf_recv_pkts_flex_rxd(void *rx_queue,
1237                         struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1238 {
1239         volatile union iavf_rx_desc *rx_ring;
1240         volatile union iavf_rx_flex_desc *rxdp;
1241         struct iavf_rx_queue *rxq;
1242         union iavf_rx_flex_desc rxd;
1243         struct rte_mbuf *rxe;
1244         struct rte_eth_dev *dev;
1245         struct rte_mbuf *rxm;
1246         struct rte_mbuf *nmb;
1247         uint16_t nb_rx;
1248         uint16_t rx_stat_err0;
1249         uint16_t rx_packet_len;
1250         uint16_t rx_id, nb_hold;
1251         uint64_t dma_addr;
1252         uint64_t pkt_flags;
1253         const uint32_t *ptype_tbl;
1254
1255         nb_rx = 0;
1256         nb_hold = 0;
1257         rxq = rx_queue;
1258         rx_id = rxq->rx_tail;
1259         rx_ring = rxq->rx_ring;
1260         ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1261
1262         while (nb_rx < nb_pkts) {
1263                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1264                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1265
1266                 /* Check the DD bit first */
1267                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1268                         break;
1269                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1270
1271                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1272                 if (unlikely(!nmb)) {
1273                         dev = &rte_eth_devices[rxq->port_id];
1274                         dev->data->rx_mbuf_alloc_failed++;
1275                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1276                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1277                         break;
1278                 }
1279
1280                 rxd = *rxdp;
1281                 nb_hold++;
1282                 rxe = rxq->sw_ring[rx_id];
1283                 rx_id++;
1284                 if (unlikely(rx_id == rxq->nb_rx_desc))
1285                         rx_id = 0;
1286
1287                 /* Prefetch next mbuf */
1288                 rte_prefetch0(rxq->sw_ring[rx_id]);
1289
1290                 /* When next RX descriptor is on a cache line boundary,
1291                  * prefetch the next 4 RX descriptors and next 8 pointers
1292                  * to mbufs.
1293                  */
1294                 if ((rx_id & 0x3) == 0) {
1295                         rte_prefetch0(&rx_ring[rx_id]);
1296                         rte_prefetch0(rxq->sw_ring[rx_id]);
1297                 }
1298                 rxm = rxe;
1299                 dma_addr =
1300                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1301                 rxdp->read.hdr_addr = 0;
1302                 rxdp->read.pkt_addr = dma_addr;
1303
1304                 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1305                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1306
1307                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1308                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1309                 rxm->nb_segs = 1;
1310                 rxm->next = NULL;
1311                 rxm->pkt_len = rx_packet_len;
1312                 rxm->data_len = rx_packet_len;
1313                 rxm->port = rxq->port_id;
1314                 rxm->ol_flags = 0;
1315                 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1316                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1317                 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1318                 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1319                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1320                 rxm->ol_flags |= pkt_flags;
1321
1322                 rx_pkts[nb_rx++] = rxm;
1323         }
1324         rxq->rx_tail = rx_id;
1325
1326         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1327
1328         return nb_rx;
1329 }
1330
1331 /* implement recv_scattered_pkts for flexible Rx descriptor */
1332 uint16_t
1333 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1334                                   uint16_t nb_pkts)
1335 {
1336         struct iavf_rx_queue *rxq = rx_queue;
1337         union iavf_rx_flex_desc rxd;
1338         struct rte_mbuf *rxe;
1339         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1340         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1341         struct rte_mbuf *nmb, *rxm;
1342         uint16_t rx_id = rxq->rx_tail;
1343         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1344         struct rte_eth_dev *dev;
1345         uint16_t rx_stat_err0;
1346         uint64_t dma_addr;
1347         uint64_t pkt_flags;
1348
1349         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1350         volatile union iavf_rx_flex_desc *rxdp;
1351         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1352
1353         while (nb_rx < nb_pkts) {
1354                 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1355                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1356
1357                 /* Check the DD bit */
1358                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1359                         break;
1360                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1361
1362                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1363                 if (unlikely(!nmb)) {
1364                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1365                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1366                         dev = &rte_eth_devices[rxq->port_id];
1367                         dev->data->rx_mbuf_alloc_failed++;
1368                         break;
1369                 }
1370
1371                 rxd = *rxdp;
1372                 nb_hold++;
1373                 rxe = rxq->sw_ring[rx_id];
1374                 rx_id++;
1375                 if (rx_id == rxq->nb_rx_desc)
1376                         rx_id = 0;
1377
1378                 /* Prefetch next mbuf */
1379                 rte_prefetch0(rxq->sw_ring[rx_id]);
1380
1381                 /* When next RX descriptor is on a cache line boundary,
1382                  * prefetch the next 4 RX descriptors and next 8 pointers
1383                  * to mbufs.
1384                  */
1385                 if ((rx_id & 0x3) == 0) {
1386                         rte_prefetch0(&rx_ring[rx_id]);
1387                         rte_prefetch0(rxq->sw_ring[rx_id]);
1388                 }
1389
1390                 rxm = rxe;
1391                 dma_addr =
1392                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1393
1394                 /* Set data buffer address and data length of the mbuf */
1395                 rxdp->read.hdr_addr = 0;
1396                 rxdp->read.pkt_addr = dma_addr;
1397                 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1398                                 IAVF_RX_FLX_DESC_PKT_LEN_M;
1399                 rxm->data_len = rx_packet_len;
1400                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1401
1402                 /* If this is the first buffer of the received packet, set the
1403                  * pointer to the first mbuf of the packet and initialize its
1404                  * context. Otherwise, update the total length and the number
1405                  * of segments of the current scattered packet, and update the
1406                  * pointer to the last mbuf of the current packet.
1407                  */
1408                 if (!first_seg) {
1409                         first_seg = rxm;
1410                         first_seg->nb_segs = 1;
1411                         first_seg->pkt_len = rx_packet_len;
1412                 } else {
1413                         first_seg->pkt_len =
1414                                 (uint16_t)(first_seg->pkt_len +
1415                                                 rx_packet_len);
1416                         first_seg->nb_segs++;
1417                         last_seg->next = rxm;
1418                 }
1419
1420                 /* If this is not the last buffer of the received packet,
1421                  * update the pointer to the last mbuf of the current scattered
1422                  * packet and continue to parse the RX ring.
1423                  */
1424                 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1425                         last_seg = rxm;
1426                         continue;
1427                 }
1428
1429                 /* This is the last buffer of the received packet. If the CRC
1430                  * is not stripped by the hardware:
1431                  *  - Subtract the CRC length from the total packet length.
1432                  *  - If the last buffer only contains the whole CRC or a part
1433                  *  of it, free the mbuf associated to the last buffer. If part
1434                  *  of it, free the mbuf associated with the last buffer. If part
1435                  *  the length of that CRC part from the data length of the
1436                  *  previous mbuf.
1437                  */
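                /* For illustration (assumed values, not from the source): if
                 * crc_len is 4 and the last segment carries only
                 * rx_packet_len == 2 bytes, that segment holds nothing but
                 * part of the CRC, so it is freed and the remaining
                 * 4 - 2 = 2 CRC bytes are trimmed from the previous
                 * segment's data_len.
                 */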
1438                 rxm->next = NULL;
1439                 if (unlikely(rxq->crc_len > 0)) {
1440                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1441                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1442                                 rte_pktmbuf_free_seg(rxm);
1443                                 first_seg->nb_segs--;
1444                                 last_seg->data_len =
1445                                         (uint16_t)(last_seg->data_len -
1446                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1447                                 last_seg->next = NULL;
1448                         } else {
1449                                 rxm->data_len = (uint16_t)(rx_packet_len -
1450                                                         RTE_ETHER_CRC_LEN);
1451                         }
1452                 }
1453
1454                 first_seg->port = rxq->port_id;
1455                 first_seg->ol_flags = 0;
1456                 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1457                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1458                 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1459                 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1460                 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1461
1462                 first_seg->ol_flags |= pkt_flags;
1463
1464                 /* Prefetch data of first segment, if configured to do so. */
1465                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1466                                           first_seg->data_off));
1467                 rx_pkts[nb_rx++] = first_seg;
1468                 first_seg = NULL;
1469         }
1470
1471         /* Record index of the next RX descriptor to probe. */
1472         rxq->rx_tail = rx_id;
1473         rxq->pkt_first_seg = first_seg;
1474         rxq->pkt_last_seg = last_seg;
1475
1476         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1477
1478         return nb_rx;
1479 }
1480
1481 /* Implement recv_scattered_pkts for the legacy Rx descriptor format. */
1482 uint16_t
1483 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1484                         uint16_t nb_pkts)
1485 {
1486         struct iavf_rx_queue *rxq = rx_queue;
1487         union iavf_rx_desc rxd;
1488         struct rte_mbuf *rxe;
1489         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1490         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1491         struct rte_mbuf *nmb, *rxm;
1492         uint16_t rx_id = rxq->rx_tail;
1493         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1494         struct rte_eth_dev *dev;
1495         uint32_t rx_status;
1496         uint64_t qword1;
1497         uint64_t dma_addr;
1498         uint64_t pkt_flags;
1499
1500         volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1501         volatile union iavf_rx_desc *rxdp;
1502         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1503
1504         while (nb_rx < nb_pkts) {
1505                 rxdp = &rx_ring[rx_id];
1506                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1507                 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1508                             IAVF_RXD_QW1_STATUS_SHIFT;
1509
1510                 /* Check the DD bit */
1511                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1512                         break;
1513                 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1514
1515                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1516                 if (unlikely(!nmb)) {
1517                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1518                                    "queue_id=%u", rxq->port_id, rxq->queue_id);
1519                         dev = &rte_eth_devices[rxq->port_id];
1520                         dev->data->rx_mbuf_alloc_failed++;
1521                         break;
1522                 }
1523
1524                 rxd = *rxdp;
1525                 nb_hold++;
1526                 rxe = rxq->sw_ring[rx_id];
1527                 rx_id++;
1528                 if (rx_id == rxq->nb_rx_desc)
1529                         rx_id = 0;
1530
1531                 /* Prefetch next mbuf */
1532                 rte_prefetch0(rxq->sw_ring[rx_id]);
1533
1534                 /* When the next RX descriptor is on a cache line boundary,
1535                  * prefetch the next 4 RX descriptors and the next 8 mbuf
1536                  * pointers.
1537                  */
1538                 if ((rx_id & 0x3) == 0) {
1539                         rte_prefetch0(&rx_ring[rx_id]);
1540                         rte_prefetch0(rxq->sw_ring[rx_id]);
1541                 }
1542
1543                 rxm = rxe;
1544                 dma_addr =
1545                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1546
1547                 /* Set data buffer address and data length of the mbuf */
1548                 rxdp->read.hdr_addr = 0;
1549                 rxdp->read.pkt_addr = dma_addr;
1550                 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1551                                  IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1552                 rxm->data_len = rx_packet_len;
1553                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1554
1555                 /* If this is the first buffer of the received packet, set the
1556                  * pointer to the first mbuf of the packet and initialize its
1557                  * context. Otherwise, update the total length and the number
1558                  * of segments of the current scattered packet, and update the
1559                  * pointer to the last mbuf of the current packet.
1560                  */
1561                 if (!first_seg) {
1562                         first_seg = rxm;
1563                         first_seg->nb_segs = 1;
1564                         first_seg->pkt_len = rx_packet_len;
1565                 } else {
1566                         first_seg->pkt_len =
1567                                 (uint16_t)(first_seg->pkt_len +
1568                                                 rx_packet_len);
1569                         first_seg->nb_segs++;
1570                         last_seg->next = rxm;
1571                 }
1572
1573                 /* If this is not the last buffer of the received packet,
1574                  * update the pointer to the last mbuf of the current scattered
1575                  * packet and continue to parse the RX ring.
1576                  */
1577                 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1578                         last_seg = rxm;
1579                         continue;
1580                 }
1581
1582                 /* This is the last buffer of the received packet. If the CRC
1583                  * is not stripped by the hardware:
1584                  *  - Subtract the CRC length from the total packet length.
1585                  *  - If the last buffer only contains the whole CRC or a part
1586                  *  of it, free the mbuf associated to the last buffer. If part
1587                  *  of it, free the mbuf associated with the last buffer. If part
1588                  *  the length of that CRC part from the data length of the
1589                  *  previous mbuf.
1590                  */
1591                 rxm->next = NULL;
1592                 if (unlikely(rxq->crc_len > 0)) {
1593                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1594                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1595                                 rte_pktmbuf_free_seg(rxm);
1596                                 first_seg->nb_segs--;
1597                                 last_seg->data_len =
1598                                         (uint16_t)(last_seg->data_len -
1599                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1600                                 last_seg->next = NULL;
1601                         } else {
1602                                 rxm->data_len = (uint16_t)(rx_packet_len -
1603                                                         RTE_ETHER_CRC_LEN);
                             }
1604                 }
1605
1606                 first_seg->port = rxq->port_id;
1607                 first_seg->ol_flags = 0;
1608                 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1609                 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1610                 first_seg->packet_type =
1611                         ptype_tbl[(uint8_t)((qword1 &
1612                         IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1613
1614                 if (pkt_flags & PKT_RX_RSS_HASH)
1615                         first_seg->hash.rss =
1616                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1617
1618                 if (pkt_flags & PKT_RX_FDIR)
1619                         pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1620
1621                 first_seg->ol_flags |= pkt_flags;
1622
1623                 /* Prefetch data of first segment, if configured to do so. */
1624                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1625                                           first_seg->data_off));
1626                 rx_pkts[nb_rx++] = first_seg;
1627                 first_seg = NULL;
1628         }
1629
1630         /* Record index of the next RX descriptor to probe. */
1631         rxq->rx_tail = rx_id;
1632         rxq->pkt_first_seg = first_seg;
1633         rxq->pkt_last_seg = last_seg;
1634
1635         iavf_update_rx_tail(rxq, nb_hold, rx_id);
1636
1637         return nb_rx;
1638 }
1639
1640 #define IAVF_LOOK_AHEAD 8
1641 static inline int
1642 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1643 {
1644         volatile union iavf_rx_flex_desc *rxdp;
1645         struct rte_mbuf **rxep;
1646         struct rte_mbuf *mb;
1647         uint16_t stat_err0;
1648         uint16_t pkt_len;
1649         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1650         int32_t i, j, nb_rx = 0;
1651         uint64_t pkt_flags;
1652         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1653
1654         rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1655         rxep = &rxq->sw_ring[rxq->rx_tail];
1656
1657         stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1658
1659         /* Make sure there is at least 1 packet to receive */
1660         if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1661                 return 0;
1662
1663         /* Scan LOOK_AHEAD descriptors at a time to determine which
1664          * descriptors reference packets that are ready to be received.
1665          */
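        /* In other words, with IAVF_LOOK_AHEAD == 8 each iteration below
         * reads 8 status words, nb_dd counts how many of them have the DD
         * bit set, and the scan stops at the first group that is not fully
         * completed.
         */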
1666         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1667              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1668                 /* Read desc statuses backwards to avoid race condition */
1669                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1670                         s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1671
1672                 rte_smp_rmb();
1673
1674                 /* Compute how many status bits were set */
1675                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1676                         nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1677
1678                 nb_rx += nb_dd;
1679
1680                 /* Translate descriptor info to mbuf parameters */
1681                 for (j = 0; j < nb_dd; j++) {
1682                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1683                                           rxq->rx_tail +
1684                                           i * IAVF_LOOK_AHEAD + j);
1685
1686                         mb = rxep[j];
1687                         pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1688                                 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1689                         mb->data_len = pkt_len;
1690                         mb->pkt_len = pkt_len;
1691                         mb->ol_flags = 0;
1692
1693                         mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1694                                 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1695                         iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1696                         rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1697                         stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1698                         pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1699
1700                         mb->ol_flags |= pkt_flags;
1701                 }
1702
1703                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1704                         rxq->rx_stage[i + j] = rxep[j];
1705
1706                 if (nb_dd != IAVF_LOOK_AHEAD)
1707                         break;
1708         }
1709
1710         /* Clear software ring entries */
1711         for (i = 0; i < nb_rx; i++)
1712                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1713
1714         return nb_rx;
1715 }
1716
1717 static inline int
1718 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1719 {
1720         volatile union iavf_rx_desc *rxdp;
1721         struct rte_mbuf **rxep;
1722         struct rte_mbuf *mb;
1723         uint16_t pkt_len;
1724         uint64_t qword1;
1725         uint32_t rx_status;
1726         int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1727         int32_t i, j, nb_rx = 0;
1728         uint64_t pkt_flags;
1729         const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1730
1731         rxdp = &rxq->rx_ring[rxq->rx_tail];
1732         rxep = &rxq->sw_ring[rxq->rx_tail];
1733
1734         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1735         rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1736                     IAVF_RXD_QW1_STATUS_SHIFT;
1737
1738         /* Make sure there is at least 1 packet to receive */
1739         if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1740                 return 0;
1741
1742         /* Scan LOOK_AHEAD descriptors at a time to determine which
1743          * descriptors reference packets that are ready to be received.
1744          */
1745         for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1746              rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1747                 /* Read desc statuses backwards to avoid race condition */
1748                 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1749                         qword1 = rte_le_to_cpu_64(
1750                                 rxdp[j].wb.qword1.status_error_len);
1751                         s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1752                                IAVF_RXD_QW1_STATUS_SHIFT;
1753                 }
1754
1755                 rte_smp_rmb();
1756
1757                 /* Compute how many status bits were set */
1758                 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1759                         nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1760
1761                 nb_rx += nb_dd;
1762
1763                 /* Translate descriptor info to mbuf parameters */
1764                 for (j = 0; j < nb_dd; j++) {
1765                         IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1766                                          rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1767
1768                         mb = rxep[j];
1769                         qword1 = rte_le_to_cpu_64
1770                                         (rxdp[j].wb.qword1.status_error_len);
1771                         pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1772                                   IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1773                         mb->data_len = pkt_len;
1774                         mb->pkt_len = pkt_len;
1775                         mb->ol_flags = 0;
1776                         iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1777                         pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1778                         mb->packet_type =
1779                                 ptype_tbl[(uint8_t)((qword1 &
1780                                 IAVF_RXD_QW1_PTYPE_MASK) >>
1781                                 IAVF_RXD_QW1_PTYPE_SHIFT)];
1782
1783                         if (pkt_flags & PKT_RX_RSS_HASH)
1784                                 mb->hash.rss = rte_le_to_cpu_32(
1785                                         rxdp[j].wb.qword0.hi_dword.rss);
1786
1787                         if (pkt_flags & PKT_RX_FDIR)
1788                                 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1789
1790                         mb->ol_flags |= pkt_flags;
1791                 }
1792
1793                 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1794                         rxq->rx_stage[i + j] = rxep[j];
1795
1796                 if (nb_dd != IAVF_LOOK_AHEAD)
1797                         break;
1798         }
1799
1800         /* Clear software ring entries */
1801         for (i = 0; i < nb_rx; i++)
1802                 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1803
1804         return nb_rx;
1805 }
1806
1807 static inline uint16_t
1808 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1809                        struct rte_mbuf **rx_pkts,
1810                        uint16_t nb_pkts)
1811 {
1812         uint16_t i;
1813         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1814
1815         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1816
1817         for (i = 0; i < nb_pkts; i++)
1818                 rx_pkts[i] = stage[i];
1819
1820         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1821         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1822
1823         return nb_pkts;
1824 }
1825
1826 static inline int
1827 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1828 {
1829         volatile union iavf_rx_desc *rxdp;
1830         struct rte_mbuf **rxep;
1831         struct rte_mbuf *mb;
1832         uint16_t alloc_idx, i;
1833         uint64_t dma_addr;
1834         int diag;
1835
1836         /* Allocate buffers in bulk */
1837         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1838                                 (rxq->rx_free_thresh - 1));
1839         rxep = &rxq->sw_ring[alloc_idx];
1840         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1841                                     rxq->rx_free_thresh);
1842         if (unlikely(diag != 0)) {
1843                 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1844                 return -ENOMEM;
1845         }
1846
1847         rxdp = &rxq->rx_ring[alloc_idx];
1848         for (i = 0; i < rxq->rx_free_thresh; i++) {
1849                 if (likely(i < (rxq->rx_free_thresh - 1)))
1850                         /* Prefetch next mbuf */
1851                         rte_prefetch0(rxep[i + 1]);
1852
1853                 mb = rxep[i];
1854                 rte_mbuf_refcnt_set(mb, 1);
1855                 mb->next = NULL;
1856                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1857                 mb->nb_segs = 1;
1858                 mb->port = rxq->port_id;
1859                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1860                 rxdp[i].read.hdr_addr = 0;
1861                 rxdp[i].read.pkt_addr = dma_addr;
1862         }
1863
1864         /* Update rx tail register */
1865         rte_wmb();
1866         IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1867
1868         rxq->rx_free_trigger =
1869                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1870         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1871                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1872
1873         return 0;
1874 }
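
/* A note on rx_free_trigger, with illustrative values that are assumed and
 * not taken from this source: the trigger advances by rx_free_thresh after
 * each refill and wraps back to rx_free_thresh - 1 at the end of the ring.
 * Assuming it starts at rx_free_thresh - 1, with 512 descriptors and
 * rx_free_thresh == 32 the trigger sequence is 31, 63, ..., 511, 31, ...
 */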
1875
1876 static inline uint16_t
1877 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1878 {
1879         struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1880         uint16_t nb_rx = 0;
1881
1882         if (!nb_pkts)
1883                 return 0;
1884
1885         if (rxq->rx_nb_avail)
1886                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1887
1888         if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1889                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1890         else
1891                 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1892         rxq->rx_next_avail = 0;
1893         rxq->rx_nb_avail = nb_rx;
1894         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1895
1896         if (rxq->rx_tail > rxq->rx_free_trigger) {
1897                 if (iavf_rx_alloc_bufs(rxq) != 0) {
1898                         uint16_t i, j;
1899
1900                         /* TODO: count rx_mbuf_alloc_failed here */
1901
1902                         rxq->rx_nb_avail = 0;
1903                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1904                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1905                                 rxq->sw_ring[j] = rxq->rx_stage[i];
1906
1907                         return 0;
1908                 }
1909         }
1910
1911         if (rxq->rx_tail >= rxq->nb_rx_desc)
1912                 rxq->rx_tail = 0;
1913
1914         PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1915                    rxq->port_id, rxq->queue_id,
1916                    rxq->rx_tail, nb_rx);
1917
1918         if (rxq->rx_nb_avail)
1919                 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1920
1921         return 0;
1922 }
1923
1924 static uint16_t
1925 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1926                          struct rte_mbuf **rx_pkts,
1927                          uint16_t nb_pkts)
1928 {
1929         uint16_t nb_rx = 0, n, count;
1930
1931         if (unlikely(nb_pkts == 0))
1932                 return 0;
1933
1934         if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1935                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1936
1937         while (nb_pkts) {
1938                 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1939                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1940                 nb_rx = (uint16_t)(nb_rx + count);
1941                 nb_pkts = (uint16_t)(nb_pkts - count);
1942                 if (count < n)
1943                         break;
1944         }
1945
1946         return nb_rx;
1947 }
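
/* For illustration (assuming IAVF_RX_MAX_BURST is 32): a request for 96
 * packets is served by the loop above as up to three 32-packet calls to
 * rx_recv_pkts(), stopping early as soon as one call returns fewer packets
 * than requested.
 */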
1948
1949 static inline int
1950 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1951 {
1952         struct iavf_tx_entry *sw_ring = txq->sw_ring;
1953         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1954         uint16_t nb_tx_desc = txq->nb_tx_desc;
1955         uint16_t desc_to_clean_to;
1956         uint16_t nb_tx_to_clean;
1957
1958         volatile struct iavf_tx_desc *txd = txq->tx_ring;
1959
1960         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1961         if (desc_to_clean_to >= nb_tx_desc)
1962                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1963
1964         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1965         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1966                         rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1967                         rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1968                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1969                                 "(port=%d queue=%d)", desc_to_clean_to,
1970                                 txq->port_id, txq->queue_id);
1971                 return -1;
1972         }
1973
1974         if (last_desc_cleaned > desc_to_clean_to)
1975                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1976                                                         desc_to_clean_to);
1977         else
1978                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1979                                         last_desc_cleaned);
1980
1981         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1982
1983         txq->last_desc_cleaned = desc_to_clean_to;
1984         txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1985
1986         return 0;
1987 }
1988
1989 /* Check whether a context descriptor is needed for Tx offloading */
1990 static inline uint16_t
1991 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
1992 {
1993         if (flags & PKT_TX_TCP_SEG)
1994                 return 1;
1995         if (flags & PKT_TX_VLAN_PKT &&
1996             vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
1997                 return 1;
1998         return 0;
1999 }
2000
2001 static inline void
2002 iavf_txd_enable_checksum(uint64_t ol_flags,
2003                         uint32_t *td_cmd,
2004                         uint32_t *td_offset,
2005                         union iavf_tx_offload tx_offload)
2006 {
2007         /* Set MACLEN */
2008         *td_offset |= (tx_offload.l2_len >> 1) <<
2009                       IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2010
2011         /* Enable L3 checksum offloads */
2012         if (ol_flags & PKT_TX_IP_CKSUM) {
2013                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2014                 *td_offset |= (tx_offload.l3_len >> 2) <<
2015                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2016         } else if (ol_flags & PKT_TX_IPV4) {
2017                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2018                 *td_offset |= (tx_offload.l3_len >> 2) <<
2019                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2020         } else if (ol_flags & PKT_TX_IPV6) {
2021                 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2022                 *td_offset |= (tx_offload.l3_len >> 2) <<
2023                               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2024         }
2025
2026         if (ol_flags & PKT_TX_TCP_SEG) {
2027                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2028                 *td_offset |= (tx_offload.l4_len >> 2) <<
2029                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2030                 return;
2031         }
2032
2033         /* Enable L4 checksum offloads */
2034         switch (ol_flags & PKT_TX_L4_MASK) {
2035         case PKT_TX_TCP_CKSUM:
2036                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2037                 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2038                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2039                 break;
2040         case PKT_TX_SCTP_CKSUM:
2041                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2042                 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2043                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2044                 break;
2045         case PKT_TX_UDP_CKSUM:
2046                 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2047                 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2048                               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2049                 break;
2050         default:
2051                 break;
2052         }
2053 }
2054
2055 /* Set the TSO context descriptor.
2056  * Supports IP -> L4 and IP -> IP -> L4 packets.
2057  */
2058 static inline uint64_t
2059 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2060 {
2061         uint64_t ctx_desc = 0;
2062         uint32_t cd_cmd, hdr_len, cd_tso_len;
2063
2064         if (!tx_offload.l4_len) {
2065                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2066                 return ctx_desc;
2067         }
2068
2069         hdr_len = tx_offload.l2_len +
2070                   tx_offload.l3_len +
2071                   tx_offload.l4_len;
2072
2073         cd_cmd = IAVF_TX_CTX_DESC_TSO;
2074         cd_tso_len = mbuf->pkt_len - hdr_len;
2075         ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2076                      ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2077                      ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
2078
2079         return ctx_desc;
2080 }
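
/* Worked example for the function above, using assumed values that are not
 * from this source: a TSO mbuf with l2_len 14, l3_len 20, l4_len 20 and
 * pkt_len 9014 gives hdr_len 54 and cd_tso_len 8960; with tso_segsz 1460 the
 * hardware would be expected to emit ceil(8960 / 1460) = 7 MSS-sized segments.
 */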
2081
2082 /* Construct the Tx data descriptor cmd_type_offset_bsz word */
2083 static inline uint64_t
2084 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2085                uint32_t td_tag)
2086 {
2087         return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2088                                 ((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
2089                                 ((uint64_t)td_offset <<
2090                                  IAVF_TXD_QW1_OFFSET_SHIFT) |
2091                                 ((uint64_t)size  <<
2092                                  IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2093                                 ((uint64_t)td_tag  <<
2094                                  IAVF_TXD_QW1_L2TAG1_SHIFT));
2095 }
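
/* For illustration (a sketch, not from this source): for a single-segment
 * packet with no offloads, the transmit loop below ends up calling
 * iavf_build_ctob(td_cmd, 0, m_seg->data_len, 0) for each segment;
 * IAVF_TX_DESC_CMD_EOP (and RS when due) is OR'ed into the last descriptor's
 * cmd_type_offset_bsz afterwards.
 */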
2096
2097 /* TX function */
2098 uint16_t
2099 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2100 {
2101         volatile struct iavf_tx_desc *txd;
2102         volatile struct iavf_tx_desc *txr;
2103         struct iavf_tx_queue *txq;
2104         struct iavf_tx_entry *sw_ring;
2105         struct iavf_tx_entry *txe, *txn;
2106         struct rte_mbuf *tx_pkt;
2107         struct rte_mbuf *m_seg;
2108         uint16_t tx_id;
2109         uint16_t nb_tx;
2110         uint32_t td_cmd;
2111         uint32_t td_offset;
2112         uint32_t td_tag;
2113         uint64_t ol_flags;
2114         uint16_t nb_used;
2115         uint16_t nb_ctx;
2116         uint16_t tx_last;
2117         uint16_t slen;
2118         uint64_t buf_dma_addr;
2119         uint16_t cd_l2tag2 = 0;
2120         union iavf_tx_offload tx_offload = {0};
2121
2122         txq = tx_queue;
2123         sw_ring = txq->sw_ring;
2124         txr = txq->tx_ring;
2125         tx_id = txq->tx_tail;
2126         txe = &sw_ring[tx_id];
2127
2128         /* Check if the descriptor ring needs to be cleaned. */
2129         if (txq->nb_free < txq->free_thresh)
2130                 (void)iavf_xmit_cleanup(txq);
2131
2132         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2133                 td_cmd = 0;
2134                 td_tag = 0;
2135                 td_offset = 0;
2136
2137                 tx_pkt = *tx_pkts++;
2138                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2139
2140                 ol_flags = tx_pkt->ol_flags;
2141                 tx_offload.l2_len = tx_pkt->l2_len;
2142                 tx_offload.l3_len = tx_pkt->l3_len;
2143                 tx_offload.l4_len = tx_pkt->l4_len;
2144                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2145                 /* Calculate the number of context descriptors needed. */
2146                 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2147
2148                 /* The number of descriptors that must be allocated for
2149                  * a packet equals the number of segments of that packet,
2150                  * plus 1 context descriptor if needed.
2151                  */
2152                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2153                 tx_last = (uint16_t)(tx_id + nb_used - 1);
2154
2155                 /* Circular ring */
2156                 if (tx_last >= txq->nb_tx_desc)
2157                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2158
2159                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2160                            " tx_first=%u tx_last=%u",
2161                            txq->port_id, txq->queue_id, tx_id, tx_last);
2162
2163                 if (nb_used > txq->nb_free) {
2164                         if (iavf_xmit_cleanup(txq)) {
2165                                 if (nb_tx == 0)
2166                                         return 0;
2167                                 goto end_of_tx;
2168                         }
2169                         if (unlikely(nb_used > txq->rs_thresh)) {
2170                                 while (nb_used > txq->nb_free) {
2171                                         if (iavf_xmit_cleanup(txq)) {
2172                                                 if (nb_tx == 0)
2173                                                         return 0;
2174                                                 goto end_of_tx;
2175                                         }
2176                                 }
2177                         }
2178                 }
2179
2180                 /* Descriptor based VLAN insertion */
2181                 if (ol_flags & PKT_TX_VLAN_PKT &&
2182                     txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2183                         td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2184                         td_tag = tx_pkt->vlan_tci;
2185                 }
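
                /* Depending on txq->vlan_flag, the VLAN tag is inserted
                 * either from L2TAG1 of this data descriptor (above) or from
                 * L2TAG2 of the context descriptor set up below.
                 */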
2186
2187                 /* According to the datasheet, bit 2 is reserved and must
2188                  * be set to 1.
2189                  */
2190                 td_cmd |= 0x04;
2191
2192                 /* Enable checksum offloading */
2193                 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2194                         iavf_txd_enable_checksum(ol_flags, &td_cmd,
2195                                                 &td_offset, tx_offload);
2196
2197                 if (nb_ctx) {
2198                         /* Setup TX context descriptor if required */
2199                         uint64_t cd_type_cmd_tso_mss =
2200                                 IAVF_TX_DESC_DTYPE_CONTEXT;
2201                         volatile struct iavf_tx_context_desc *ctx_txd =
2202                                 (volatile struct iavf_tx_context_desc *)
2203                                                         &txr[tx_id];
2204
2205                         txn = &sw_ring[txe->next_id];
2206                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2207                         if (txe->mbuf) {
2208                                 rte_pktmbuf_free_seg(txe->mbuf);
2209                                 txe->mbuf = NULL;
2210                         }
2211
2212                         /* TSO enabled */
2213                         if (ol_flags & PKT_TX_TCP_SEG)
2214                                 cd_type_cmd_tso_mss |=
2215                                         iavf_set_tso_ctx(tx_pkt, tx_offload);
2216
2217                         if (ol_flags & PKT_TX_VLAN_PKT &&
2218                            txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2219                                 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2220                                         << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2221                                 cd_l2tag2 = tx_pkt->vlan_tci;
2222                         }
2223
2224                         ctx_txd->type_cmd_tso_mss =
2225                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2226                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2227
2228                         IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2229                         txe->last_id = tx_last;
2230                         tx_id = txe->next_id;
2231                         txe = txn;
2232                 }
2233
2234                 m_seg = tx_pkt;
2235                 do {
2236                         txd = &txr[tx_id];
2237                         txn = &sw_ring[txe->next_id];
2238
2239                         if (txe->mbuf)
2240                                 rte_pktmbuf_free_seg(txe->mbuf);
2241                         txe->mbuf = m_seg;
2242
2243                         /* Setup TX Descriptor */
2244                         slen = m_seg->data_len;
2245                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
2246                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2247                         txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2248                                                                   td_offset,
2249                                                                   slen,
2250                                                                   td_tag);
2251
2252                         IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2253                         txe->last_id = tx_last;
2254                         tx_id = txe->next_id;
2255                         txe = txn;
2256                         m_seg = m_seg->next;
2257                 } while (m_seg);
2258
2259                 /* The last packet data descriptor needs End Of Packet (EOP) */
2260                 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2261                 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2262                 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2263
2264                 if (txq->nb_used >= txq->rs_thresh) {
2265                         PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2266                                    "%4u (port=%d queue=%d)",
2267                                    tx_last, txq->port_id, txq->queue_id);
2268
2269                         td_cmd |= IAVF_TX_DESC_CMD_RS;
2270
2271                         /* Update txq RS bit counters */
2272                         txq->nb_used = 0;
2273                 }
2274
2275                 txd->cmd_type_offset_bsz |=
2276                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2277                                          IAVF_TXD_QW1_CMD_SHIFT);
2278                 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2279         }
2280
2281 end_of_tx:
2282         rte_wmb();
2283
2284         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2285                    txq->port_id, txq->queue_id, tx_id, nb_tx);
2286
2287         IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2288         txq->tx_tail = tx_id;
2289
2290         return nb_tx;
2291 }
2292
2293 /* TX prep function */
2294 uint16_t
2295 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2296               uint16_t nb_pkts)
2297 {
2298         int i, ret;
2299         uint64_t ol_flags;
2300         struct rte_mbuf *m;
2301
2302         for (i = 0; i < nb_pkts; i++) {
2303                 m = tx_pkts[i];
2304                 ol_flags = m->ol_flags;
2305
2306                 /* For non-TSO packets, check that nb_segs does not exceed IAVF_TX_MAX_MTU_SEG. */
2307                 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2308                         if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2309                                 rte_errno = EINVAL;
2310                                 return i;
2311                         }
2312                 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2313                            (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2314                         /* An MSS outside this range is considered malicious */
2315                         rte_errno = EINVAL;
2316                         return i;
2317                 }
2318
2319                 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2320                         rte_errno = ENOTSUP;
2321                         return i;
2322                 }
2323
2324 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2325                 ret = rte_validate_tx_offload(m);
2326                 if (ret != 0) {
2327                         rte_errno = -ret;
2328                         return i;
2329                 }
2330 #endif
2331                 ret = rte_net_intel_cksum_prepare(m);
2332                 if (ret != 0) {
2333                         rte_errno = -ret;
2334                         return i;
2335                 }
2336         }
2337
2338         return i;
2339 }
2340
2341 /* Choose the Rx function */
2342 void
2343 iavf_set_rx_function(struct rte_eth_dev *dev)
2344 {
2345         struct iavf_adapter *adapter =
2346                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2347         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2348
2349 #ifdef RTE_ARCH_X86
2350         struct iavf_rx_queue *rxq;
2351         int i;
2352         bool use_avx2 = false;
2353 #ifdef CC_AVX512_SUPPORT
2354         bool use_avx512 = false;
2355 #endif
2356
2357         if (!iavf_rx_vec_dev_check(dev) &&
2358                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2359                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2360                         rxq = dev->data->rx_queues[i];
2361                         (void)iavf_rxq_vec_setup(rxq);
2362                 }
2363
2364                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2365                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2366                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2367                         use_avx2 = true;
2368 #ifdef CC_AVX512_SUPPORT
2369                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2370                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2371                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2372                         use_avx512 = true;
2373 #endif
2374
2375                 if (dev->data->scattered_rx) {
2376                         PMD_DRV_LOG(DEBUG,
2377                                     "Using %sVector Scattered Rx (port %d).",
2378                                     use_avx2 ? "avx2 " : "",
2379                                     dev->data->port_id);
2380                         if (vf->vf_res->vf_cap_flags &
2381                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2382                                 dev->rx_pkt_burst = use_avx2 ?
2383                                         iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2384                                         iavf_recv_scattered_pkts_vec_flex_rxd;
2385 #ifdef CC_AVX512_SUPPORT
2386                                 if (use_avx512)
2387                                         dev->rx_pkt_burst =
2388                                                 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2389 #endif
2390                         } else {
2391                                 dev->rx_pkt_burst = use_avx2 ?
2392                                         iavf_recv_scattered_pkts_vec_avx2 :
2393                                         iavf_recv_scattered_pkts_vec;
2394 #ifdef CC_AVX512_SUPPORT
2395                                 if (use_avx512)
2396                                         dev->rx_pkt_burst =
2397                                                 iavf_recv_scattered_pkts_vec_avx512;
2398 #endif
2399                         }
2400                 } else {
2401                         PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2402                                     use_avx2 ? "avx2 " : "",
2403                                     dev->data->port_id);
2404                         if (vf->vf_res->vf_cap_flags &
2405                                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2406                                 dev->rx_pkt_burst = use_avx2 ?
2407                                         iavf_recv_pkts_vec_avx2_flex_rxd :
2408                                         iavf_recv_pkts_vec_flex_rxd;
2409 #ifdef CC_AVX512_SUPPORT
2410                                 if (use_avx512)
2411                                         dev->rx_pkt_burst =
2412                                                 iavf_recv_pkts_vec_avx512_flex_rxd;
2413 #endif
2414                         } else {
2415                                 dev->rx_pkt_burst = use_avx2 ?
2416                                         iavf_recv_pkts_vec_avx2 :
2417                                         iavf_recv_pkts_vec;
2418 #ifdef CC_AVX512_SUPPORT
2419                                 if (use_avx512)
2420                                         dev->rx_pkt_burst =
2421                                                 iavf_recv_pkts_vec_avx512;
2422 #endif
2423                         }
2424                 }
2425
2426                 return;
2427         }
2428 #endif
2429
2430         if (dev->data->scattered_rx) {
2431                 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2432                             dev->data->port_id);
2433                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2434                         dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2435                 else
2436                         dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2437         } else if (adapter->rx_bulk_alloc_allowed) {
2438                 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2439                             dev->data->port_id);
2440                 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2441         } else {
2442                 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2443                             dev->data->port_id);
2444                 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2445                         dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2446                 else
2447                         dev->rx_pkt_burst = iavf_recv_pkts;
2448         }
2449 }
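
/* Summary of the selection above: on x86, vector callbacks are preferred
 * (AVX512 over AVX2 over SSE) when iavf_rx_vec_dev_check() passes; otherwise
 * a scalar scattered, bulk-allocation or basic callback is used. Flex
 * descriptor variants are picked when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is
 * negotiated (the bulk-allocation path checks rxq->rxdid at run time).
 */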
2450
2451 /* Choose the Tx function */
2452 void
2453 iavf_set_tx_function(struct rte_eth_dev *dev)
2454 {
2455 #ifdef RTE_ARCH_X86
2456         struct iavf_tx_queue *txq;
2457         int i;
2458         bool use_avx2 = false;
2459 #ifdef CC_AVX512_SUPPORT
2460         bool use_avx512 = false;
2461 #endif
2462
2463         if (!iavf_tx_vec_dev_check(dev) &&
2464                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2465                 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2466                      rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2467                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2468                         use_avx2 = true;
2469 #ifdef CC_AVX512_SUPPORT
2470                 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2471                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2472                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2473                         use_avx512 = true;
2474 #endif
2475
2476                 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2477                             use_avx2 ? "avx2 " : "",
2478                             dev->data->port_id);
2479                 dev->tx_pkt_burst = use_avx2 ?
2480                                     iavf_xmit_pkts_vec_avx2 :
2481                                     iavf_xmit_pkts_vec;
2482 #ifdef CC_AVX512_SUPPORT
2483                 if (use_avx512)
2484                         dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2485 #endif
2486                 dev->tx_pkt_prepare = NULL;
2487
2488                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2489                         txq = dev->data->tx_queues[i];
2490                         if (!txq)
2491                                 continue;
2492 #ifdef CC_AVX512_SUPPORT
2493                         if (use_avx512)
2494                                 iavf_txq_vec_setup_avx512(txq);
2495                         else
2496                                 iavf_txq_vec_setup(txq);
2497 #else
2498                         iavf_txq_vec_setup(txq);
2499 #endif
2500                 }
2501
2502                 return;
2503         }
2504 #endif
2505
2506         PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2507                     dev->data->port_id);
2508         dev->tx_pkt_burst = iavf_xmit_pkts;
2509         dev->tx_pkt_prepare = iavf_prep_pkts;
2510 }
2511
2512 static int
2513 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2514                         uint32_t free_cnt)
2515 {
2516         struct iavf_tx_entry *swr_ring = txq->sw_ring;
2517         uint16_t i, tx_last, tx_id;
2518         uint16_t nb_tx_free_last;
2519         uint16_t nb_tx_to_clean;
2520         uint32_t pkt_cnt;
2521
2522         /* Start freeing mbufs from the descriptor following tx_tail */
2523         tx_last = txq->tx_tail;
2524         tx_id  = swr_ring[tx_last].next_id;
2525
2526         if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2527                 return 0;
2528
2529         nb_tx_to_clean = txq->nb_free;
2530         nb_tx_free_last = txq->nb_free;
2531         if (!free_cnt)
2532                 free_cnt = txq->nb_tx_desc;
2533
2534         /* Loop through swr_ring to count the number of
2535          * freeable mbufs and packets.
2536          */
2537         for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2538                 for (i = 0; i < nb_tx_to_clean &&
2539                         pkt_cnt < free_cnt &&
2540                         tx_id != tx_last; i++) {
2541                         if (swr_ring[tx_id].mbuf != NULL) {
2542                                 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2543                                 swr_ring[tx_id].mbuf = NULL;
2544
2545                                 /*
2546                                  * last segment in the packet,
2547                                  * increment packet count
2548                                  */
2549                                 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2550                         }
2551
2552                         tx_id = swr_ring[tx_id].next_id;
2553                 }
2554
2555                 if (txq->rs_thresh > txq->nb_tx_desc -
2556                         txq->nb_free || tx_id == tx_last)
2557                         break;
2558
2559                 if (pkt_cnt < free_cnt) {
2560                         if (iavf_xmit_cleanup(txq))
2561                                 break;
2562
2563                         nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2564                         nb_tx_free_last = txq->nb_free;
2565                 }
2566         }
2567
2568         return (int)pkt_cnt;
2569 }
2570
2571 int
2572 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2573 {
2574         struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2575
2576         return iavf_tx_done_cleanup_full(q, free_cnt);
2577 }
2578
2579 void
2580 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2581                      struct rte_eth_rxq_info *qinfo)
2582 {
2583         struct iavf_rx_queue *rxq;
2584
2585         rxq = dev->data->rx_queues[queue_id];
2586
2587         qinfo->mp = rxq->mp;
2588         qinfo->scattered_rx = dev->data->scattered_rx;
2589         qinfo->nb_desc = rxq->nb_rx_desc;
2590
2591         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2592         qinfo->conf.rx_drop_en = true;
2593         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2594 }
2595
2596 void
2597 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2598                      struct rte_eth_txq_info *qinfo)
2599 {
2600         struct iavf_tx_queue *txq;
2601
2602         txq = dev->data->tx_queues[queue_id];
2603
2604         qinfo->nb_desc = txq->nb_tx_desc;
2605
2606         qinfo->conf.tx_free_thresh = txq->free_thresh;
2607         qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2608         qinfo->conf.offloads = txq->offloads;
2609         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2610 }
2611
2612 /* Get the number of used descriptors of an Rx queue */
2613 uint32_t
2614 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2615 {
2616 #define IAVF_RXQ_SCAN_INTERVAL 4
2617         volatile union iavf_rx_desc *rxdp;
2618         struct iavf_rx_queue *rxq;
2619         uint16_t desc = 0;
2620
2621         rxq = dev->data->rx_queues[queue_id];
2622         rxdp = &rxq->rx_ring[rxq->rx_tail];
2623
2624         while ((desc < rxq->nb_rx_desc) &&
2625                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2626                  IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2627                (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2628                 /* Check the DD bit of every fourth Rx descriptor to avoid
2629                  * checking too frequently and degrading performance too
2630                  * much.
2631                  */
2632                 desc += IAVF_RXQ_SCAN_INTERVAL;
2633                 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2634                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2635                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2636                                         desc - rxq->nb_rx_desc]);
2637         }
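        /* The count above advances in steps of IAVF_RXQ_SCAN_INTERVAL, so the
         * value returned is a multiple of 4 and only an approximation of the
         * number of used descriptors.
         */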
2638
2639         return desc;
2640 }
2641
2642 int
2643 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2644 {
2645         struct iavf_rx_queue *rxq = rx_queue;
2646         volatile uint64_t *status;
2647         uint64_t mask;
2648         uint32_t desc;
2649
2650         if (unlikely(offset >= rxq->nb_rx_desc))
2651                 return -EINVAL;
2652
2653         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2654                 return RTE_ETH_RX_DESC_UNAVAIL;
2655
2656         desc = rxq->rx_tail + offset;
2657         if (desc >= rxq->nb_rx_desc)
2658                 desc -= rxq->nb_rx_desc;
2659
2660         status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2661         mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2662                 << IAVF_RXD_QW1_STATUS_SHIFT);
2663         if (*status & mask)
2664                 return RTE_ETH_RX_DESC_DONE;
2665
2666         return RTE_ETH_RX_DESC_AVAIL;
2667 }
2668
2669 int
2670 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2671 {
2672         struct iavf_tx_queue *txq = tx_queue;
2673         volatile uint64_t *status;
2674         uint64_t mask, expect;
2675         uint32_t desc;
2676
2677         if (unlikely(offset >= txq->nb_tx_desc))
2678                 return -EINVAL;
2679
2680         desc = txq->tx_tail + offset;
2681         /* Go to the next descriptor expected to have the RS bit set */
2682         desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2683                 txq->rs_thresh;
2684         if (desc >= txq->nb_tx_desc) {
2685                 desc -= txq->nb_tx_desc;
2686                 if (desc >= txq->nb_tx_desc)
2687                         desc -= txq->nb_tx_desc;
2688         }
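        /* For illustration (assumed values): with rs_thresh == 32, tx_tail 10
         * and offset 5, desc starts at 15 and is rounded up to 32, the
         * descriptor whose completion status is then checked below.
         */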
2689
2690         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2691         mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2692         expect = rte_cpu_to_le_64(
2693                  IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2694         if ((*status & mask) == expect)
2695                 return RTE_ETH_TX_DESC_DONE;
2696
2697         return RTE_ETH_TX_DESC_FULL;
2698 }
2699
2700 const uint32_t *
2701 iavf_get_default_ptype_table(void)
2702 {
2703         static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2704                 __rte_cache_aligned = {
2705                 /* L2 types */
2706                 /* [0] reserved */
2707                 [1] = RTE_PTYPE_L2_ETHER,
2708                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2709                 /* [3] - [5] reserved */
2710                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2711                 /* [7] - [10] reserved */
2712                 [11] = RTE_PTYPE_L2_ETHER_ARP,
2713                 /* [12] - [21] reserved */
2714
2715                 /* Non tunneled IPv4 */
2716                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2717                        RTE_PTYPE_L4_FRAG,
2718                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2719                        RTE_PTYPE_L4_NONFRAG,
2720                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2721                        RTE_PTYPE_L4_UDP,
2722                 /* [25] reserved */
2723                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2724                        RTE_PTYPE_L4_TCP,
2725                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2726                        RTE_PTYPE_L4_SCTP,
2727                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2728                        RTE_PTYPE_L4_ICMP,
2729
2730                 /* IPv4 --> IPv4 */
2731                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2732                        RTE_PTYPE_TUNNEL_IP |
2733                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2734                        RTE_PTYPE_INNER_L4_FRAG,
2735                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2736                        RTE_PTYPE_TUNNEL_IP |
2737                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2738                        RTE_PTYPE_INNER_L4_NONFRAG,
2739                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2740                        RTE_PTYPE_TUNNEL_IP |
2741                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2742                        RTE_PTYPE_INNER_L4_UDP,
2743                 /* [32] reserved */
2744                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2745                        RTE_PTYPE_TUNNEL_IP |
2746                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2747                        RTE_PTYPE_INNER_L4_TCP,
2748                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2749                        RTE_PTYPE_TUNNEL_IP |
2750                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2751                        RTE_PTYPE_INNER_L4_SCTP,
2752                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2753                        RTE_PTYPE_TUNNEL_IP |
2754                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2755                        RTE_PTYPE_INNER_L4_ICMP,
2756
2757                 /* IPv4 --> IPv6 */
2758                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2759                        RTE_PTYPE_TUNNEL_IP |
2760                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2761                        RTE_PTYPE_INNER_L4_FRAG,
2762                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2763                        RTE_PTYPE_TUNNEL_IP |
2764                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2765                        RTE_PTYPE_INNER_L4_NONFRAG,
2766                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2767                        RTE_PTYPE_TUNNEL_IP |
2768                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2769                        RTE_PTYPE_INNER_L4_UDP,
2770                 /* [39] reserved */
2771                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2772                        RTE_PTYPE_TUNNEL_IP |
2773                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2774                        RTE_PTYPE_INNER_L4_TCP,
2775                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2776                        RTE_PTYPE_TUNNEL_IP |
2777                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2778                        RTE_PTYPE_INNER_L4_SCTP,
2779                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2780                        RTE_PTYPE_TUNNEL_IP |
2781                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2782                        RTE_PTYPE_INNER_L4_ICMP,
2783
2784                 /* IPv4 --> GRE/Teredo/VXLAN */
2785                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2786                        RTE_PTYPE_TUNNEL_GRENAT,
2787
2788                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2789                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2790                        RTE_PTYPE_TUNNEL_GRENAT |
2791                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2792                        RTE_PTYPE_INNER_L4_FRAG,
2793                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2794                        RTE_PTYPE_TUNNEL_GRENAT |
2795                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2796                        RTE_PTYPE_INNER_L4_NONFRAG,
2797                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2798                        RTE_PTYPE_TUNNEL_GRENAT |
2799                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2800                        RTE_PTYPE_INNER_L4_UDP,
2801                 /* [47] reserved */
2802                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2803                        RTE_PTYPE_TUNNEL_GRENAT |
2804                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2805                        RTE_PTYPE_INNER_L4_TCP,
2806                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2807                        RTE_PTYPE_TUNNEL_GRENAT |
2808                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2809                        RTE_PTYPE_INNER_L4_SCTP,
2810                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2811                        RTE_PTYPE_TUNNEL_GRENAT |
2812                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2813                        RTE_PTYPE_INNER_L4_ICMP,
2814
2815                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2816                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2817                        RTE_PTYPE_TUNNEL_GRENAT |
2818                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2819                        RTE_PTYPE_INNER_L4_FRAG,
2820                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2821                        RTE_PTYPE_TUNNEL_GRENAT |
2822                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2823                        RTE_PTYPE_INNER_L4_NONFRAG,
2824                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2825                        RTE_PTYPE_TUNNEL_GRENAT |
2826                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2827                        RTE_PTYPE_INNER_L4_UDP,
2828                 /* [54] reserved */
2829                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2830                        RTE_PTYPE_TUNNEL_GRENAT |
2831                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2832                        RTE_PTYPE_INNER_L4_TCP,
2833                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2834                        RTE_PTYPE_TUNNEL_GRENAT |
2835                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2836                        RTE_PTYPE_INNER_L4_SCTP,
2837                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2838                        RTE_PTYPE_TUNNEL_GRENAT |
2839                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2840                        RTE_PTYPE_INNER_L4_ICMP,
2841
2842                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2843                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2844                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2845
2846                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2847                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2848                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2849                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2850                        RTE_PTYPE_INNER_L4_FRAG,
2851                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2852                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2853                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2854                        RTE_PTYPE_INNER_L4_NONFRAG,
2855                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2856                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2857                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2858                        RTE_PTYPE_INNER_L4_UDP,
2859                 /* [62] reserved */
2860                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2861                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2862                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2863                        RTE_PTYPE_INNER_L4_TCP,
2864                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2865                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2866                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2867                        RTE_PTYPE_INNER_L4_SCTP,
2868                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2869                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2870                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2871                        RTE_PTYPE_INNER_L4_ICMP,
2872
2873                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2874                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2875                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2876                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2877                        RTE_PTYPE_INNER_L4_FRAG,
2878                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2879                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2880                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2881                        RTE_PTYPE_INNER_L4_NONFRAG,
2882                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2883                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2884                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2885                        RTE_PTYPE_INNER_L4_UDP,
2886                 /* [69] reserved */
2887                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2888                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2889                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2890                        RTE_PTYPE_INNER_L4_TCP,
2891                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2892                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2893                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2894                        RTE_PTYPE_INNER_L4_SCTP,
2895                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2896                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2897                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2898                        RTE_PTYPE_INNER_L4_ICMP,
2899                 /* [73] - [87] reserved */
2900
2901                 /* Non-tunneled IPv6 */
2902                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2903                        RTE_PTYPE_L4_FRAG,
2904                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2905                        RTE_PTYPE_L4_NONFRAG,
2906                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2907                        RTE_PTYPE_L4_UDP,
2908                 /* [91] reserved */
2909                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2910                        RTE_PTYPE_L4_TCP,
2911                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2912                        RTE_PTYPE_L4_SCTP,
2913                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2914                        RTE_PTYPE_L4_ICMP,
2915
2916                 /* IPv6 --> IPv4 */
2917                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2918                        RTE_PTYPE_TUNNEL_IP |
2919                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2920                        RTE_PTYPE_INNER_L4_FRAG,
2921                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2922                        RTE_PTYPE_TUNNEL_IP |
2923                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2924                        RTE_PTYPE_INNER_L4_NONFRAG,
2925                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2926                        RTE_PTYPE_TUNNEL_IP |
2927                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2928                        RTE_PTYPE_INNER_L4_UDP,
2929                 /* [98] reserved */
2930                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2931                        RTE_PTYPE_TUNNEL_IP |
2932                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2933                        RTE_PTYPE_INNER_L4_TCP,
2934                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2935                         RTE_PTYPE_TUNNEL_IP |
2936                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2937                         RTE_PTYPE_INNER_L4_SCTP,
2938                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2939                         RTE_PTYPE_TUNNEL_IP |
2940                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2941                         RTE_PTYPE_INNER_L4_ICMP,
2942
2943                 /* IPv6 --> IPv6 */
2944                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2945                         RTE_PTYPE_TUNNEL_IP |
2946                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2947                         RTE_PTYPE_INNER_L4_FRAG,
2948                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2949                         RTE_PTYPE_TUNNEL_IP |
2950                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2951                         RTE_PTYPE_INNER_L4_NONFRAG,
2952                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2953                         RTE_PTYPE_TUNNEL_IP |
2954                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2955                         RTE_PTYPE_INNER_L4_UDP,
2956                 /* [105] reserved */
2957                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2958                         RTE_PTYPE_TUNNEL_IP |
2959                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2960                         RTE_PTYPE_INNER_L4_TCP,
2961                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2962                         RTE_PTYPE_TUNNEL_IP |
2963                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2964                         RTE_PTYPE_INNER_L4_SCTP,
2965                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2966                         RTE_PTYPE_TUNNEL_IP |
2967                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2968                         RTE_PTYPE_INNER_L4_ICMP,
2969
2970                 /* IPv6 --> GRE/Teredo/VXLAN */
2971                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2972                         RTE_PTYPE_TUNNEL_GRENAT,
2973
2974                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2975                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2976                         RTE_PTYPE_TUNNEL_GRENAT |
2977                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2978                         RTE_PTYPE_INNER_L4_FRAG,
2979                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2980                         RTE_PTYPE_TUNNEL_GRENAT |
2981                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2982                         RTE_PTYPE_INNER_L4_NONFRAG,
2983                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2984                         RTE_PTYPE_TUNNEL_GRENAT |
2985                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2986                         RTE_PTYPE_INNER_L4_UDP,
2987                 /* [113] reserved */
2988                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2989                         RTE_PTYPE_TUNNEL_GRENAT |
2990                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2991                         RTE_PTYPE_INNER_L4_TCP,
2992                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2993                         RTE_PTYPE_TUNNEL_GRENAT |
2994                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2995                         RTE_PTYPE_INNER_L4_SCTP,
2996                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2997                         RTE_PTYPE_TUNNEL_GRENAT |
2998                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2999                         RTE_PTYPE_INNER_L4_ICMP,
3000
3001                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3002                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3003                         RTE_PTYPE_TUNNEL_GRENAT |
3004                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3005                         RTE_PTYPE_INNER_L4_FRAG,
3006                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3007                         RTE_PTYPE_TUNNEL_GRENAT |
3008                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3009                         RTE_PTYPE_INNER_L4_NONFRAG,
3010                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3011                         RTE_PTYPE_TUNNEL_GRENAT |
3012                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3013                         RTE_PTYPE_INNER_L4_UDP,
3014                 /* [120] reserved */
3015                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3016                         RTE_PTYPE_TUNNEL_GRENAT |
3017                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3018                         RTE_PTYPE_INNER_L4_TCP,
3019                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3020                         RTE_PTYPE_TUNNEL_GRENAT |
3021                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3022                         RTE_PTYPE_INNER_L4_SCTP,
3023                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3024                         RTE_PTYPE_TUNNEL_GRENAT |
3025                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3026                         RTE_PTYPE_INNER_L4_ICMP,
3027
3028                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3029                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3030                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3031
3032                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3033                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3034                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3035                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3036                         RTE_PTYPE_INNER_L4_FRAG,
3037                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3038                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3039                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3040                         RTE_PTYPE_INNER_L4_NONFRAG,
3041                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3042                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3043                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3044                         RTE_PTYPE_INNER_L4_UDP,
3045                 /* [128] reserved */
3046                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3047                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3048                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3049                         RTE_PTYPE_INNER_L4_TCP,
3050                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3051                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3052                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3053                         RTE_PTYPE_INNER_L4_SCTP,
3054                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3055                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3056                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3057                         RTE_PTYPE_INNER_L4_ICMP,
3058
3059                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3060                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3061                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3062                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3063                         RTE_PTYPE_INNER_L4_FRAG,
3064                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3065                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3066                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3067                         RTE_PTYPE_INNER_L4_NONFRAG,
3068                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3069                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3070                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3071                         RTE_PTYPE_INNER_L4_UDP,
3072                 /* [135] reserved */
3073                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3074                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3075                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3076                         RTE_PTYPE_INNER_L4_TCP,
3077                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3078                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3079                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3080                         RTE_PTYPE_INNER_L4_SCTP,
3081                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3082                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3083                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3084                         RTE_PTYPE_INNER_L4_ICMP,
3085                 /* [139] - [299] reserved */
3086
3087                 /* PPPoE */
3088                 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3089                 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3090
3091                 /* PPPoE --> IPv4 */
3092                 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3093                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3094                         RTE_PTYPE_L4_FRAG,
3095                 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3096                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3097                         RTE_PTYPE_L4_NONFRAG,
3098                 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3099                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3100                         RTE_PTYPE_L4_UDP,
3101                 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3102                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3103                         RTE_PTYPE_L4_TCP,
3104                 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3105                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3106                         RTE_PTYPE_L4_SCTP,
3107                 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3108                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3109                         RTE_PTYPE_L4_ICMP,
3110
3111                 /* PPPoE --> IPv6 */
3112                 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3113                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3114                         RTE_PTYPE_L4_FRAG,
3115                 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3116                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3117                         RTE_PTYPE_L4_NONFRAG,
3118                 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3119                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3120                         RTE_PTYPE_L4_UDP,
3121                 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3122                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3123                         RTE_PTYPE_L4_TCP,
3124                 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3125                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3126                         RTE_PTYPE_L4_SCTP,
3127                 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3128                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3129                         RTE_PTYPE_L4_ICMP,
3130                 /* [314] - [324] reserved */
3131
3132                 /* IPv4/IPv6 --> GTPC/GTPU */
3133                 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3134                         RTE_PTYPE_TUNNEL_GTPC,
3135                 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3136                         RTE_PTYPE_TUNNEL_GTPC,
3137                 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3138                         RTE_PTYPE_TUNNEL_GTPC,
3139                 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3140                         RTE_PTYPE_TUNNEL_GTPC,
3141                 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3142                         RTE_PTYPE_TUNNEL_GTPU,
3143                 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3144                         RTE_PTYPE_TUNNEL_GTPU,
3145
3146                 /* IPv4 --> GTPU --> IPv4 */
3147                 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3148                         RTE_PTYPE_TUNNEL_GTPU |
3149                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3150                         RTE_PTYPE_INNER_L4_FRAG,
3151                 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3152                         RTE_PTYPE_TUNNEL_GTPU |
3153                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3154                         RTE_PTYPE_INNER_L4_NONFRAG,
3155                 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3156                         RTE_PTYPE_TUNNEL_GTPU |
3157                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3158                         RTE_PTYPE_INNER_L4_UDP,
3159                 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3160                         RTE_PTYPE_TUNNEL_GTPU |
3161                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3162                         RTE_PTYPE_INNER_L4_TCP,
3163                 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3164                         RTE_PTYPE_TUNNEL_GTPU |
3165                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3166                         RTE_PTYPE_INNER_L4_ICMP,
3167
3168                 /* IPv6 --> GTPU --> IPv4 */
3169                 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3170                         RTE_PTYPE_TUNNEL_GTPU |
3171                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3172                         RTE_PTYPE_INNER_L4_FRAG,
3173                 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3174                         RTE_PTYPE_TUNNEL_GTPU |
3175                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3176                         RTE_PTYPE_INNER_L4_NONFRAG,
3177                 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3178                         RTE_PTYPE_TUNNEL_GTPU |
3179                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3180                         RTE_PTYPE_INNER_L4_UDP,
3181                 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3182                         RTE_PTYPE_TUNNEL_GTPU |
3183                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3184                         RTE_PTYPE_INNER_L4_TCP,
3185                 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3186                         RTE_PTYPE_TUNNEL_GTPU |
3187                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3188                         RTE_PTYPE_INNER_L4_ICMP,
3189
3190                 /* IPv4 --> GTPU --> IPv6 */
3191                 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3192                         RTE_PTYPE_TUNNEL_GTPU |
3193                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3194                         RTE_PTYPE_INNER_L4_FRAG,
3195                 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3196                         RTE_PTYPE_TUNNEL_GTPU |
3197                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3198                         RTE_PTYPE_INNER_L4_NONFRAG,
3199                 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3200                         RTE_PTYPE_TUNNEL_GTPU |
3201                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3202                         RTE_PTYPE_INNER_L4_UDP,
3203                 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3204                         RTE_PTYPE_TUNNEL_GTPU |
3205                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3206                         RTE_PTYPE_INNER_L4_TCP,
3207                 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3208                         RTE_PTYPE_TUNNEL_GTPU |
3209                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3210                         RTE_PTYPE_INNER_L4_ICMP,
3211
3212                 /* IPv6 --> GTPU --> IPv6 */
3213                 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3214                         RTE_PTYPE_TUNNEL_GTPU |
3215                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3216                         RTE_PTYPE_INNER_L4_FRAG,
3217                 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3218                         RTE_PTYPE_TUNNEL_GTPU |
3219                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3220                         RTE_PTYPE_INNER_L4_NONFRAG,
3221                 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3222                         RTE_PTYPE_TUNNEL_GTPU |
3223                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3224                         RTE_PTYPE_INNER_L4_UDP,
3225                 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3226                         RTE_PTYPE_TUNNEL_GTPU |
3227                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3228                         RTE_PTYPE_INNER_L4_TCP,
3229                 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3230                         RTE_PTYPE_TUNNEL_GTPU |
3231                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3232                         RTE_PTYPE_INNER_L4_ICMP,
3233
3234                 /* IPv4 --> UDP ECPRI */
3235                 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3236                         RTE_PTYPE_L4_UDP,
3237                 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3238                         RTE_PTYPE_L4_UDP,
3239                 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3240                         RTE_PTYPE_L4_UDP,
3241                 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3242                         RTE_PTYPE_L4_UDP,
3243                 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3244                         RTE_PTYPE_L4_UDP,
3245                 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3246                         RTE_PTYPE_L4_UDP,
3247                 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3248                         RTE_PTYPE_L4_UDP,
3249                 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3250                         RTE_PTYPE_L4_UDP,
3251                 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3252                         RTE_PTYPE_L4_UDP,
3253                 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3254                         RTE_PTYPE_L4_UDP,
3255
3256                 /* IPv6 --> UDP ECPRI */
3257                 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3258                         RTE_PTYPE_L4_UDP,
3259                 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3260                         RTE_PTYPE_L4_UDP,
3261                 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3262                         RTE_PTYPE_L4_UDP,
3263                 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3264                         RTE_PTYPE_L4_UDP,
3265                 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3266                         RTE_PTYPE_L4_UDP,
3267                 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3268                         RTE_PTYPE_L4_UDP,
3269                 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3270                         RTE_PTYPE_L4_UDP,
3271                 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3272                         RTE_PTYPE_L4_UDP,
3273                 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3274                         RTE_PTYPE_L4_UDP,
3275                 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3276                         RTE_PTYPE_L4_UDP,
3277                 /* All others reserved */
3278         };
3279
3280         return ptype_tbl;
3281 }
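
/*
 * Editorial sketch, not part of the upstream driver: the Rx burst functions
 * use the table above to translate the hardware packet-type index taken from
 * an Rx descriptor into the RTE_PTYPE_* value stored in mbuf->packet_type.
 * The function name and macro guard below are illustrative placeholders; the
 * guard is never defined.
 */
#ifdef IAVF_RXTX_DOC_EXAMPLE
static void
example_decode_ptype(uint16_t hw_ptype)
{
        const uint32_t *tbl = iavf_get_default_ptype_table();
        uint32_t sw_ptype;
        char name[128];

        /* out-of-range indexes and reserved entries resolve to
         * RTE_PTYPE_UNKNOWN (0)
         */
        sw_ptype = hw_ptype < IAVF_MAX_PKT_TYPE ? tbl[hw_ptype] :
                   RTE_PTYPE_UNKNOWN;

        if (rte_get_ptype_name(sw_ptype, name, sizeof(name)) == 0)
                printf("hw ptype %u -> %s\n", hw_ptype, name);
}
#endif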