1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
61 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
63 struct iavf_rx_queue *rxq = rx_queue;
64 volatile union iavf_rx_desc *rxdp;
68 rxdp = &rxq->rx_ring[desc];
69 /* watch for changes in status bit */
70 pmc->addr = &rxdp->wb.qword1.status_error_len;
73 * we expect the DD bit to be set to 1 if this descriptor was already
76 pmc->val = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
77 pmc->mask = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
79 /* registers are 64-bit */
80 pmc->size = sizeof(uint64_t);
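/*
 * Illustrative sketch (not part of the driver): the monitor condition set
 * up above is equivalent to polling the descriptor for the DD
 * (descriptor done) bit, e.g.:
 *
 *	uint64_t qw1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
 *	bool done = (qw1 & (1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)) != 0;
 *
 * The power-monitor condition lets the core sleep until hardware writes
 * that status word back instead of spinning on it.
 */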
86 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
88 /* The following constraints must be satisfied:
89 * thresh < rxq->nb_rx_desc
91 if (thresh >= nb_desc) {
92		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than the number of RX descriptors (%u)",
100 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
101 uint16_t tx_free_thresh)
103 /* TX descriptors will have their RS bit set after tx_rs_thresh
104 * descriptors have been used. The TX descriptor ring will be cleaned
105 * after tx_free_thresh descriptors are used or if the number of
106 * descriptors required to transmit a packet is greater than the
107 * number of free TX descriptors.
109 * The following constraints must be satisfied:
110 * - tx_rs_thresh must be less than the size of the ring minus 2.
111 * - tx_free_thresh must be less than the size of the ring minus 3.
112 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
113 * - tx_rs_thresh must be a divisor of the ring size.
115 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
116 * race condition, hence the maximum threshold constraints. When set
117 * to zero use default values.
119 if (tx_rs_thresh >= (nb_desc - 2)) {
120 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
121 "number of TX descriptors (%u) minus 2",
122 tx_rs_thresh, nb_desc);
125 if (tx_free_thresh >= (nb_desc - 3)) {
126 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
127 "number of TX descriptors (%u) minus 3.",
128 tx_free_thresh, nb_desc);
131 if (tx_rs_thresh > tx_free_thresh) {
132 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
133 "equal to tx_free_thresh (%u).",
134 tx_rs_thresh, tx_free_thresh);
137 if ((nb_desc % tx_rs_thresh) != 0) {
138 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
139 "number of TX descriptors (%u).",
140 tx_rs_thresh, nb_desc);
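/*
 * Worked example of the constraints above (values are illustrative, not
 * driver defaults): with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 64 we have 32 < 510, 64 < 509, 32 <= 64 and
 * 512 % 32 == 0, so check_tx_thresh() accepts the combination.
 */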
148 check_rx_vec_allow(struct iavf_rx_queue *rxq)
150 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
151 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
152 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
156 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
161 check_tx_vec_allow(struct iavf_tx_queue *txq)
163 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
164 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
165 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
166		PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
169 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
174 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
178 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
179 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
180 "rxq->rx_free_thresh=%d, "
181 "IAVF_RX_MAX_BURST=%d",
182 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
184 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
185 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
186 "rxq->nb_rx_desc=%d, "
187 "rxq->rx_free_thresh=%d",
188 rxq->nb_rx_desc, rxq->rx_free_thresh);
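/*
 * Example (assuming IAVF_RX_MAX_BURST is 32): a queue with
 * nb_rx_desc = 1024 and rx_free_thresh = 32 meets both preconditions
 * (32 >= 32 and 1024 % 32 == 0), so the bulk-allocation Rx path can be
 * used for it.
 */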
195 reset_rx_queue(struct iavf_rx_queue *rxq)
203 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
205 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
206 ((volatile char *)rxq->rx_ring)[i] = 0;
208 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
210 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
211 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
214 rxq->rx_nb_avail = 0;
215 rxq->rx_next_avail = 0;
216 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
220 rxq->pkt_first_seg = NULL;
221 rxq->pkt_last_seg = NULL;
223 rxq->rxrearm_start = 0;
227 reset_tx_queue(struct iavf_tx_queue *txq)
229 struct iavf_tx_entry *txe;
234 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
239 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
240 for (i = 0; i < size; i++)
241 ((volatile char *)txq->tx_ring)[i] = 0;
243 prev = (uint16_t)(txq->nb_tx_desc - 1);
244 for (i = 0; i < txq->nb_tx_desc; i++) {
245 txq->tx_ring[i].cmd_type_offset_bsz =
246 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
249 txe[prev].next_id = i;
256 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
257 txq->nb_free = txq->nb_tx_desc - 1;
259 txq->next_dd = txq->rs_thresh - 1;
260 txq->next_rs = txq->rs_thresh - 1;
264 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
266 volatile union iavf_rx_desc *rxd;
267 struct rte_mbuf *mbuf = NULL;
271 for (i = 0; i < rxq->nb_rx_desc; i++) {
272 mbuf = rte_mbuf_raw_alloc(rxq->mp);
273 if (unlikely(!mbuf)) {
274 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
278 rte_mbuf_refcnt_set(mbuf, 1);
280 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
282 mbuf->port = rxq->port_id;
285 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
287 rxd = &rxq->rx_ring[i];
288 rxd->read.pkt_addr = dma_addr;
289 rxd->read.hdr_addr = 0;
290 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
295 rxq->sw_ring[i] = mbuf;
302 release_rxq_mbufs(struct iavf_rx_queue *rxq)
309 for (i = 0; i < rxq->nb_rx_desc; i++) {
310 if (rxq->sw_ring[i]) {
311 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
312 rxq->sw_ring[i] = NULL;
317 if (rxq->rx_nb_avail == 0)
319 for (i = 0; i < rxq->rx_nb_avail; i++) {
320 struct rte_mbuf *mbuf;
322 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
323 rte_pktmbuf_free_seg(mbuf);
325 rxq->rx_nb_avail = 0;
329 release_txq_mbufs(struct iavf_tx_queue *txq)
333 if (!txq || !txq->sw_ring) {
334		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
338 for (i = 0; i < txq->nb_tx_desc; i++) {
339 if (txq->sw_ring[i].mbuf) {
340 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
341 txq->sw_ring[i].mbuf = NULL;
346 static const struct iavf_rxq_ops def_rxq_ops = {
347 .release_mbufs = release_rxq_mbufs,
350 static const struct iavf_txq_ops def_txq_ops = {
351 .release_mbufs = release_txq_mbufs,
355 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
357 volatile union iavf_rx_flex_desc *rxdp)
359 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
360 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
361 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
365 if (desc->flow_id != 0xFFFFFFFF) {
366 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
367 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
370 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
371 stat_err = rte_le_to_cpu_16(desc->status_error0);
372 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
373 mb->ol_flags |= PKT_RX_RSS_HASH;
374 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
380 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
382 volatile union iavf_rx_flex_desc *rxdp)
384 volatile struct iavf_32b_rx_flex_desc_comms *desc =
385 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
388 stat_err = rte_le_to_cpu_16(desc->status_error0);
389 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
390 mb->ol_flags |= PKT_RX_RSS_HASH;
391 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
394 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
395 if (desc->flow_id != 0xFFFFFFFF) {
396 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
397 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
400 if (rxq->xtr_ol_flag) {
401 uint32_t metadata = 0;
403 stat_err = rte_le_to_cpu_16(desc->status_error1);
405 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
406 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
408 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
410 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
413 mb->ol_flags |= rxq->xtr_ol_flag;
415 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
422 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
424 volatile union iavf_rx_flex_desc *rxdp)
426 volatile struct iavf_32b_rx_flex_desc_comms *desc =
427 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
430 stat_err = rte_le_to_cpu_16(desc->status_error0);
431 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
432 mb->ol_flags |= PKT_RX_RSS_HASH;
433 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
436 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
437 if (desc->flow_id != 0xFFFFFFFF) {
438 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
439 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
442 if (rxq->xtr_ol_flag) {
443 uint32_t metadata = 0;
445 if (desc->flex_ts.flex.aux0 != 0xFFFF)
446 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
447 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
448 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
451 mb->ol_flags |= rxq->xtr_ol_flag;
453 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
460 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
463 case IAVF_RXDID_COMMS_AUX_VLAN:
464 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
465 rxq->rxd_to_pkt_fields =
466 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
468 case IAVF_RXDID_COMMS_AUX_IPV4:
469 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
470 rxq->rxd_to_pkt_fields =
471 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
473 case IAVF_RXDID_COMMS_AUX_IPV6:
474 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
475 rxq->rxd_to_pkt_fields =
476 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
478 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
480 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
481 rxq->rxd_to_pkt_fields =
482 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
484 case IAVF_RXDID_COMMS_AUX_TCP:
485 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
486 rxq->rxd_to_pkt_fields =
487 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
489 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
491 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
492 rxq->rxd_to_pkt_fields =
493 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
495 case IAVF_RXDID_COMMS_OVS_1:
496 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
499 /* update this according to the RXDID for FLEX_DESC_NONE */
500 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
504 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
505 rxq->xtr_ol_flag = 0;
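/*
 * Application-side usage sketch (illustrative, not part of the driver):
 * once a queue uses one of the AUX descriptor formats selected above, the
 * extracted metadata can be read from a received mbuf with the helpers
 * from rte_pmd_iavf.h, e.g.:
 *
 *	if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *	    (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)) {
 *		uint32_t md = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 *		...
 *	}
 */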
509 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
510 uint16_t nb_desc, unsigned int socket_id,
511 const struct rte_eth_rxconf *rx_conf,
512 struct rte_mempool *mp)
514 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515 struct iavf_adapter *ad =
516 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
517 struct iavf_info *vf =
518 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
519 struct iavf_vsi *vsi = &vf->vsi;
520 struct iavf_rx_queue *rxq;
521 const struct rte_memzone *mz;
525 uint16_t rx_free_thresh;
528 PMD_INIT_FUNC_TRACE();
530 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
532 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
533 nb_desc > IAVF_MAX_RING_DESC ||
534 nb_desc < IAVF_MIN_RING_DESC) {
535 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
540 /* Check free threshold */
541 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
542 IAVF_DEFAULT_RX_FREE_THRESH :
543 rx_conf->rx_free_thresh;
544 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
547 /* Free memory if needed */
548 if (dev->data->rx_queues[queue_idx]) {
549 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
550 dev->data->rx_queues[queue_idx] = NULL;
553 /* Allocate the rx queue data structure */
554 rxq = rte_zmalloc_socket("iavf rxq",
555 sizeof(struct iavf_rx_queue),
559 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
560 "rx queue data structure");
564 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
565 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
567 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
568 rxq->proto_xtr = proto_xtr;
570 rxq->rxdid = IAVF_RXDID_LEGACY_1;
571 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
574 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
575 struct virtchnl_vlan_supported_caps *stripping_support =
576 &vf->vlan_v2_caps.offloads.stripping_support;
577 uint32_t stripping_cap;
579 if (stripping_support->outer)
580 stripping_cap = stripping_support->outer;
582 stripping_cap = stripping_support->inner;
584 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
585 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
586 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
587 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
589 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
592 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
595 rxq->nb_rx_desc = nb_desc;
596 rxq->rx_free_thresh = rx_free_thresh;
597 rxq->queue_id = queue_idx;
598 rxq->port_id = dev->data->port_id;
599 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
602 rxq->offloads = offloads;
604 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
605 rxq->crc_len = RTE_ETHER_CRC_LEN;
609 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
610 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
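/*
 * Example of the buffer-length math above (assuming the default mbuf data
 * room of 2048 + RTE_PKTMBUF_HEADROOM and IAVF_RXQ_CTX_DBUFF_SHIFT == 7):
 * len = 2048 and RTE_ALIGN(2048, 128) = 2048, so rx_buf_len stays 2048.
 */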
612 /* Allocate the software ring. */
613 len = nb_desc + IAVF_RX_MAX_BURST;
615 rte_zmalloc_socket("iavf rx sw ring",
616 sizeof(struct rte_mbuf *) * len,
620 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
625	/* Allocate the maximum number of RX ring hardware descriptors with
626	 * a little more to support bulk allocation.
628 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
629 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
631 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
632 ring_size, IAVF_RING_BASE_ALIGN,
635 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
636 rte_free(rxq->sw_ring);
640 /* Zero all the descriptors in the ring. */
641 memset(mz->addr, 0, ring_size);
642 rxq->rx_ring_phys_addr = mz->iova;
643 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
648 dev->data->rx_queues[queue_idx] = rxq;
649 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
650 rxq->ops = &def_rxq_ops;
652 if (check_rx_bulk_allow(rxq) == true) {
653 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
654 "satisfied. Rx Burst Bulk Alloc function will be "
655 "used on port=%d, queue=%d.",
656 rxq->port_id, rxq->queue_id);
658 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
659 "not satisfied, Scattered Rx is requested "
660 "on port=%d, queue=%d.",
661 rxq->port_id, rxq->queue_id);
662 ad->rx_bulk_alloc_allowed = false;
665 if (check_rx_vec_allow(rxq) == false)
666 ad->rx_vec_allowed = false;
672 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
675 unsigned int socket_id,
676 const struct rte_eth_txconf *tx_conf)
678 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
679 struct iavf_info *vf =
680 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
681 struct iavf_tx_queue *txq;
682 const struct rte_memzone *mz;
684 uint16_t tx_rs_thresh, tx_free_thresh;
687 PMD_INIT_FUNC_TRACE();
689 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
691 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
692 nb_desc > IAVF_MAX_RING_DESC ||
693 nb_desc < IAVF_MIN_RING_DESC) {
694 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
699 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
700 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
701 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
702 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
703	check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
705 /* Free memory if needed. */
706 if (dev->data->tx_queues[queue_idx]) {
707 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
708 dev->data->tx_queues[queue_idx] = NULL;
711 /* Allocate the TX queue data structure. */
712 txq = rte_zmalloc_socket("iavf txq",
713 sizeof(struct iavf_tx_queue),
717 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
718 "tx queue structure");
722 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
723 struct virtchnl_vlan_supported_caps *insertion_support =
724 &vf->vlan_v2_caps.offloads.insertion_support;
725 uint32_t insertion_cap;
727 if (insertion_support->outer)
728 insertion_cap = insertion_support->outer;
730 insertion_cap = insertion_support->inner;
732 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
733 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
734 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
735 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
737 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
740 txq->nb_tx_desc = nb_desc;
741 txq->rs_thresh = tx_rs_thresh;
742 txq->free_thresh = tx_free_thresh;
743 txq->queue_id = queue_idx;
744 txq->port_id = dev->data->port_id;
745 txq->offloads = offloads;
746 txq->tx_deferred_start = tx_conf->tx_deferred_start;
748 /* Allocate software ring */
750 rte_zmalloc_socket("iavf tx sw ring",
751 sizeof(struct iavf_tx_entry) * nb_desc,
755 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
760 /* Allocate TX hardware ring descriptors. */
761 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
762 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
763 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
764 ring_size, IAVF_RING_BASE_ALIGN,
767 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
768 rte_free(txq->sw_ring);
772 txq->tx_ring_phys_addr = mz->iova;
773 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
778 dev->data->tx_queues[queue_idx] = txq;
779 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
780 txq->ops = &def_txq_ops;
782 if (check_tx_vec_allow(txq) == false) {
783 struct iavf_adapter *ad =
784 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
785 ad->tx_vec_allowed = false;
788 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
789 vf->tm_conf.committed) {
791 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
792 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
793 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
794 vf->qtc_map[tc].queue_count))
797 if (tc >= vf->qos_cap->num_elem) {
798 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
808 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
810 struct iavf_adapter *adapter =
811 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
812 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
813 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
814 struct iavf_rx_queue *rxq;
817 PMD_DRV_FUNC_TRACE();
819 if (rx_queue_id >= dev->data->nb_rx_queues)
822 rxq = dev->data->rx_queues[rx_queue_id];
824 err = alloc_rxq_mbufs(rxq);
826 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
832 /* Init the RX tail register. */
833 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
834 IAVF_WRITE_FLUSH(hw);
836 /* Ready to switch the queue on */
838 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
840 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
843 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
846 dev->data->rx_queue_state[rx_queue_id] =
847 RTE_ETH_QUEUE_STATE_STARTED;
853 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
855 struct iavf_adapter *adapter =
856 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
857 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
858 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
859 struct iavf_tx_queue *txq;
862 PMD_DRV_FUNC_TRACE();
864 if (tx_queue_id >= dev->data->nb_tx_queues)
867 txq = dev->data->tx_queues[tx_queue_id];
869	/* Init the TX tail register. */
870 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
871 IAVF_WRITE_FLUSH(hw);
873 /* Ready to switch the queue on */
875 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
877 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
880 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
883 dev->data->tx_queue_state[tx_queue_id] =
884 RTE_ETH_QUEUE_STATE_STARTED;
890 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
892 struct iavf_adapter *adapter =
893 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
894 struct iavf_rx_queue *rxq;
897 PMD_DRV_FUNC_TRACE();
899 if (rx_queue_id >= dev->data->nb_rx_queues)
902 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
904 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
909 rxq = dev->data->rx_queues[rx_queue_id];
910 rxq->ops->release_mbufs(rxq);
912 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
918 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
920 struct iavf_adapter *adapter =
921 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
922 struct iavf_tx_queue *txq;
925 PMD_DRV_FUNC_TRACE();
927 if (tx_queue_id >= dev->data->nb_tx_queues)
930 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
932 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
937 txq = dev->data->tx_queues[tx_queue_id];
938 txq->ops->release_mbufs(txq);
940 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
946 iavf_dev_rx_queue_release(void *rxq)
948 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
953 q->ops->release_mbufs(q);
954 rte_free(q->sw_ring);
955 rte_memzone_free(q->mz);
960 iavf_dev_tx_queue_release(void *txq)
962 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
967 q->ops->release_mbufs(q);
968 rte_free(q->sw_ring);
969 rte_memzone_free(q->mz);
974 iavf_stop_queues(struct rte_eth_dev *dev)
976 struct iavf_adapter *adapter =
977 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
978 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
979 struct iavf_rx_queue *rxq;
980 struct iavf_tx_queue *txq;
983	/* Stop all queues */
984 if (!vf->lv_enabled) {
985 ret = iavf_disable_queues(adapter);
987			PMD_DRV_LOG(WARNING, "Failed to stop queues");
989		ret = iavf_disable_queues_lv(adapter);
991			PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
995			PMD_DRV_LOG(WARNING, "Failed to stop queues");
997 for (i = 0; i < dev->data->nb_tx_queues; i++) {
998 txq = dev->data->tx_queues[i];
1001 txq->ops->release_mbufs(txq);
1002 reset_tx_queue(txq);
1003 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1005 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1006 rxq = dev->data->rx_queues[i];
1009 rxq->ops->release_mbufs(rxq);
1010 reset_rx_queue(rxq);
1011 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1015 #define IAVF_RX_FLEX_ERR0_BITS \
1016 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1017 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1018 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1019 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1020 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1021 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1024 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1026 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1027 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1028 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1030 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1037 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1038 volatile union iavf_rx_flex_desc *rxdp,
1041 uint16_t vlan_tci = 0;
1043 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
1044 rte_le_to_cpu_64(rxdp->wb.status_error0) &
1045 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
1046 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
1048 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1049 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
1050 rte_le_to_cpu_16(rxdp->wb.status_error1) &
1051 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
1052 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1056 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1057 mb->vlan_tci = vlan_tci;
1061 /* Translate the rx descriptor status and error fields to pkt flags */
1062 static inline uint64_t
1063 iavf_rxd_to_pkt_flags(uint64_t qword)
1066 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1068 #define IAVF_RX_ERR_BITS 0x3f
1070 /* Check if RSS_HASH */
1071 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1072 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1073 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1075 /* Check if FDIR Match */
1076 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1079 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1080 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1084 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1085 flags |= PKT_RX_IP_CKSUM_BAD;
1087 flags |= PKT_RX_IP_CKSUM_GOOD;
1089 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1090 flags |= PKT_RX_L4_CKSUM_BAD;
1092 flags |= PKT_RX_L4_CKSUM_GOOD;
1094 /* TODO: Oversize error bit is not processed here */
1099 static inline uint64_t
1100 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1103 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1106 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1107 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1108 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1110 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1112 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1113 flags |= PKT_RX_FDIR_ID;
1117 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1118 flags |= PKT_RX_FDIR_ID;
1123 #define IAVF_RX_FLEX_ERR0_BITS \
1124 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1125 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1126 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1127 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1128 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1129 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1131 /* Rx L3/L4 checksum */
1132 static inline uint64_t
1133 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1137 /* check if HW has decoded the packet and checksum */
1138 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1141 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1142 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1146 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1147 flags |= PKT_RX_IP_CKSUM_BAD;
1149 flags |= PKT_RX_IP_CKSUM_GOOD;
1151 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1152 flags |= PKT_RX_L4_CKSUM_BAD;
1154 flags |= PKT_RX_L4_CKSUM_GOOD;
1156 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1157 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
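/*
 * Example: a flex descriptor with the L3L4P bit set and none of the
 * IAVF_RX_FLEX_ERR0_BITS set yields
 * flags == (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); if only
 * XSUM_IPE is set, the result is PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD.
 */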
1162 /* If the number of free RX descriptors is greater than the RX free
1163 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1164 * register. Update the RDT with the value of the last processed RX
1165 * descriptor minus 1, to guarantee that the RDT register is never
1166 * equal to the RDH register, which creates a "full" ring situation
1167 * from the hardware point of view.
1170 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1172 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1174 if (nb_hold > rxq->rx_free_thresh) {
1176 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1177 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1178 rx_id = (uint16_t)((rx_id == 0) ?
1179 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1180 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1183 rxq->nb_rx_hold = nb_hold;
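/*
 * Example of the wrap-around above: with nb_rx_desc = 512 and the next
 * descriptor to write being rx_id = 0, the tail register is written with
 * 511 so that it never equals the head pointer.
 */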
1186 /* implement recv_pkts */
1188 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1190 volatile union iavf_rx_desc *rx_ring;
1191 volatile union iavf_rx_desc *rxdp;
1192 struct iavf_rx_queue *rxq;
1193 union iavf_rx_desc rxd;
1194 struct rte_mbuf *rxe;
1195 struct rte_eth_dev *dev;
1196 struct rte_mbuf *rxm;
1197 struct rte_mbuf *nmb;
1201 uint16_t rx_packet_len;
1202 uint16_t rx_id, nb_hold;
1205 const uint32_t *ptype_tbl;
1210 rx_id = rxq->rx_tail;
1211 rx_ring = rxq->rx_ring;
1212 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1214 while (nb_rx < nb_pkts) {
1215 rxdp = &rx_ring[rx_id];
1216 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1217 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1218 IAVF_RXD_QW1_STATUS_SHIFT;
1220 /* Check the DD bit first */
1221 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1223 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1225 nmb = rte_mbuf_raw_alloc(rxq->mp);
1226 if (unlikely(!nmb)) {
1227 dev = &rte_eth_devices[rxq->port_id];
1228 dev->data->rx_mbuf_alloc_failed++;
1229 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1230 "queue_id=%u", rxq->port_id, rxq->queue_id);
1236 rxe = rxq->sw_ring[rx_id];
1237 rxq->sw_ring[rx_id] = nmb;
1239 if (unlikely(rx_id == rxq->nb_rx_desc))
1242 /* Prefetch next mbuf */
1243 rte_prefetch0(rxq->sw_ring[rx_id]);
1245 /* When next RX descriptor is on a cache line boundary,
1246 * prefetch the next 4 RX descriptors and next 8 pointers
1249 if ((rx_id & 0x3) == 0) {
1250 rte_prefetch0(&rx_ring[rx_id]);
1251 rte_prefetch0(rxq->sw_ring[rx_id]);
1255 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1256 rxdp->read.hdr_addr = 0;
1257 rxdp->read.pkt_addr = dma_addr;
1259 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1260 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1262 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1263 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1266 rxm->pkt_len = rx_packet_len;
1267 rxm->data_len = rx_packet_len;
1268 rxm->port = rxq->port_id;
1270 iavf_rxd_to_vlan_tci(rxm, &rxd);
1271 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1273 ptype_tbl[(uint8_t)((qword1 &
1274 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1276 if (pkt_flags & PKT_RX_RSS_HASH)
1278 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1280 if (pkt_flags & PKT_RX_FDIR)
1281 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1283 rxm->ol_flags |= pkt_flags;
1285 rx_pkts[nb_rx++] = rxm;
1287 rxq->rx_tail = rx_id;
1289 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1294 /* implement recv_pkts for flexible Rx descriptor */
1296 iavf_recv_pkts_flex_rxd(void *rx_queue,
1297 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1299 volatile union iavf_rx_desc *rx_ring;
1300 volatile union iavf_rx_flex_desc *rxdp;
1301 struct iavf_rx_queue *rxq;
1302 union iavf_rx_flex_desc rxd;
1303 struct rte_mbuf *rxe;
1304 struct rte_eth_dev *dev;
1305 struct rte_mbuf *rxm;
1306 struct rte_mbuf *nmb;
1308 uint16_t rx_stat_err0;
1309 uint16_t rx_packet_len;
1310 uint16_t rx_id, nb_hold;
1313 const uint32_t *ptype_tbl;
1318 rx_id = rxq->rx_tail;
1319 rx_ring = rxq->rx_ring;
1320 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1322 while (nb_rx < nb_pkts) {
1323 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1324 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1326 /* Check the DD bit first */
1327 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1329 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1331 nmb = rte_mbuf_raw_alloc(rxq->mp);
1332 if (unlikely(!nmb)) {
1333 dev = &rte_eth_devices[rxq->port_id];
1334 dev->data->rx_mbuf_alloc_failed++;
1335 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1336 "queue_id=%u", rxq->port_id, rxq->queue_id);
1342 rxe = rxq->sw_ring[rx_id];
1343 rxq->sw_ring[rx_id] = nmb;
1345 if (unlikely(rx_id == rxq->nb_rx_desc))
1348 /* Prefetch next mbuf */
1349 rte_prefetch0(rxq->sw_ring[rx_id]);
1351 /* When next RX descriptor is on a cache line boundary,
1352 * prefetch the next 4 RX descriptors and next 8 pointers
1355 if ((rx_id & 0x3) == 0) {
1356 rte_prefetch0(&rx_ring[rx_id]);
1357 rte_prefetch0(rxq->sw_ring[rx_id]);
1361 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1362 rxdp->read.hdr_addr = 0;
1363 rxdp->read.pkt_addr = dma_addr;
1365 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1366 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1368 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1369 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1372 rxm->pkt_len = rx_packet_len;
1373 rxm->data_len = rx_packet_len;
1374 rxm->port = rxq->port_id;
1376 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1377 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1378 iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
1379 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1380 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1381 rxm->ol_flags |= pkt_flags;
1383 rx_pkts[nb_rx++] = rxm;
1385 rxq->rx_tail = rx_id;
1387 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1392 /* implement recv_scattered_pkts for flexible Rx descriptor */
1394 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1397 struct iavf_rx_queue *rxq = rx_queue;
1398 union iavf_rx_flex_desc rxd;
1399 struct rte_mbuf *rxe;
1400 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1401 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1402 struct rte_mbuf *nmb, *rxm;
1403 uint16_t rx_id = rxq->rx_tail;
1404 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1405 struct rte_eth_dev *dev;
1406 uint16_t rx_stat_err0;
1410 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1411 volatile union iavf_rx_flex_desc *rxdp;
1412 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1414 while (nb_rx < nb_pkts) {
1415 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1416 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1418 /* Check the DD bit */
1419 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1421 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1423 nmb = rte_mbuf_raw_alloc(rxq->mp);
1424 if (unlikely(!nmb)) {
1425 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1426 "queue_id=%u", rxq->port_id, rxq->queue_id);
1427 dev = &rte_eth_devices[rxq->port_id];
1428 dev->data->rx_mbuf_alloc_failed++;
1434 rxe = rxq->sw_ring[rx_id];
1435 rxq->sw_ring[rx_id] = nmb;
1437 if (rx_id == rxq->nb_rx_desc)
1440 /* Prefetch next mbuf */
1441 rte_prefetch0(rxq->sw_ring[rx_id]);
1443 /* When next RX descriptor is on a cache line boundary,
1444 * prefetch the next 4 RX descriptors and next 8 pointers
1447 if ((rx_id & 0x3) == 0) {
1448 rte_prefetch0(&rx_ring[rx_id]);
1449 rte_prefetch0(rxq->sw_ring[rx_id]);
1454 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1456 /* Set data buffer address and data length of the mbuf */
1457 rxdp->read.hdr_addr = 0;
1458 rxdp->read.pkt_addr = dma_addr;
1459 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1460 IAVF_RX_FLX_DESC_PKT_LEN_M;
1461 rxm->data_len = rx_packet_len;
1462 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1464 /* If this is the first buffer of the received packet, set the
1465 * pointer to the first mbuf of the packet and initialize its
1466 * context. Otherwise, update the total length and the number
1467 * of segments of the current scattered packet, and update the
1468 * pointer to the last mbuf of the current packet.
1472 first_seg->nb_segs = 1;
1473 first_seg->pkt_len = rx_packet_len;
1475 first_seg->pkt_len =
1476 (uint16_t)(first_seg->pkt_len +
1478 first_seg->nb_segs++;
1479 last_seg->next = rxm;
1482 /* If this is not the last buffer of the received packet,
1483 * update the pointer to the last mbuf of the current scattered
1484 * packet and continue to parse the RX ring.
1486 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1491 /* This is the last buffer of the received packet. If the CRC
1492 * is not stripped by the hardware:
1493 * - Subtract the CRC length from the total packet length.
1494 * - If the last buffer only contains the whole CRC or a part
1495 * of it, free the mbuf associated to the last buffer. If part
1496 * of the CRC is also contained in the previous mbuf, subtract
1497 * the length of that CRC part from the data length of the
1501 if (unlikely(rxq->crc_len > 0)) {
1502 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1503 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1504 rte_pktmbuf_free_seg(rxm);
1505 first_seg->nb_segs--;
1506 last_seg->data_len =
1507 (uint16_t)(last_seg->data_len -
1508 (RTE_ETHER_CRC_LEN - rx_packet_len));
1509 last_seg->next = NULL;
1511 rxm->data_len = (uint16_t)(rx_packet_len -
1516 first_seg->port = rxq->port_id;
1517 first_seg->ol_flags = 0;
1518 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1519 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1520 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
1521 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1522 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1524 first_seg->ol_flags |= pkt_flags;
1526 /* Prefetch data of first segment, if configured to do so. */
1527 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1528 first_seg->data_off));
1529 rx_pkts[nb_rx++] = first_seg;
1533 /* Record index of the next RX descriptor to probe. */
1534 rxq->rx_tail = rx_id;
1535 rxq->pkt_first_seg = first_seg;
1536 rxq->pkt_last_seg = last_seg;
1538 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1543 /* implement recv_scattered_pkts */
1545 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1548 struct iavf_rx_queue *rxq = rx_queue;
1549 union iavf_rx_desc rxd;
1550 struct rte_mbuf *rxe;
1551 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1552 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1553 struct rte_mbuf *nmb, *rxm;
1554 uint16_t rx_id = rxq->rx_tail;
1555 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1556 struct rte_eth_dev *dev;
1562 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1563 volatile union iavf_rx_desc *rxdp;
1564 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1566 while (nb_rx < nb_pkts) {
1567 rxdp = &rx_ring[rx_id];
1568 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1569 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1570 IAVF_RXD_QW1_STATUS_SHIFT;
1572 /* Check the DD bit */
1573 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1575 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1577 nmb = rte_mbuf_raw_alloc(rxq->mp);
1578 if (unlikely(!nmb)) {
1579 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1580 "queue_id=%u", rxq->port_id, rxq->queue_id);
1581 dev = &rte_eth_devices[rxq->port_id];
1582 dev->data->rx_mbuf_alloc_failed++;
1588 rxe = rxq->sw_ring[rx_id];
1589 rxq->sw_ring[rx_id] = nmb;
1591 if (rx_id == rxq->nb_rx_desc)
1594 /* Prefetch next mbuf */
1595 rte_prefetch0(rxq->sw_ring[rx_id]);
1597 /* When next RX descriptor is on a cache line boundary,
1598 * prefetch the next 4 RX descriptors and next 8 pointers
1601 if ((rx_id & 0x3) == 0) {
1602 rte_prefetch0(&rx_ring[rx_id]);
1603 rte_prefetch0(rxq->sw_ring[rx_id]);
1608 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1610 /* Set data buffer address and data length of the mbuf */
1611 rxdp->read.hdr_addr = 0;
1612 rxdp->read.pkt_addr = dma_addr;
1613 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1614 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1615 rxm->data_len = rx_packet_len;
1616 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1618 /* If this is the first buffer of the received packet, set the
1619 * pointer to the first mbuf of the packet and initialize its
1620 * context. Otherwise, update the total length and the number
1621 * of segments of the current scattered packet, and update the
1622 * pointer to the last mbuf of the current packet.
1626 first_seg->nb_segs = 1;
1627 first_seg->pkt_len = rx_packet_len;
1629 first_seg->pkt_len =
1630 (uint16_t)(first_seg->pkt_len +
1632 first_seg->nb_segs++;
1633 last_seg->next = rxm;
1636 /* If this is not the last buffer of the received packet,
1637 * update the pointer to the last mbuf of the current scattered
1638 * packet and continue to parse the RX ring.
1640 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1645 /* This is the last buffer of the received packet. If the CRC
1646 * is not stripped by the hardware:
1647 * - Subtract the CRC length from the total packet length.
1648 * - If the last buffer only contains the whole CRC or a part
1649 * of it, free the mbuf associated to the last buffer. If part
1650 * of the CRC is also contained in the previous mbuf, subtract
1651 * the length of that CRC part from the data length of the
1655 if (unlikely(rxq->crc_len > 0)) {
1656 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1657 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1658 rte_pktmbuf_free_seg(rxm);
1659 first_seg->nb_segs--;
1660 last_seg->data_len =
1661 (uint16_t)(last_seg->data_len -
1662 (RTE_ETHER_CRC_LEN - rx_packet_len));
1663 last_seg->next = NULL;
1665 rxm->data_len = (uint16_t)(rx_packet_len -
1669 first_seg->port = rxq->port_id;
1670 first_seg->ol_flags = 0;
1671 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1672 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1673 first_seg->packet_type =
1674 ptype_tbl[(uint8_t)((qword1 &
1675 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1677 if (pkt_flags & PKT_RX_RSS_HASH)
1678 first_seg->hash.rss =
1679 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1681 if (pkt_flags & PKT_RX_FDIR)
1682 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1684 first_seg->ol_flags |= pkt_flags;
1686 /* Prefetch data of first segment, if configured to do so. */
1687 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1688 first_seg->data_off));
1689 rx_pkts[nb_rx++] = first_seg;
1693 /* Record index of the next RX descriptor to probe. */
1694 rxq->rx_tail = rx_id;
1695 rxq->pkt_first_seg = first_seg;
1696 rxq->pkt_last_seg = last_seg;
1698 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1703 #define IAVF_LOOK_AHEAD 8
1705 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1707 volatile union iavf_rx_flex_desc *rxdp;
1708 struct rte_mbuf **rxep;
1709 struct rte_mbuf *mb;
1712 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1713 int32_t i, j, nb_rx = 0;
1715 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1717 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1718 rxep = &rxq->sw_ring[rxq->rx_tail];
1720 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1722 /* Make sure there is at least 1 packet to receive */
1723 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1726 /* Scan LOOK_AHEAD descriptors at a time to determine which
1727 * descriptors reference packets that are ready to be received.
1729 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1730 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1731 /* Read desc statuses backwards to avoid race condition */
1732 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1733 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1737 /* Compute how many status bits were set */
1738 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1739 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1743 /* Translate descriptor info to mbuf parameters */
1744 for (j = 0; j < nb_dd; j++) {
1745 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1747 i * IAVF_LOOK_AHEAD + j);
1750 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1751 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1752 mb->data_len = pkt_len;
1753 mb->pkt_len = pkt_len;
1756 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1757 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1758 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
1759 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1760 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1761 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1763 mb->ol_flags |= pkt_flags;
1766 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1767 rxq->rx_stage[i + j] = rxep[j];
1769 if (nb_dd != IAVF_LOOK_AHEAD)
1773 /* Clear software ring entries */
1774 for (i = 0; i < nb_rx; i++)
1775 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1781 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1783 volatile union iavf_rx_desc *rxdp;
1784 struct rte_mbuf **rxep;
1785 struct rte_mbuf *mb;
1789 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1790 int32_t i, j, nb_rx = 0;
1792 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1794 rxdp = &rxq->rx_ring[rxq->rx_tail];
1795 rxep = &rxq->sw_ring[rxq->rx_tail];
1797 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1798 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1799 IAVF_RXD_QW1_STATUS_SHIFT;
1801 /* Make sure there is at least 1 packet to receive */
1802 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1805 /* Scan LOOK_AHEAD descriptors at a time to determine which
1806 * descriptors reference packets that are ready to be received.
1808 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1809 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1810 /* Read desc statuses backwards to avoid race condition */
1811 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1812 qword1 = rte_le_to_cpu_64(
1813 rxdp[j].wb.qword1.status_error_len);
1814 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1815 IAVF_RXD_QW1_STATUS_SHIFT;
1820 /* Compute how many status bits were set */
1821 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1822 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1826 /* Translate descriptor info to mbuf parameters */
1827 for (j = 0; j < nb_dd; j++) {
1828 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1829 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1832 qword1 = rte_le_to_cpu_64
1833 (rxdp[j].wb.qword1.status_error_len);
1834 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1835 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1836 mb->data_len = pkt_len;
1837 mb->pkt_len = pkt_len;
1839 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1840 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1842 ptype_tbl[(uint8_t)((qword1 &
1843 IAVF_RXD_QW1_PTYPE_MASK) >>
1844 IAVF_RXD_QW1_PTYPE_SHIFT)];
1846 if (pkt_flags & PKT_RX_RSS_HASH)
1847 mb->hash.rss = rte_le_to_cpu_32(
1848 rxdp[j].wb.qword0.hi_dword.rss);
1850 if (pkt_flags & PKT_RX_FDIR)
1851 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1853 mb->ol_flags |= pkt_flags;
1856 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1857 rxq->rx_stage[i + j] = rxep[j];
1859 if (nb_dd != IAVF_LOOK_AHEAD)
1863 /* Clear software ring entries */
1864 for (i = 0; i < nb_rx; i++)
1865 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1870 static inline uint16_t
1871 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1872 struct rte_mbuf **rx_pkts,
1876 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1878 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1880 for (i = 0; i < nb_pkts; i++)
1881 rx_pkts[i] = stage[i];
1883 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1884 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1890 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1892 volatile union iavf_rx_desc *rxdp;
1893 struct rte_mbuf **rxep;
1894 struct rte_mbuf *mb;
1895 uint16_t alloc_idx, i;
1899 /* Allocate buffers in bulk */
1900 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1901 (rxq->rx_free_thresh - 1));
1902 rxep = &rxq->sw_ring[alloc_idx];
1903 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1904 rxq->rx_free_thresh);
1905 if (unlikely(diag != 0)) {
1906 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1910 rxdp = &rxq->rx_ring[alloc_idx];
1911 for (i = 0; i < rxq->rx_free_thresh; i++) {
1912 if (likely(i < (rxq->rx_free_thresh - 1)))
1913 /* Prefetch next mbuf */
1914 rte_prefetch0(rxep[i + 1]);
1917 rte_mbuf_refcnt_set(mb, 1);
1919 mb->data_off = RTE_PKTMBUF_HEADROOM;
1921 mb->port = rxq->port_id;
1922 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1923 rxdp[i].read.hdr_addr = 0;
1924 rxdp[i].read.pkt_addr = dma_addr;
1927 /* Update rx tail register */
1929 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1931 rxq->rx_free_trigger =
1932 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1933 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1934 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1939 static inline uint16_t
1940 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1942 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1948 if (rxq->rx_nb_avail)
1949 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1951 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1952 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1954 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1955 rxq->rx_next_avail = 0;
1956 rxq->rx_nb_avail = nb_rx;
1957 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1959 if (rxq->rx_tail > rxq->rx_free_trigger) {
1960 if (iavf_rx_alloc_bufs(rxq) != 0) {
1963 /* TODO: count rx_mbuf_alloc_failed here */
1965 rxq->rx_nb_avail = 0;
1966 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1967 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1968 rxq->sw_ring[j] = rxq->rx_stage[i];
1974 if (rxq->rx_tail >= rxq->nb_rx_desc)
1977 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1978 rxq->port_id, rxq->queue_id,
1979 rxq->rx_tail, nb_rx);
1981 if (rxq->rx_nb_avail)
1982 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1988 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1989 struct rte_mbuf **rx_pkts,
1992 uint16_t nb_rx = 0, n, count;
1994 if (unlikely(nb_pkts == 0))
1997 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1998 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2001 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2002 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2003 nb_rx = (uint16_t)(nb_rx + count);
2004 nb_pkts = (uint16_t)(nb_pkts - count);
2013 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2015 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2016 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2017 uint16_t nb_tx_desc = txq->nb_tx_desc;
2018 uint16_t desc_to_clean_to;
2019 uint16_t nb_tx_to_clean;
2021 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2023 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2024 if (desc_to_clean_to >= nb_tx_desc)
2025 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2027 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2028 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2029 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2030 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2031 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2032 "(port=%d queue=%d)", desc_to_clean_to,
2033 txq->port_id, txq->queue_id);
2037 if (last_desc_cleaned > desc_to_clean_to)
2038 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2041 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2044 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2046 txq->last_desc_cleaned = desc_to_clean_to;
2047 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2052 /* Check if the context descriptor is needed for TX offloading */
2053 static inline uint16_t
2054 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2056 if (flags & PKT_TX_TCP_SEG)
2058 if (flags & PKT_TX_VLAN_PKT &&
2059 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
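/*
 * Example: a TSO packet (PKT_TX_TCP_SEG) always needs one context
 * descriptor; a plain VLAN packet needs one only when the tag has to be
 * inserted via L2TAG2, i.e. when vlan_flag contains
 * IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2. Otherwise no context descriptor is
 * counted here.
 */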
2065 iavf_txd_enable_checksum(uint64_t ol_flags,
2067 uint32_t *td_offset,
2068 union iavf_tx_offload tx_offload)
2071 *td_offset |= (tx_offload.l2_len >> 1) <<
2072 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2074 /* Enable L3 checksum offloads */
2075 if (ol_flags & PKT_TX_IP_CKSUM) {
2076 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2077 *td_offset |= (tx_offload.l3_len >> 2) <<
2078 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2079 } else if (ol_flags & PKT_TX_IPV4) {
2080 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2081 *td_offset |= (tx_offload.l3_len >> 2) <<
2082 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2083 } else if (ol_flags & PKT_TX_IPV6) {
2084 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2085 *td_offset |= (tx_offload.l3_len >> 2) <<
2086 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2089 if (ol_flags & PKT_TX_TCP_SEG) {
2090 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2091 *td_offset |= (tx_offload.l4_len >> 2) <<
2092 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2096 /* Enable L4 checksum offloads */
2097 switch (ol_flags & PKT_TX_L4_MASK) {
2098 case PKT_TX_TCP_CKSUM:
2099 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2100 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2101 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2103 case PKT_TX_SCTP_CKSUM:
2104 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2105 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2106 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2108 case PKT_TX_UDP_CKSUM:
2109 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2110 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2111 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
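/*
 * Worked example of the offset encoding above for a TCP/IPv4 packet with
 * a 14-byte Ethernet header and a 20-byte IP header: MACLEN = 14 >> 1 = 7
 * (in 2-byte words), IPLEN = 20 >> 2 = 5 (in 4-byte words) and
 * L4LEN = sizeof(struct rte_tcp_hdr) >> 2 = 5, each shifted into its
 * field of *td_offset.
 */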
2118 /* set TSO context descriptor
2119 * support IP -> L4 and IP -> IP -> L4
2121 static inline uint64_t
2122 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2124 uint64_t ctx_desc = 0;
2125 uint32_t cd_cmd, hdr_len, cd_tso_len;
2127 if (!tx_offload.l4_len) {
2128 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2132 hdr_len = tx_offload.l2_len +
2136 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2137 cd_tso_len = mbuf->pkt_len - hdr_len;
2138 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2139 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2140 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
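/*
 * Example (illustrative numbers): for a 1514-byte TCP segment with
 * l2_len = 14, l3_len = 20 and l4_len = 20, hdr_len = 54 and
 * cd_tso_len = 1514 - 54 = 1460, which together with tso_segsz = 1460
 * describes a payload that fits in a single MSS.
 */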
2145 /* Construct the tx flags */
2146 static inline uint64_t
2147 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2150 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2151 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2152 ((uint64_t)td_offset <<
2153 IAVF_TXD_QW1_OFFSET_SHIFT) |
2155 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2156 ((uint64_t)td_tag <<
2157 IAVF_TXD_QW1_L2TAG1_SHIFT));
2162 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2164 volatile struct iavf_tx_desc *txd;
2165 volatile struct iavf_tx_desc *txr;
2166 struct iavf_tx_queue *txq;
2167 struct iavf_tx_entry *sw_ring;
2168 struct iavf_tx_entry *txe, *txn;
2169 struct rte_mbuf *tx_pkt;
2170 struct rte_mbuf *m_seg;
2181 uint64_t buf_dma_addr;
2182 uint16_t cd_l2tag2 = 0;
2183 union iavf_tx_offload tx_offload = {0};
2186 sw_ring = txq->sw_ring;
2188 tx_id = txq->tx_tail;
2189 txe = &sw_ring[tx_id];
2191 /* Check if the descriptor ring needs to be cleaned. */
2192 if (txq->nb_free < txq->free_thresh)
2193 (void)iavf_xmit_cleanup(txq);
2195 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2200 tx_pkt = *tx_pkts++;
2201 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2203 ol_flags = tx_pkt->ol_flags;
2204 tx_offload.l2_len = tx_pkt->l2_len;
2205 tx_offload.l3_len = tx_pkt->l3_len;
2206 tx_offload.l4_len = tx_pkt->l4_len;
2207 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2208 /* Calculate the number of context descriptors needed. */
2209 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2211		/* The number of descriptors that must be allocated for
2212		 * a packet equals the number of segments of that packet,
2213		 * plus 1 context descriptor if needed.
2215 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2216 tx_last = (uint16_t)(tx_id + nb_used - 1);
2219 if (tx_last >= txq->nb_tx_desc)
2220 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2222 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2223 " tx_first=%u tx_last=%u",
2224 txq->port_id, txq->queue_id, tx_id, tx_last);
2226 if (nb_used > txq->nb_free) {
2227 if (iavf_xmit_cleanup(txq)) {
2232 if (unlikely(nb_used > txq->rs_thresh)) {
2233 while (nb_used > txq->nb_free) {
2234 if (iavf_xmit_cleanup(txq)) {
2243 /* Descriptor based VLAN insertion */
2244 if (ol_flags & PKT_TX_VLAN_PKT &&
2245 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2246 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2247 td_tag = tx_pkt->vlan_tci;
2250		/* According to the datasheet, bit 2 is reserved and must be
2255 /* Enable checksum offloading */
2256 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2257 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2258 &td_offset, tx_offload);
2261 /* Setup TX context descriptor if required */
2262 uint64_t cd_type_cmd_tso_mss =
2263 IAVF_TX_DESC_DTYPE_CONTEXT;
2264 volatile struct iavf_tx_context_desc *ctx_txd =
2265 (volatile struct iavf_tx_context_desc *)
2268 /* Clear QW0, or the previous writeback value
2269 * may impact the next write.
2271 *(volatile uint64_t *)ctx_txd = 0;
2273 txn = &sw_ring[txe->next_id];
2274 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2276 rte_pktmbuf_free_seg(txe->mbuf);
2281 if (ol_flags & PKT_TX_TCP_SEG)
2282 cd_type_cmd_tso_mss |=
2283 iavf_set_tso_ctx(tx_pkt, tx_offload);
2285 if (ol_flags & PKT_TX_VLAN_PKT &&
2286 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2287 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2288 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2289 cd_l2tag2 = tx_pkt->vlan_tci;
2292 ctx_txd->type_cmd_tso_mss =
2293 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2294 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2296 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2297 txe->last_id = tx_last;
2298 tx_id = txe->next_id;
2305 txn = &sw_ring[txe->next_id];
2308 rte_pktmbuf_free_seg(txe->mbuf);
2311 /* Setup TX Descriptor */
2312 slen = m_seg->data_len;
2313 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2314 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2315 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd, td_offset, slen, td_tag);
2320 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2321 txe->last_id = tx_last;
2322 tx_id = txe->next_id;
2324 m_seg = m_seg->next;
2327 /* The last packet data descriptor needs End Of Packet (EOP) */
2328 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2329 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2330 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2332 if (txq->nb_used >= txq->rs_thresh) {
2333 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2334 "%4u (port=%d queue=%d)",
2335 tx_last, txq->port_id, txq->queue_id);
2337 td_cmd |= IAVF_TX_DESC_CMD_RS;
2339 /* Update txq RS bit counters */
2343 txd->cmd_type_offset_bsz |=
2344 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2345 IAVF_TXD_QW1_CMD_SHIFT);
2346 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2352 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2353 txq->port_id, txq->queue_id, tx_id, nb_tx);
2355 IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
2356 txq->tx_tail = tx_id;
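/* Note (descriptive comment, added for clarity): the relaxed write-combining
 * store above is the queue doorbell; it tells hardware that descriptors up to
 * (but not including) tx_id are ready, and the software tail is updated to
 * match so the next burst continues from the same position.
 */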
2361 /* Check if the packet with vlan user priority is transmitted in the
2365 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2367 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2368 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2371 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2373 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2374 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot be transmitted in queue %u\n",
2382 /* TX prep functions */
2384 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2390 struct iavf_tx_queue *txq = tx_queue;
2391 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2392 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2394 for (i = 0; i < nb_pkts; i++) {
2396 ol_flags = m->ol_flags;
2398 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2399 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2400 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2404 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2405 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2406 /* An MSS outside the valid range is considered malicious */
2411 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2412 rte_errno = ENOTSUP;
2416 #ifdef RTE_ETHDEV_DEBUG_TX
2417 ret = rte_validate_tx_offload(m);
2423 ret = rte_net_intel_cksum_prepare(m);
2429 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2430 ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
2431 ret = iavf_check_vlan_up2tc(txq, m);
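/* Illustrative usage sketch (not part of the driver; port_id, queue_id and
 * the packet array are assumptions): an application pairing the prepare and
 * burst callbacks would typically do
 *
 *     uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * so that malformed packets (bad MSS, too many segments, unsupported offload
 * flags) are rejected with rte_errno before they reach the hardware ring.
 */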
2442 /* Choose the Rx function. */
2444 iavf_set_rx_function(struct rte_eth_dev *dev)
2446 struct iavf_adapter *adapter =
2447 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2448 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2451 struct iavf_rx_queue *rxq;
2454 bool use_avx2 = false;
2455 bool use_avx512 = false;
2456 bool use_flex = false;
2458 check_ret = iavf_rx_vec_dev_check(dev);
2459 if (check_ret >= 0 &&
2460 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2461 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2462 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2463 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2466 #ifdef CC_AVX512_SUPPORT
2467 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2468 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2469 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2473 if (vf->vf_res->vf_cap_flags &
2474 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2477 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2478 rxq = dev->data->rx_queues[i];
2479 (void)iavf_rxq_vec_setup(rxq);
2482 if (dev->data->scattered_rx) {
2485 "Using %sVector Scattered Rx (port %d).",
2486 use_avx2 ? "avx2 " : "",
2487 dev->data->port_id);
2489 if (check_ret == IAVF_VECTOR_PATH)
2491 "Using AVX512 Vector Scattered Rx (port %d).",
2492 dev->data->port_id);
2495 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2496 dev->data->port_id);
2499 dev->rx_pkt_burst = use_avx2 ?
2500 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2501 iavf_recv_scattered_pkts_vec_flex_rxd;
2502 #ifdef CC_AVX512_SUPPORT
2504 if (check_ret == IAVF_VECTOR_PATH)
2506 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2509 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2513 dev->rx_pkt_burst = use_avx2 ?
2514 iavf_recv_scattered_pkts_vec_avx2 :
2515 iavf_recv_scattered_pkts_vec;
2516 #ifdef CC_AVX512_SUPPORT
2518 if (check_ret == IAVF_VECTOR_PATH)
2520 iavf_recv_scattered_pkts_vec_avx512;
2523 iavf_recv_scattered_pkts_vec_avx512_offload;
2529 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2530 use_avx2 ? "avx2 " : "",
2531 dev->data->port_id);
2533 if (check_ret == IAVF_VECTOR_PATH)
2535 "Using AVX512 Vector Rx (port %d).",
2536 dev->data->port_id);
2539 "Using AVX512 OFFLOAD Vector Rx (port %d).",
2540 dev->data->port_id);
2543 dev->rx_pkt_burst = use_avx2 ?
2544 iavf_recv_pkts_vec_avx2_flex_rxd :
2545 iavf_recv_pkts_vec_flex_rxd;
2546 #ifdef CC_AVX512_SUPPORT
2548 if (check_ret == IAVF_VECTOR_PATH)
2550 iavf_recv_pkts_vec_avx512_flex_rxd;
2553 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
2557 dev->rx_pkt_burst = use_avx2 ?
2558 iavf_recv_pkts_vec_avx2 :
2560 #ifdef CC_AVX512_SUPPORT
2562 if (check_ret == IAVF_VECTOR_PATH)
2564 iavf_recv_pkts_vec_avx512;
2567 iavf_recv_pkts_vec_avx512_offload;
2577 if (dev->data->scattered_rx) {
2578 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2579 dev->data->port_id);
2580 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2581 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2583 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2584 } else if (adapter->rx_bulk_alloc_allowed) {
2585 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2586 dev->data->port_id);
2587 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2589 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2590 dev->data->port_id);
2591 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2592 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2594 dev->rx_pkt_burst = iavf_recv_pkts;
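/* Summary (descriptive comment, added for clarity): iavf_set_rx_function()
 * prefers a vector path (SSE, then AVX2, then AVX512 when compiled in and the
 * configured SIMD bitwidth allows it), choosing flexible-descriptor variants
 * when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is negotiated and scattered variants
 * when scattered Rx is enabled; otherwise it falls back to the scalar
 * scattered, bulk-allocation or basic callbacks selected above.
 */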
2598 /* Choose the Tx function. */
2600 iavf_set_tx_function(struct rte_eth_dev *dev)
2603 struct iavf_tx_queue *txq;
2606 bool use_sse = false;
2607 bool use_avx2 = false;
2608 bool use_avx512 = false;
2610 check_ret = iavf_tx_vec_dev_check(dev);
2612 if (check_ret >= 0 &&
2613 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2614 /* SSE and AVX2 do not support the offload path yet. */
2615 if (check_ret == IAVF_VECTOR_PATH) {
2617 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2618 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2619 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2622 #ifdef CC_AVX512_SUPPORT
2623 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2624 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2625 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2629 if (!use_sse && !use_avx2 && !use_avx512)
2633 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2634 use_avx2 ? "avx2 " : "",
2635 dev->data->port_id);
2636 dev->tx_pkt_burst = use_avx2 ?
2637 iavf_xmit_pkts_vec_avx2 :
2640 dev->tx_pkt_prepare = NULL;
2641 #ifdef CC_AVX512_SUPPORT
2643 if (check_ret == IAVF_VECTOR_PATH) {
2644 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2645 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
2646 dev->data->port_id);
2648 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
2649 dev->tx_pkt_prepare = iavf_prep_pkts;
2650 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
2651 dev->data->port_id);
2656 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2657 txq = dev->data->tx_queues[i];
2660 #ifdef CC_AVX512_SUPPORT
2662 iavf_txq_vec_setup_avx512(txq);
2664 iavf_txq_vec_setup(txq);
2666 iavf_txq_vec_setup(txq);
2675 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2676 dev->data->port_id);
2677 dev->tx_pkt_burst = iavf_xmit_pkts;
2678 dev->tx_pkt_prepare = iavf_prep_pkts;
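/* Summary (descriptive comment, added for clarity): when no vector Tx path is
 * usable, the scalar iavf_xmit_pkts() burst function is paired with
 * iavf_prep_pkts() so offload requests are still validated per packet.
 */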
2682 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2685 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2686 uint16_t i, tx_last, tx_id;
2687 uint16_t nb_tx_free_last;
2688 uint16_t nb_tx_to_clean;
2691 /* Start freeing mbufs from the entry following tx_tail */
2692 tx_last = txq->tx_tail;
2693 tx_id = swr_ring[tx_last].next_id;
2695 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2698 nb_tx_to_clean = txq->nb_free;
2699 nb_tx_free_last = txq->nb_free;
2701 free_cnt = txq->nb_tx_desc;
2703 /* Loop through swr_ring to count the number of
2704 * freeable mbufs and packets.
2706 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2707 for (i = 0; i < nb_tx_to_clean &&
2708 pkt_cnt < free_cnt &&
2709 tx_id != tx_last; i++) {
2710 if (swr_ring[tx_id].mbuf != NULL) {
2711 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2712 swr_ring[tx_id].mbuf = NULL;
2715 * last segment in the packet,
2716 * increment packet count
2718 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2721 tx_id = swr_ring[tx_id].next_id;
2724 if (txq->rs_thresh > txq->nb_tx_desc -
2725 txq->nb_free || tx_id == tx_last)
2728 if (pkt_cnt < free_cnt) {
2729 if (iavf_xmit_cleanup(txq))
2732 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2733 nb_tx_free_last = txq->nb_free;
2737 return (int)pkt_cnt;
2741 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2743 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2745 return iavf_tx_done_cleanup_full(q, free_cnt);
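/* Illustrative usage sketch (assumption, not part of the driver): an
 * application can reclaim transmitted mbufs without sending more traffic via
 *
 *     int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * where a free_cnt of 0 conventionally means "clean as much as possible",
 * in which case the helper above scans at most txq->nb_tx_desc entries.
 */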
2749 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2750 struct rte_eth_rxq_info *qinfo)
2752 struct iavf_rx_queue *rxq;
2754 rxq = dev->data->rx_queues[queue_id];
2756 qinfo->mp = rxq->mp;
2757 qinfo->scattered_rx = dev->data->scattered_rx;
2758 qinfo->nb_desc = rxq->nb_rx_desc;
2760 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2761 qinfo->conf.rx_drop_en = true;
2762 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2766 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2767 struct rte_eth_txq_info *qinfo)
2769 struct iavf_tx_queue *txq;
2771 txq = dev->data->tx_queues[queue_id];
2773 qinfo->nb_desc = txq->nb_tx_desc;
2775 qinfo->conf.tx_free_thresh = txq->free_thresh;
2776 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2777 qinfo->conf.offloads = txq->offloads;
2778 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2781 /* Get the number of used descriptors of an Rx queue */
2783 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2785 #define IAVF_RXQ_SCAN_INTERVAL 4
2786 volatile union iavf_rx_desc *rxdp;
2787 struct iavf_rx_queue *rxq;
2790 rxq = dev->data->rx_queues[queue_id];
2791 rxdp = &rxq->rx_ring[rxq->rx_tail];
2793 while ((desc < rxq->nb_rx_desc) &&
2794 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2795 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2796 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2797 /* Check the DD bit of one Rx descriptor in each group of 4,
2798 * to avoid checking too frequently and degrading performance.
2801 desc += IAVF_RXQ_SCAN_INTERVAL;
2802 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2803 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2804 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2805 desc - rxq->nb_rx_desc]);
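/* Illustrative example (hypothetical numbers): with nb_rx_desc = 512 and
 * rx_tail = 510, the first step takes desc from 0 to 4; since 510 + 4 >= 512,
 * rxdp wraps to &rx_ring[2] and the scan continues from the start of the
 * ring.
 */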
2812 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2814 struct iavf_rx_queue *rxq = rx_queue;
2815 volatile uint64_t *status;
2819 if (unlikely(offset >= rxq->nb_rx_desc))
2822 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2823 return RTE_ETH_RX_DESC_UNAVAIL;
2825 desc = rxq->rx_tail + offset;
2826 if (desc >= rxq->nb_rx_desc)
2827 desc -= rxq->nb_rx_desc;
2829 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2830 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2831 << IAVF_RXD_QW1_STATUS_SHIFT);
2832 if (*status & mask)
2833 return RTE_ETH_RX_DESC_DONE;
2835 return RTE_ETH_RX_DESC_AVAIL;
2839 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2841 struct iavf_tx_queue *txq = tx_queue;
2842 volatile uint64_t *status;
2843 uint64_t mask, expect;
2846 if (unlikely(offset >= txq->nb_tx_desc))
2849 desc = txq->tx_tail + offset;
2850 /* Go to the next descriptor that has the RS bit set */
2851 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
2853 if (desc >= txq->nb_tx_desc) {
2854 desc -= txq->nb_tx_desc;
2855 if (desc >= txq->nb_tx_desc)
2856 desc -= txq->nb_tx_desc;
2859 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2860 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2861 expect = rte_cpu_to_le_64(
2862 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2863 if ((*status & mask) == expect)
2864 return RTE_ETH_TX_DESC_DONE;
2866 return RTE_ETH_TX_DESC_FULL;
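/* Illustrative example (hypothetical numbers): with tx_tail = 100, offset = 5
 * and rs_thresh = 32, desc becomes 105 and is rounded up to 128, i.e. the
 * next descriptor on which the driver set the RS bit and therefore the next
 * one whose DTYPE field hardware writes back as DESC_DONE.
 */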
2870 iavf_get_default_ptype_table(void)
2872 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2873 __rte_cache_aligned = {
2876 [1] = RTE_PTYPE_L2_ETHER,
2877 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2878 /* [3] - [5] reserved */
2879 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2880 /* [7] - [10] reserved */
2881 [11] = RTE_PTYPE_L2_ETHER_ARP,
2882 /* [12] - [21] reserved */
2884 /* Non-tunneled IPv4 */
2885 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2887 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2888 RTE_PTYPE_L4_NONFRAG,
2889 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2892 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2894 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2896 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2900 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2901 RTE_PTYPE_TUNNEL_IP |
2902 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2903 RTE_PTYPE_INNER_L4_FRAG,
2904 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2905 RTE_PTYPE_TUNNEL_IP |
2906 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2907 RTE_PTYPE_INNER_L4_NONFRAG,
2908 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2909 RTE_PTYPE_TUNNEL_IP |
2910 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2911 RTE_PTYPE_INNER_L4_UDP,
2913 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2914 RTE_PTYPE_TUNNEL_IP |
2915 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2916 RTE_PTYPE_INNER_L4_TCP,
2917 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2918 RTE_PTYPE_TUNNEL_IP |
2919 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2920 RTE_PTYPE_INNER_L4_SCTP,
2921 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2922 RTE_PTYPE_TUNNEL_IP |
2923 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2924 RTE_PTYPE_INNER_L4_ICMP,
2927 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2928 RTE_PTYPE_TUNNEL_IP |
2929 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2930 RTE_PTYPE_INNER_L4_FRAG,
2931 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2932 RTE_PTYPE_TUNNEL_IP |
2933 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2934 RTE_PTYPE_INNER_L4_NONFRAG,
2935 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2936 RTE_PTYPE_TUNNEL_IP |
2937 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2938 RTE_PTYPE_INNER_L4_UDP,
2940 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2941 RTE_PTYPE_TUNNEL_IP |
2942 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2943 RTE_PTYPE_INNER_L4_TCP,
2944 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2945 RTE_PTYPE_TUNNEL_IP |
2946 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2947 RTE_PTYPE_INNER_L4_SCTP,
2948 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2949 RTE_PTYPE_TUNNEL_IP |
2950 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2951 RTE_PTYPE_INNER_L4_ICMP,
2953 /* IPv4 --> GRE/Teredo/VXLAN */
2954 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2955 RTE_PTYPE_TUNNEL_GRENAT,
2957 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2958 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2959 RTE_PTYPE_TUNNEL_GRENAT |
2960 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2961 RTE_PTYPE_INNER_L4_FRAG,
2962 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2963 RTE_PTYPE_TUNNEL_GRENAT |
2964 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2965 RTE_PTYPE_INNER_L4_NONFRAG,
2966 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2967 RTE_PTYPE_TUNNEL_GRENAT |
2968 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2969 RTE_PTYPE_INNER_L4_UDP,
2971 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2972 RTE_PTYPE_TUNNEL_GRENAT |
2973 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2974 RTE_PTYPE_INNER_L4_TCP,
2975 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2976 RTE_PTYPE_TUNNEL_GRENAT |
2977 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2978 RTE_PTYPE_INNER_L4_SCTP,
2979 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2980 RTE_PTYPE_TUNNEL_GRENAT |
2981 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2982 RTE_PTYPE_INNER_L4_ICMP,
2984 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2985 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2986 RTE_PTYPE_TUNNEL_GRENAT |
2987 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2988 RTE_PTYPE_INNER_L4_FRAG,
2989 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2990 RTE_PTYPE_TUNNEL_GRENAT |
2991 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2992 RTE_PTYPE_INNER_L4_NONFRAG,
2993 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2994 RTE_PTYPE_TUNNEL_GRENAT |
2995 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2996 RTE_PTYPE_INNER_L4_UDP,
2998 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2999 RTE_PTYPE_TUNNEL_GRENAT |
3000 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3001 RTE_PTYPE_INNER_L4_TCP,
3002 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3003 RTE_PTYPE_TUNNEL_GRENAT |
3004 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3005 RTE_PTYPE_INNER_L4_SCTP,
3006 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3007 RTE_PTYPE_TUNNEL_GRENAT |
3008 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3009 RTE_PTYPE_INNER_L4_ICMP,
3011 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3012 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3013 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3015 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3016 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3017 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3018 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3019 RTE_PTYPE_INNER_L4_FRAG,
3020 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3021 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3022 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3023 RTE_PTYPE_INNER_L4_NONFRAG,
3024 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3025 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3026 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3027 RTE_PTYPE_INNER_L4_UDP,
3029 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3030 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3031 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3032 RTE_PTYPE_INNER_L4_TCP,
3033 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3034 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3035 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3036 RTE_PTYPE_INNER_L4_SCTP,
3037 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3038 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3039 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3040 RTE_PTYPE_INNER_L4_ICMP,
3042 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3043 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3044 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3045 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3046 RTE_PTYPE_INNER_L4_FRAG,
3047 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3048 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3049 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3050 RTE_PTYPE_INNER_L4_NONFRAG,
3051 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3052 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3053 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3054 RTE_PTYPE_INNER_L4_UDP,
3056 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3057 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3058 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3059 RTE_PTYPE_INNER_L4_TCP,
3060 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3061 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3062 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3063 RTE_PTYPE_INNER_L4_SCTP,
3064 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3065 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3066 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3067 RTE_PTYPE_INNER_L4_ICMP,
3068 /* [73] - [87] reserved */
3070 /* Non-tunneled IPv6 */
3071 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3073 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3074 RTE_PTYPE_L4_NONFRAG,
3075 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3078 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3080 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3082 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3086 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3087 RTE_PTYPE_TUNNEL_IP |
3088 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3089 RTE_PTYPE_INNER_L4_FRAG,
3090 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3091 RTE_PTYPE_TUNNEL_IP |
3092 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3093 RTE_PTYPE_INNER_L4_NONFRAG,
3094 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3095 RTE_PTYPE_TUNNEL_IP |
3096 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3097 RTE_PTYPE_INNER_L4_UDP,
3099 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3100 RTE_PTYPE_TUNNEL_IP |
3101 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3102 RTE_PTYPE_INNER_L4_TCP,
3103 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3104 RTE_PTYPE_TUNNEL_IP |
3105 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3106 RTE_PTYPE_INNER_L4_SCTP,
3107 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3108 RTE_PTYPE_TUNNEL_IP |
3109 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3110 RTE_PTYPE_INNER_L4_ICMP,
3113 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3114 RTE_PTYPE_TUNNEL_IP |
3115 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3116 RTE_PTYPE_INNER_L4_FRAG,
3117 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3118 RTE_PTYPE_TUNNEL_IP |
3119 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3120 RTE_PTYPE_INNER_L4_NONFRAG,
3121 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3122 RTE_PTYPE_TUNNEL_IP |
3123 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3124 RTE_PTYPE_INNER_L4_UDP,
3125 /* [105] reserved */
3126 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3127 RTE_PTYPE_TUNNEL_IP |
3128 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3129 RTE_PTYPE_INNER_L4_TCP,
3130 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3131 RTE_PTYPE_TUNNEL_IP |
3132 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3133 RTE_PTYPE_INNER_L4_SCTP,
3134 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3135 RTE_PTYPE_TUNNEL_IP |
3136 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3137 RTE_PTYPE_INNER_L4_ICMP,
3139 /* IPv6 --> GRE/Teredo/VXLAN */
3140 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3141 RTE_PTYPE_TUNNEL_GRENAT,
3143 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3144 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3145 RTE_PTYPE_TUNNEL_GRENAT |
3146 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3147 RTE_PTYPE_INNER_L4_FRAG,
3148 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3149 RTE_PTYPE_TUNNEL_GRENAT |
3150 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3151 RTE_PTYPE_INNER_L4_NONFRAG,
3152 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3153 RTE_PTYPE_TUNNEL_GRENAT |
3154 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3155 RTE_PTYPE_INNER_L4_UDP,
3156 /* [113] reserved */
3157 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3158 RTE_PTYPE_TUNNEL_GRENAT |
3159 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3160 RTE_PTYPE_INNER_L4_TCP,
3161 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3162 RTE_PTYPE_TUNNEL_GRENAT |
3163 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3164 RTE_PTYPE_INNER_L4_SCTP,
3165 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3166 RTE_PTYPE_TUNNEL_GRENAT |
3167 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3168 RTE_PTYPE_INNER_L4_ICMP,
3170 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3171 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3172 RTE_PTYPE_TUNNEL_GRENAT |
3173 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3174 RTE_PTYPE_INNER_L4_FRAG,
3175 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3176 RTE_PTYPE_TUNNEL_GRENAT |
3177 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3178 RTE_PTYPE_INNER_L4_NONFRAG,
3179 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3180 RTE_PTYPE_TUNNEL_GRENAT |
3181 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3182 RTE_PTYPE_INNER_L4_UDP,
3183 /* [120] reserved */
3184 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3185 RTE_PTYPE_TUNNEL_GRENAT |
3186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3187 RTE_PTYPE_INNER_L4_TCP,
3188 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3189 RTE_PTYPE_TUNNEL_GRENAT |
3190 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3191 RTE_PTYPE_INNER_L4_SCTP,
3192 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3193 RTE_PTYPE_TUNNEL_GRENAT |
3194 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3195 RTE_PTYPE_INNER_L4_ICMP,
3197 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3198 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3199 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3201 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3202 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3203 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3204 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3205 RTE_PTYPE_INNER_L4_FRAG,
3206 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3207 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3208 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3209 RTE_PTYPE_INNER_L4_NONFRAG,
3210 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3211 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3212 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3213 RTE_PTYPE_INNER_L4_UDP,
3214 /* [128] reserved */
3215 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3216 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3217 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3218 RTE_PTYPE_INNER_L4_TCP,
3219 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3220 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3221 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3222 RTE_PTYPE_INNER_L4_SCTP,
3223 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3224 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3226 RTE_PTYPE_INNER_L4_ICMP,
3228 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3229 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3230 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3231 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3232 RTE_PTYPE_INNER_L4_FRAG,
3233 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3234 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3236 RTE_PTYPE_INNER_L4_NONFRAG,
3237 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3238 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3239 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3240 RTE_PTYPE_INNER_L4_UDP,
3241 /* [135] reserved */
3242 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3244 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3245 RTE_PTYPE_INNER_L4_TCP,
3246 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3247 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3249 RTE_PTYPE_INNER_L4_SCTP,
3250 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3251 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3252 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3253 RTE_PTYPE_INNER_L4_ICMP,
3254 /* [139] - [299] reserved */
3257 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3258 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3260 /* PPPoE --> IPv4 */
3261 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3262 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3264 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3265 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3266 RTE_PTYPE_L4_NONFRAG,
3267 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3268 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3270 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3271 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3273 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3274 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3276 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3277 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3280 /* PPPoE --> IPv6 */
3281 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3282 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3284 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3285 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3286 RTE_PTYPE_L4_NONFRAG,
3287 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3288 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3290 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3291 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3293 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3294 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3296 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3297 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3299 /* [314] - [324] reserved */
3301 /* IPv4/IPv6 --> GTPC/GTPU */
3302 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3303 RTE_PTYPE_TUNNEL_GTPC,
3304 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3305 RTE_PTYPE_TUNNEL_GTPC,
3306 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3307 RTE_PTYPE_TUNNEL_GTPC,
3308 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3309 RTE_PTYPE_TUNNEL_GTPC,
3310 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3311 RTE_PTYPE_TUNNEL_GTPU,
3312 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_GTPU,
3315 /* IPv4 --> GTPU --> IPv4 */
3316 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GTPU |
3318 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3319 RTE_PTYPE_INNER_L4_FRAG,
3320 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_GTPU |
3322 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3323 RTE_PTYPE_INNER_L4_NONFRAG,
3324 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3325 RTE_PTYPE_TUNNEL_GTPU |
3326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3327 RTE_PTYPE_INNER_L4_UDP,
3328 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3329 RTE_PTYPE_TUNNEL_GTPU |
3330 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3331 RTE_PTYPE_INNER_L4_TCP,
3332 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3333 RTE_PTYPE_TUNNEL_GTPU |
3334 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3335 RTE_PTYPE_INNER_L4_ICMP,
3337 /* IPv6 --> GTPU --> IPv4 */
3338 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3339 RTE_PTYPE_TUNNEL_GTPU |
3340 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3341 RTE_PTYPE_INNER_L4_FRAG,
3342 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3343 RTE_PTYPE_TUNNEL_GTPU |
3344 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3345 RTE_PTYPE_INNER_L4_NONFRAG,
3346 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3347 RTE_PTYPE_TUNNEL_GTPU |
3348 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3349 RTE_PTYPE_INNER_L4_UDP,
3350 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3351 RTE_PTYPE_TUNNEL_GTPU |
3352 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3353 RTE_PTYPE_INNER_L4_TCP,
3354 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3355 RTE_PTYPE_TUNNEL_GTPU |
3356 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3357 RTE_PTYPE_INNER_L4_ICMP,
3359 /* IPv4 --> GTPU --> IPv6 */
3360 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_TUNNEL_GTPU |
3362 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3363 RTE_PTYPE_INNER_L4_FRAG,
3364 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_TUNNEL_GTPU |
3366 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3367 RTE_PTYPE_INNER_L4_NONFRAG,
3368 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3369 RTE_PTYPE_TUNNEL_GTPU |
3370 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3371 RTE_PTYPE_INNER_L4_UDP,
3372 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3373 RTE_PTYPE_TUNNEL_GTPU |
3374 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3375 RTE_PTYPE_INNER_L4_TCP,
3376 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3377 RTE_PTYPE_TUNNEL_GTPU |
3378 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3379 RTE_PTYPE_INNER_L4_ICMP,
3381 /* IPv6 --> GTPU --> IPv6 */
3382 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3383 RTE_PTYPE_TUNNEL_GTPU |
3384 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3385 RTE_PTYPE_INNER_L4_FRAG,
3386 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3387 RTE_PTYPE_TUNNEL_GTPU |
3388 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3389 RTE_PTYPE_INNER_L4_NONFRAG,
3390 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3391 RTE_PTYPE_TUNNEL_GTPU |
3392 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3393 RTE_PTYPE_INNER_L4_UDP,
3394 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3395 RTE_PTYPE_TUNNEL_GTPU |
3396 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3397 RTE_PTYPE_INNER_L4_TCP,
3398 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3399 RTE_PTYPE_TUNNEL_GTPU |
3400 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3401 RTE_PTYPE_INNER_L4_ICMP,
3403 /* IPv4 --> UDP ECPRI */
3404 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3406 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3408 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3410 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3412 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3414 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3416 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3418 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3420 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3422 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3425 /* IPv6 --> UDP ECPRI */
3426 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3428 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3430 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3432 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3434 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3436 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3438 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3440 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3442 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3444 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3446 /* All others reserved */