1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
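/*
 * Illustrative example (not part of the driver): the table above maps the
 * per-queue protocol extraction type selected via the "proto_xtr" devarg to
 * the RXDID that will be programmed for that queue, e.g.
 *
 *   uint8_t rxdid = iavf_proto_xtr_type_to_rxdid(IAVF_PROTO_XTR_IPV4);
 *   // rxdid == IAVF_RXDID_COMMS_AUX_IPV4; any out-of-range type falls
 *   // back to IAVF_RXDID_COMMS_OVS_1.
 */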
61 iavf_monitor_callback(const uint64_t value,
62 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
64 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
66 * we expect the DD bit to be set to 1 if this descriptor was already
69 return (value & m) == m ? -1 : 0;
73 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
75 struct iavf_rx_queue *rxq = rx_queue;
76 volatile union iavf_rx_desc *rxdp;
80 rxdp = &rxq->rx_ring[desc];
81 /* watch for changes in status bit */
82 pmc->addr = &rxdp->wb.qword1.status_error_len;
84 /* comparison callback */
85 pmc->fn = iavf_monitor_callback;
87 /* registers are 64-bit */
88 pmc->size = sizeof(uint64_t);
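/*
 * Illustrative sketch (not part of the driver): applications normally reach
 * this callback through the generic ethdev/power-management API, roughly:
 *
 *   struct rte_power_monitor_cond pmc;
 *
 *   if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *       rte_power_monitor(&pmc, rte_get_tsc_cycles() + timeout_cycles);
 *   // The lcore sleeps until the watched status qword changes (DD bit set)
 *   // or the TSC deadline expires; timeout_cycles is application-defined.
 */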
94 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
96 /* The following constraints must be satisfied:
97 * thresh < rxq->nb_rx_desc
99 if (thresh >= nb_desc) {
100 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
108 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
109 uint16_t tx_free_thresh)
111 /* TX descriptors will have their RS bit set after tx_rs_thresh
112 * descriptors have been used. The TX descriptor ring will be cleaned
113 * after tx_free_thresh descriptors are used or if the number of
114 * descriptors required to transmit a packet is greater than the
115 * number of free TX descriptors.
117 * The following constraints must be satisfied:
118 * - tx_rs_thresh must be less than the size of the ring minus 2.
119 * - tx_free_thresh must be less than the size of the ring minus 3.
120 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
121 * - tx_rs_thresh must be a divisor of the ring size.
123 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
124 * race condition, hence the maximum threshold constraints. When a
125 * threshold is set to zero, its default value is used.
127 if (tx_rs_thresh >= (nb_desc - 2)) {
128 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
129 "number of TX descriptors (%u) minus 2",
130 tx_rs_thresh, nb_desc);
133 if (tx_free_thresh >= (nb_desc - 3)) {
134 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
135 "number of TX descriptors (%u) minus 3.",
136 tx_free_thresh, nb_desc);
139 if (tx_rs_thresh > tx_free_thresh) {
140 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
141 "equal to tx_free_thresh (%u).",
142 tx_rs_thresh, tx_free_thresh);
145 if ((nb_desc % tx_rs_thresh) != 0) {
146 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
147 "number of TX descriptors (%u).",
148 tx_rs_thresh, nb_desc);
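/*
 * Worked example (illustrative): for a ring of nb_desc = 512, the pair
 * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies every constraint
 * above: 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.
 */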
156 check_rx_vec_allow(struct iavf_rx_queue *rxq)
158 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
159 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
160 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
164 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
169 check_tx_vec_allow(struct iavf_tx_queue *txq)
171 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
172 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
173 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
174 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
177 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
182 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
186 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
187 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
188 "rxq->rx_free_thresh=%d, "
189 "IAVF_RX_MAX_BURST=%d",
190 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
192 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
193 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
194 "rxq->nb_rx_desc=%d, "
195 "rxq->rx_free_thresh=%d",
196 rxq->nb_rx_desc, rxq->rx_free_thresh);
203 reset_rx_queue(struct iavf_rx_queue *rxq)
211 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
213 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
214 ((volatile char *)rxq->rx_ring)[i] = 0;
216 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
218 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
219 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
222 rxq->rx_nb_avail = 0;
223 rxq->rx_next_avail = 0;
224 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
228 rxq->pkt_first_seg = NULL;
229 rxq->pkt_last_seg = NULL;
231 rxq->rxrearm_start = 0;
235 reset_tx_queue(struct iavf_tx_queue *txq)
237 struct iavf_tx_entry *txe;
242 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
247 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
248 for (i = 0; i < size; i++)
249 ((volatile char *)txq->tx_ring)[i] = 0;
251 prev = (uint16_t)(txq->nb_tx_desc - 1);
252 for (i = 0; i < txq->nb_tx_desc; i++) {
253 txq->tx_ring[i].cmd_type_offset_bsz =
254 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
257 txe[prev].next_id = i;
264 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
265 txq->nb_free = txq->nb_tx_desc - 1;
267 txq->next_dd = txq->rs_thresh - 1;
268 txq->next_rs = txq->rs_thresh - 1;
272 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
274 volatile union iavf_rx_desc *rxd;
275 struct rte_mbuf *mbuf = NULL;
279 for (i = 0; i < rxq->nb_rx_desc; i++) {
280 mbuf = rte_mbuf_raw_alloc(rxq->mp);
281 if (unlikely(!mbuf)) {
282 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
286 rte_mbuf_refcnt_set(mbuf, 1);
288 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
290 mbuf->port = rxq->port_id;
293 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
295 rxd = &rxq->rx_ring[i];
296 rxd->read.pkt_addr = dma_addr;
297 rxd->read.hdr_addr = 0;
298 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
303 rxq->sw_ring[i] = mbuf;
310 release_rxq_mbufs(struct iavf_rx_queue *rxq)
317 for (i = 0; i < rxq->nb_rx_desc; i++) {
318 if (rxq->sw_ring[i]) {
319 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
320 rxq->sw_ring[i] = NULL;
325 if (rxq->rx_nb_avail == 0)
327 for (i = 0; i < rxq->rx_nb_avail; i++) {
328 struct rte_mbuf *mbuf;
330 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
331 rte_pktmbuf_free_seg(mbuf);
333 rxq->rx_nb_avail = 0;
337 release_txq_mbufs(struct iavf_tx_queue *txq)
341 if (!txq || !txq->sw_ring) {
342 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
346 for (i = 0; i < txq->nb_tx_desc; i++) {
347 if (txq->sw_ring[i].mbuf) {
348 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
349 txq->sw_ring[i].mbuf = NULL;
354 static const struct iavf_rxq_ops def_rxq_ops = {
355 .release_mbufs = release_rxq_mbufs,
358 static const struct iavf_txq_ops def_txq_ops = {
359 .release_mbufs = release_txq_mbufs,
363 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
365 volatile union iavf_rx_flex_desc *rxdp)
367 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
368 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
373 if (desc->flow_id != 0xFFFFFFFF) {
374 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
375 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
378 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
379 stat_err = rte_le_to_cpu_16(desc->status_error0);
380 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
381 mb->ol_flags |= PKT_RX_RSS_HASH;
382 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
388 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
390 volatile union iavf_rx_flex_desc *rxdp)
392 volatile struct iavf_32b_rx_flex_desc_comms *desc =
393 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
396 stat_err = rte_le_to_cpu_16(desc->status_error0);
397 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
398 mb->ol_flags |= PKT_RX_RSS_HASH;
399 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
402 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
403 if (desc->flow_id != 0xFFFFFFFF) {
404 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
405 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
408 if (rxq->xtr_ol_flag) {
409 uint32_t metadata = 0;
411 stat_err = rte_le_to_cpu_16(desc->status_error1);
413 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
414 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
416 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
418 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
421 mb->ol_flags |= rxq->xtr_ol_flag;
423 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
430 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
432 volatile union iavf_rx_flex_desc *rxdp)
434 volatile struct iavf_32b_rx_flex_desc_comms *desc =
435 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
438 stat_err = rte_le_to_cpu_16(desc->status_error0);
439 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
440 mb->ol_flags |= PKT_RX_RSS_HASH;
441 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
444 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
445 if (desc->flow_id != 0xFFFFFFFF) {
446 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
447 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
450 if (rxq->xtr_ol_flag) {
451 uint32_t metadata = 0;
453 if (desc->flex_ts.flex.aux0 != 0xFFFF)
454 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
455 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
456 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
459 mb->ol_flags |= rxq->xtr_ol_flag;
461 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
468 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
471 case IAVF_RXDID_COMMS_AUX_VLAN:
472 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
473 rxq->rxd_to_pkt_fields =
474 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
476 case IAVF_RXDID_COMMS_AUX_IPV4:
477 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
478 rxq->rxd_to_pkt_fields =
479 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
481 case IAVF_RXDID_COMMS_AUX_IPV6:
482 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
483 rxq->rxd_to_pkt_fields =
484 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
486 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
488 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
489 rxq->rxd_to_pkt_fields =
490 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
492 case IAVF_RXDID_COMMS_AUX_TCP:
493 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
494 rxq->rxd_to_pkt_fields =
495 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
497 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
499 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
500 rxq->rxd_to_pkt_fields =
501 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
503 case IAVF_RXDID_COMMS_OVS_1:
504 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
507 /* update this according to the RXDID for FLEX_DESC_NONE */
508 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
512 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
513 rxq->xtr_ol_flag = 0;
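/*
 * Illustrative sketch (not part of the driver): with one of the AUX
 * descriptor formats selected above, an application can read the extracted
 * metadata through the dynamic field/flag accessors of rte_pmd_iavf.h, e.g.
 *
 *   if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *       (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)) {
 *           uint32_t md = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 *           // md holds the protocol words extracted by hardware
 *   }
 */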
517 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
518 uint16_t nb_desc, unsigned int socket_id,
519 const struct rte_eth_rxconf *rx_conf,
520 struct rte_mempool *mp)
522 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
523 struct iavf_adapter *ad =
524 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
525 struct iavf_info *vf =
526 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
527 struct iavf_vsi *vsi = &vf->vsi;
528 struct iavf_rx_queue *rxq;
529 const struct rte_memzone *mz;
533 uint16_t rx_free_thresh;
536 PMD_INIT_FUNC_TRACE();
538 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
540 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
541 nb_desc > IAVF_MAX_RING_DESC ||
542 nb_desc < IAVF_MIN_RING_DESC) {
543 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
548 /* Check free threshold */
549 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
550 IAVF_DEFAULT_RX_FREE_THRESH :
551 rx_conf->rx_free_thresh;
552 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
555 /* Free memory if needed */
556 if (dev->data->rx_queues[queue_idx]) {
557 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
558 dev->data->rx_queues[queue_idx] = NULL;
561 /* Allocate the rx queue data structure */
562 rxq = rte_zmalloc_socket("iavf rxq",
563 sizeof(struct iavf_rx_queue),
567 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
568 "rx queue data structure");
572 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
573 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
575 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
576 rxq->proto_xtr = proto_xtr;
578 rxq->rxdid = IAVF_RXDID_LEGACY_1;
579 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
582 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
583 struct virtchnl_vlan_supported_caps *stripping_support =
584 &vf->vlan_v2_caps.offloads.stripping_support;
585 uint32_t stripping_cap;
587 if (stripping_support->outer)
588 stripping_cap = stripping_support->outer;
590 stripping_cap = stripping_support->inner;
592 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
593 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
594 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
595 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
597 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
600 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
603 rxq->nb_rx_desc = nb_desc;
604 rxq->rx_free_thresh = rx_free_thresh;
605 rxq->queue_id = queue_idx;
606 rxq->port_id = dev->data->port_id;
607 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
610 rxq->offloads = offloads;
612 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
613 rxq->crc_len = RTE_ETHER_CRC_LEN;
617 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
618 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
620 /* Allocate the software ring. */
621 len = nb_desc + IAVF_RX_MAX_BURST;
623 rte_zmalloc_socket("iavf rx sw ring",
624 sizeof(struct rte_mbuf *) * len,
628 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
633 /* Allocate the maximum number of RX ring hardware descriptors with
634 * a little more to support bulk allocation.
636 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
637 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
639 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
640 ring_size, IAVF_RING_BASE_ALIGN,
643 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
644 rte_free(rxq->sw_ring);
648 /* Zero all the descriptors in the ring. */
649 memset(mz->addr, 0, ring_size);
650 rxq->rx_ring_phys_addr = mz->iova;
651 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
656 dev->data->rx_queues[queue_idx] = rxq;
657 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
658 rxq->ops = &def_rxq_ops;
660 if (check_rx_bulk_allow(rxq) == true) {
661 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
662 "satisfied. Rx Burst Bulk Alloc function will be "
663 "used on port=%d, queue=%d.",
664 rxq->port_id, rxq->queue_id);
666 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
667 "not satisfied, Scattered Rx is requested "
668 "on port=%d, queue=%d.",
669 rxq->port_id, rxq->queue_id);
670 ad->rx_bulk_alloc_allowed = false;
673 if (check_rx_vec_allow(rxq) == false)
674 ad->rx_vec_allowed = false;
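/*
 * Illustrative sketch (not part of the driver): this setup path is entered
 * through the standard ethdev call; variable names below are the
 * application's own:
 *
 *   struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *   rxconf.rx_free_thresh = 32;
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &rxconf, mbuf_pool);
 */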
680 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
683 unsigned int socket_id,
684 const struct rte_eth_txconf *tx_conf)
686 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
687 struct iavf_info *vf =
688 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
689 struct iavf_tx_queue *txq;
690 const struct rte_memzone *mz;
692 uint16_t tx_rs_thresh, tx_free_thresh;
695 PMD_INIT_FUNC_TRACE();
697 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
699 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
700 nb_desc > IAVF_MAX_RING_DESC ||
701 nb_desc < IAVF_MIN_RING_DESC) {
702 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
707 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
708 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
709 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
710 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
711 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
713 /* Free memory if needed. */
714 if (dev->data->tx_queues[queue_idx]) {
715 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
716 dev->data->tx_queues[queue_idx] = NULL;
719 /* Allocate the TX queue data structure. */
720 txq = rte_zmalloc_socket("iavf txq",
721 sizeof(struct iavf_tx_queue),
725 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
726 "tx queue structure");
730 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
731 struct virtchnl_vlan_supported_caps *insertion_support =
732 &vf->vlan_v2_caps.offloads.insertion_support;
733 uint32_t insertion_cap;
735 if (insertion_support->outer)
736 insertion_cap = insertion_support->outer;
738 insertion_cap = insertion_support->inner;
740 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
741 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
742 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
743 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
745 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
748 txq->nb_tx_desc = nb_desc;
749 txq->rs_thresh = tx_rs_thresh;
750 txq->free_thresh = tx_free_thresh;
751 txq->queue_id = queue_idx;
752 txq->port_id = dev->data->port_id;
753 txq->offloads = offloads;
754 txq->tx_deferred_start = tx_conf->tx_deferred_start;
756 /* Allocate software ring */
758 rte_zmalloc_socket("iavf tx sw ring",
759 sizeof(struct iavf_tx_entry) * nb_desc,
763 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
768 /* Allocate TX hardware ring descriptors. */
769 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
770 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
771 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
772 ring_size, IAVF_RING_BASE_ALIGN,
775 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
776 rte_free(txq->sw_ring);
780 txq->tx_ring_phys_addr = mz->iova;
781 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
786 dev->data->tx_queues[queue_idx] = txq;
787 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
788 txq->ops = &def_txq_ops;
790 if (check_tx_vec_allow(txq) == false) {
791 struct iavf_adapter *ad =
792 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
793 ad->tx_vec_allowed = false;
796 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
797 vf->tm_conf.committed) {
799 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
800 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
801 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
802 vf->qtc_map[tc].queue_count))
805 if (tc >= vf->qos_cap->num_elem) {
806 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
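/*
 * Illustrative sketch (not part of the driver): the matching application-side
 * call, with thresholds that pass check_tx_thresh(); variable names are the
 * application's own:
 *
 *   struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *   txconf.tx_rs_thresh = 32;
 *   txconf.tx_free_thresh = 32;
 *   ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 */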
816 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
818 struct iavf_adapter *adapter =
819 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
820 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
821 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
822 struct iavf_rx_queue *rxq;
825 PMD_DRV_FUNC_TRACE();
827 if (rx_queue_id >= dev->data->nb_rx_queues)
830 rxq = dev->data->rx_queues[rx_queue_id];
832 err = alloc_rxq_mbufs(rxq);
834 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
840 /* Init the RX tail register. */
841 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
842 IAVF_WRITE_FLUSH(hw);
844 /* Ready to switch the queue on */
846 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
848 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
851 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
854 dev->data->rx_queue_state[rx_queue_id] =
855 RTE_ETH_QUEUE_STATE_STARTED;
861 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
863 struct iavf_adapter *adapter =
864 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
865 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
866 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
867 struct iavf_tx_queue *txq;
870 PMD_DRV_FUNC_TRACE();
872 if (tx_queue_id >= dev->data->nb_tx_queues)
875 txq = dev->data->tx_queues[tx_queue_id];
877 /* Init the TX tail register. */
878 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
879 IAVF_WRITE_FLUSH(hw);
881 /* Ready to switch the queue on */
883 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
885 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
888 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
891 dev->data->tx_queue_state[tx_queue_id] =
892 RTE_ETH_QUEUE_STATE_STARTED;
898 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
900 struct iavf_adapter *adapter =
901 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
902 struct iavf_rx_queue *rxq;
905 PMD_DRV_FUNC_TRACE();
907 if (rx_queue_id >= dev->data->nb_rx_queues)
910 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
912 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
917 rxq = dev->data->rx_queues[rx_queue_id];
918 rxq->ops->release_mbufs(rxq);
920 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
926 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
928 struct iavf_adapter *adapter =
929 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
930 struct iavf_tx_queue *txq;
933 PMD_DRV_FUNC_TRACE();
935 if (tx_queue_id >= dev->data->nb_tx_queues)
938 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
940 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
945 txq = dev->data->tx_queues[tx_queue_id];
946 txq->ops->release_mbufs(txq);
948 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
954 iavf_dev_rx_queue_release(void *rxq)
956 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
961 q->ops->release_mbufs(q);
962 rte_free(q->sw_ring);
963 rte_memzone_free(q->mz);
968 iavf_dev_tx_queue_release(void *txq)
970 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
975 q->ops->release_mbufs(q);
976 rte_free(q->sw_ring);
977 rte_memzone_free(q->mz);
982 iavf_stop_queues(struct rte_eth_dev *dev)
984 struct iavf_adapter *adapter =
985 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
986 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
987 struct iavf_rx_queue *rxq;
988 struct iavf_tx_queue *txq;
991 /* Stop All queues */
992 if (!vf->lv_enabled) {
993 ret = iavf_disable_queues(adapter);
995 PMD_DRV_LOG(WARNING, "Failed to stop queues");
997 ret = iavf_disable_queues_lv(adapter);
999 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
1003 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1005 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1006 txq = dev->data->tx_queues[i];
1009 txq->ops->release_mbufs(txq);
1010 reset_tx_queue(txq);
1011 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1013 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1014 rxq = dev->data->rx_queues[i];
1017 rxq->ops->release_mbufs(rxq);
1018 reset_rx_queue(rxq);
1019 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1032 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1034 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1035 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1036 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1038 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1045 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1046 volatile union iavf_rx_flex_desc *rxdp,
1049 uint16_t vlan_tci = 0;
1051 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
1052 rte_le_to_cpu_64(rxdp->wb.status_error0) &
1053 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
1054 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
1056 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1057 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
1058 rte_le_to_cpu_16(rxdp->wb.status_error1) &
1059 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
1060 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1064 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1065 mb->vlan_tci = vlan_tci;
1069 /* Translate the rx descriptor status and error fields to pkt flags */
1070 static inline uint64_t
1071 iavf_rxd_to_pkt_flags(uint64_t qword)
1074 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1076 #define IAVF_RX_ERR_BITS 0x3f
1078 /* Check if RSS_HASH */
1079 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1080 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1081 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1083 /* Check if FDIR Match */
1084 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1087 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1088 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1092 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1093 flags |= PKT_RX_IP_CKSUM_BAD;
1095 flags |= PKT_RX_IP_CKSUM_GOOD;
1097 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1098 flags |= PKT_RX_L4_CKSUM_BAD;
1100 flags |= PKT_RX_L4_CKSUM_GOOD;
1102 /* TODO: Oversize error bit is not processed here */
1107 static inline uint64_t
1108 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1111 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1114 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1115 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1116 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1118 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1120 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1121 flags |= PKT_RX_FDIR_ID;
1125 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1126 flags |= PKT_RX_FDIR_ID;
1131 #define IAVF_RX_FLEX_ERR0_BITS \
1132 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1133 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1134 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1135 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1136 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1137 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1139 /* Rx L3/L4 checksum */
1140 static inline uint64_t
1141 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1145 /* check if HW has decoded the packet and processed the checksum */
1146 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1149 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1150 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1154 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1155 flags |= PKT_RX_IP_CKSUM_BAD;
1157 flags |= PKT_RX_IP_CKSUM_GOOD;
1159 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1160 flags |= PKT_RX_L4_CKSUM_BAD;
1162 flags |= PKT_RX_L4_CKSUM_GOOD;
1164 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1165 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1170 /* If the number of free RX descriptors is greater than the RX free
1171 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1172 * register. Update the RDT with the value of the last processed RX
1173 * descriptor minus 1, to guarantee that the RDT register is never
1174 * equal to the RDH register, which creates a "full" ring situation
1175 * from the hardware point of view.
1178 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1180 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1182 if (nb_hold > rxq->rx_free_thresh) {
1184 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1185 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1186 rx_id = (uint16_t)((rx_id == 0) ?
1187 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1188 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1191 rxq->nb_rx_hold = nb_hold;
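/*
 * Worked example (illustrative): with rx_free_thresh = 32, the tail register
 * is only written once at least 32 descriptors have been consumed; if rx_id
 * is 100 at that point the register gets 99 (rx_id - 1), and nb_rx_desc - 1
 * when rx_id has just wrapped to 0, so RDT never catches up with RDH.
 */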
1194 /* implement recv_pkts */
1196 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1198 volatile union iavf_rx_desc *rx_ring;
1199 volatile union iavf_rx_desc *rxdp;
1200 struct iavf_rx_queue *rxq;
1201 union iavf_rx_desc rxd;
1202 struct rte_mbuf *rxe;
1203 struct rte_eth_dev *dev;
1204 struct rte_mbuf *rxm;
1205 struct rte_mbuf *nmb;
1209 uint16_t rx_packet_len;
1210 uint16_t rx_id, nb_hold;
1213 const uint32_t *ptype_tbl;
1218 rx_id = rxq->rx_tail;
1219 rx_ring = rxq->rx_ring;
1220 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1222 while (nb_rx < nb_pkts) {
1223 rxdp = &rx_ring[rx_id];
1224 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1225 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1226 IAVF_RXD_QW1_STATUS_SHIFT;
1228 /* Check the DD bit first */
1229 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1231 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1233 nmb = rte_mbuf_raw_alloc(rxq->mp);
1234 if (unlikely(!nmb)) {
1235 dev = &rte_eth_devices[rxq->port_id];
1236 dev->data->rx_mbuf_alloc_failed++;
1237 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1238 "queue_id=%u", rxq->port_id, rxq->queue_id);
1244 rxe = rxq->sw_ring[rx_id];
1245 rxq->sw_ring[rx_id] = nmb;
1247 if (unlikely(rx_id == rxq->nb_rx_desc))
1250 /* Prefetch next mbuf */
1251 rte_prefetch0(rxq->sw_ring[rx_id]);
1253 /* When next RX descriptor is on a cache line boundary,
1254 * prefetch the next 4 RX descriptors and next 8 pointers
1257 if ((rx_id & 0x3) == 0) {
1258 rte_prefetch0(&rx_ring[rx_id]);
1259 rte_prefetch0(rxq->sw_ring[rx_id]);
1263 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1264 rxdp->read.hdr_addr = 0;
1265 rxdp->read.pkt_addr = dma_addr;
1267 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1268 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1270 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1271 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1274 rxm->pkt_len = rx_packet_len;
1275 rxm->data_len = rx_packet_len;
1276 rxm->port = rxq->port_id;
1278 iavf_rxd_to_vlan_tci(rxm, &rxd);
1279 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1281 ptype_tbl[(uint8_t)((qword1 &
1282 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1284 if (pkt_flags & PKT_RX_RSS_HASH)
1286 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1288 if (pkt_flags & PKT_RX_FDIR)
1289 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1291 rxm->ol_flags |= pkt_flags;
1293 rx_pkts[nb_rx++] = rxm;
1295 rxq->rx_tail = rx_id;
1297 iavf_update_rx_tail(rxq, nb_hold, rx_id);
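/*
 * Illustrative sketch (not part of the driver): on the standard Rx path this
 * is the burst function invoked by rte_eth_rx_burst(), e.g.
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 *   for (uint16_t k = 0; k < n; k++)
 *       handle_packet(pkts[k]);   // application-defined
 */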
1302 /* implement recv_pkts for flexible Rx descriptor */
1304 iavf_recv_pkts_flex_rxd(void *rx_queue,
1305 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1307 volatile union iavf_rx_desc *rx_ring;
1308 volatile union iavf_rx_flex_desc *rxdp;
1309 struct iavf_rx_queue *rxq;
1310 union iavf_rx_flex_desc rxd;
1311 struct rte_mbuf *rxe;
1312 struct rte_eth_dev *dev;
1313 struct rte_mbuf *rxm;
1314 struct rte_mbuf *nmb;
1316 uint16_t rx_stat_err0;
1317 uint16_t rx_packet_len;
1318 uint16_t rx_id, nb_hold;
1321 const uint32_t *ptype_tbl;
1326 rx_id = rxq->rx_tail;
1327 rx_ring = rxq->rx_ring;
1328 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1330 while (nb_rx < nb_pkts) {
1331 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1332 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1334 /* Check the DD bit first */
1335 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1337 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1339 nmb = rte_mbuf_raw_alloc(rxq->mp);
1340 if (unlikely(!nmb)) {
1341 dev = &rte_eth_devices[rxq->port_id];
1342 dev->data->rx_mbuf_alloc_failed++;
1343 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1344 "queue_id=%u", rxq->port_id, rxq->queue_id);
1350 rxe = rxq->sw_ring[rx_id];
1351 rxq->sw_ring[rx_id] = nmb;
1353 if (unlikely(rx_id == rxq->nb_rx_desc))
1356 /* Prefetch next mbuf */
1357 rte_prefetch0(rxq->sw_ring[rx_id]);
1359 /* When next RX descriptor is on a cache line boundary,
1360 * prefetch the next 4 RX descriptors and next 8 pointers
1363 if ((rx_id & 0x3) == 0) {
1364 rte_prefetch0(&rx_ring[rx_id]);
1365 rte_prefetch0(rxq->sw_ring[rx_id]);
1369 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1370 rxdp->read.hdr_addr = 0;
1371 rxdp->read.pkt_addr = dma_addr;
1373 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1374 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1376 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1377 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1380 rxm->pkt_len = rx_packet_len;
1381 rxm->data_len = rx_packet_len;
1382 rxm->port = rxq->port_id;
1384 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1385 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1386 iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
1387 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1388 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1389 rxm->ol_flags |= pkt_flags;
1391 rx_pkts[nb_rx++] = rxm;
1393 rxq->rx_tail = rx_id;
1395 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1400 /* implement recv_scattered_pkts for flexible Rx descriptor */
1402 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1405 struct iavf_rx_queue *rxq = rx_queue;
1406 union iavf_rx_flex_desc rxd;
1407 struct rte_mbuf *rxe;
1408 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1409 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1410 struct rte_mbuf *nmb, *rxm;
1411 uint16_t rx_id = rxq->rx_tail;
1412 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1413 struct rte_eth_dev *dev;
1414 uint16_t rx_stat_err0;
1418 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1419 volatile union iavf_rx_flex_desc *rxdp;
1420 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1422 while (nb_rx < nb_pkts) {
1423 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1424 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1426 /* Check the DD bit */
1427 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1429 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1431 nmb = rte_mbuf_raw_alloc(rxq->mp);
1432 if (unlikely(!nmb)) {
1433 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1434 "queue_id=%u", rxq->port_id, rxq->queue_id);
1435 dev = &rte_eth_devices[rxq->port_id];
1436 dev->data->rx_mbuf_alloc_failed++;
1442 rxe = rxq->sw_ring[rx_id];
1443 rxq->sw_ring[rx_id] = nmb;
1445 if (rx_id == rxq->nb_rx_desc)
1448 /* Prefetch next mbuf */
1449 rte_prefetch0(rxq->sw_ring[rx_id]);
1451 /* When next RX descriptor is on a cache line boundary,
1452 * prefetch the next 4 RX descriptors and next 8 pointers
1455 if ((rx_id & 0x3) == 0) {
1456 rte_prefetch0(&rx_ring[rx_id]);
1457 rte_prefetch0(rxq->sw_ring[rx_id]);
1462 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1464 /* Set data buffer address and data length of the mbuf */
1465 rxdp->read.hdr_addr = 0;
1466 rxdp->read.pkt_addr = dma_addr;
1467 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1468 IAVF_RX_FLX_DESC_PKT_LEN_M;
1469 rxm->data_len = rx_packet_len;
1470 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1472 /* If this is the first buffer of the received packet, set the
1473 * pointer to the first mbuf of the packet and initialize its
1474 * context. Otherwise, update the total length and the number
1475 * of segments of the current scattered packet, and update the
1476 * pointer to the last mbuf of the current packet.
1480 first_seg->nb_segs = 1;
1481 first_seg->pkt_len = rx_packet_len;
1483 first_seg->pkt_len =
1484 (uint16_t)(first_seg->pkt_len +
1486 first_seg->nb_segs++;
1487 last_seg->next = rxm;
1490 /* If this is not the last buffer of the received packet,
1491 * update the pointer to the last mbuf of the current scattered
1492 * packet and continue to parse the RX ring.
1494 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1499 /* This is the last buffer of the received packet. If the CRC
1500 * is not stripped by the hardware:
1501 * - Subtract the CRC length from the total packet length.
1502 * - If the last buffer only contains the whole CRC or a part
1503 * of it, free the mbuf associated with the last buffer. If part
1504 * of the CRC is also contained in the previous mbuf, subtract
1505 * the length of that CRC part from the data length of the
1509 if (unlikely(rxq->crc_len > 0)) {
1510 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1511 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1512 rte_pktmbuf_free_seg(rxm);
1513 first_seg->nb_segs--;
1514 last_seg->data_len =
1515 (uint16_t)(last_seg->data_len -
1516 (RTE_ETHER_CRC_LEN - rx_packet_len));
1517 last_seg->next = NULL;
1519 rxm->data_len = (uint16_t)(rx_packet_len -
1524 first_seg->port = rxq->port_id;
1525 first_seg->ol_flags = 0;
1526 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1527 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1528 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
1529 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1530 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1532 first_seg->ol_flags |= pkt_flags;
1534 /* Prefetch data of first segment, if configured to do so. */
1535 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1536 first_seg->data_off));
1537 rx_pkts[nb_rx++] = first_seg;
1541 /* Record index of the next RX descriptor to probe. */
1542 rxq->rx_tail = rx_id;
1543 rxq->pkt_first_seg = first_seg;
1544 rxq->pkt_last_seg = last_seg;
1546 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1551 /* implement recv_scattered_pkts */
1553 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1556 struct iavf_rx_queue *rxq = rx_queue;
1557 union iavf_rx_desc rxd;
1558 struct rte_mbuf *rxe;
1559 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1560 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1561 struct rte_mbuf *nmb, *rxm;
1562 uint16_t rx_id = rxq->rx_tail;
1563 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1564 struct rte_eth_dev *dev;
1570 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1571 volatile union iavf_rx_desc *rxdp;
1572 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1574 while (nb_rx < nb_pkts) {
1575 rxdp = &rx_ring[rx_id];
1576 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1577 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1578 IAVF_RXD_QW1_STATUS_SHIFT;
1580 /* Check the DD bit */
1581 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1583 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1585 nmb = rte_mbuf_raw_alloc(rxq->mp);
1586 if (unlikely(!nmb)) {
1587 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1588 "queue_id=%u", rxq->port_id, rxq->queue_id);
1589 dev = &rte_eth_devices[rxq->port_id];
1590 dev->data->rx_mbuf_alloc_failed++;
1596 rxe = rxq->sw_ring[rx_id];
1597 rxq->sw_ring[rx_id] = nmb;
1599 if (rx_id == rxq->nb_rx_desc)
1602 /* Prefetch next mbuf */
1603 rte_prefetch0(rxq->sw_ring[rx_id]);
1605 /* When next RX descriptor is on a cache line boundary,
1606 * prefetch the next 4 RX descriptors and next 8 pointers
1609 if ((rx_id & 0x3) == 0) {
1610 rte_prefetch0(&rx_ring[rx_id]);
1611 rte_prefetch0(rxq->sw_ring[rx_id]);
1616 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1618 /* Set data buffer address and data length of the mbuf */
1619 rxdp->read.hdr_addr = 0;
1620 rxdp->read.pkt_addr = dma_addr;
1621 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1622 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1623 rxm->data_len = rx_packet_len;
1624 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1626 /* If this is the first buffer of the received packet, set the
1627 * pointer to the first mbuf of the packet and initialize its
1628 * context. Otherwise, update the total length and the number
1629 * of segments of the current scattered packet, and update the
1630 * pointer to the last mbuf of the current packet.
1634 first_seg->nb_segs = 1;
1635 first_seg->pkt_len = rx_packet_len;
1637 first_seg->pkt_len =
1638 (uint16_t)(first_seg->pkt_len +
1640 first_seg->nb_segs++;
1641 last_seg->next = rxm;
1644 /* If this is not the last buffer of the received packet,
1645 * update the pointer to the last mbuf of the current scattered
1646 * packet and continue to parse the RX ring.
1648 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1653 /* This is the last buffer of the received packet. If the CRC
1654 * is not stripped by the hardware:
1655 * - Subtract the CRC length from the total packet length.
1656 * - If the last buffer only contains the whole CRC or a part
1657 * of it, free the mbuf associated with the last buffer. If part
1658 * of the CRC is also contained in the previous mbuf, subtract
1659 * the length of that CRC part from the data length of the
1663 if (unlikely(rxq->crc_len > 0)) {
1664 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1665 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1666 rte_pktmbuf_free_seg(rxm);
1667 first_seg->nb_segs--;
1668 last_seg->data_len =
1669 (uint16_t)(last_seg->data_len -
1670 (RTE_ETHER_CRC_LEN - rx_packet_len));
1671 last_seg->next = NULL;
1673 rxm->data_len = (uint16_t)(rx_packet_len -
1677 first_seg->port = rxq->port_id;
1678 first_seg->ol_flags = 0;
1679 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1680 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1681 first_seg->packet_type =
1682 ptype_tbl[(uint8_t)((qword1 &
1683 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1685 if (pkt_flags & PKT_RX_RSS_HASH)
1686 first_seg->hash.rss =
1687 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1689 if (pkt_flags & PKT_RX_FDIR)
1690 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1692 first_seg->ol_flags |= pkt_flags;
1694 /* Prefetch data of first segment, if configured to do so. */
1695 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1696 first_seg->data_off));
1697 rx_pkts[nb_rx++] = first_seg;
1701 /* Record index of the next RX descriptor to probe. */
1702 rxq->rx_tail = rx_id;
1703 rxq->pkt_first_seg = first_seg;
1704 rxq->pkt_last_seg = last_seg;
1706 iavf_update_rx_tail(rxq, nb_hold, rx_id);
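/*
 * Illustrative sketch (not part of the driver): scattered Rx delivers an mbuf
 * chain; the total length sits in pkt_len of the first segment and the
 * segments are walked via the next pointer, e.g.
 *
 *   uint32_t total = 0;
 *
 *   for (struct rte_mbuf *seg = m; seg != NULL; seg = seg->next)
 *       total += seg->data_len;   // ends up equal to m->pkt_len
 */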
1711 #define IAVF_LOOK_AHEAD 8
1713 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1715 volatile union iavf_rx_flex_desc *rxdp;
1716 struct rte_mbuf **rxep;
1717 struct rte_mbuf *mb;
1720 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1721 int32_t i, j, nb_rx = 0;
1723 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1725 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1726 rxep = &rxq->sw_ring[rxq->rx_tail];
1728 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1730 /* Make sure there is at least 1 packet to receive */
1731 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1734 /* Scan LOOK_AHEAD descriptors at a time to determine which
1735 * descriptors reference packets that are ready to be received.
1737 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1738 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1739 /* Read desc statuses backwards to avoid race condition */
1740 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1741 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1745 /* Compute how many status bits were set */
1746 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1747 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1751 /* Translate descriptor info to mbuf parameters */
1752 for (j = 0; j < nb_dd; j++) {
1753 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1755 i * IAVF_LOOK_AHEAD + j);
1758 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1759 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1760 mb->data_len = pkt_len;
1761 mb->pkt_len = pkt_len;
1764 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1765 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1766 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
1767 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1768 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1769 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1771 mb->ol_flags |= pkt_flags;
1774 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1775 rxq->rx_stage[i + j] = rxep[j];
1777 if (nb_dd != IAVF_LOOK_AHEAD)
1781 /* Clear software ring entries */
1782 for (i = 0; i < nb_rx; i++)
1783 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1789 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1791 volatile union iavf_rx_desc *rxdp;
1792 struct rte_mbuf **rxep;
1793 struct rte_mbuf *mb;
1797 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1798 int32_t i, j, nb_rx = 0;
1800 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1802 rxdp = &rxq->rx_ring[rxq->rx_tail];
1803 rxep = &rxq->sw_ring[rxq->rx_tail];
1805 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1806 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1807 IAVF_RXD_QW1_STATUS_SHIFT;
1809 /* Make sure there is at least 1 packet to receive */
1810 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1813 /* Scan LOOK_AHEAD descriptors at a time to determine which
1814 * descriptors reference packets that are ready to be received.
1816 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1817 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1818 /* Read desc statuses backwards to avoid race condition */
1819 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1820 qword1 = rte_le_to_cpu_64(
1821 rxdp[j].wb.qword1.status_error_len);
1822 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1823 IAVF_RXD_QW1_STATUS_SHIFT;
1828 /* Compute how many status bits were set */
1829 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1830 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1834 /* Translate descriptor info to mbuf parameters */
1835 for (j = 0; j < nb_dd; j++) {
1836 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1837 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1840 qword1 = rte_le_to_cpu_64
1841 (rxdp[j].wb.qword1.status_error_len);
1842 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1843 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1844 mb->data_len = pkt_len;
1845 mb->pkt_len = pkt_len;
1847 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1848 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1850 ptype_tbl[(uint8_t)((qword1 &
1851 IAVF_RXD_QW1_PTYPE_MASK) >>
1852 IAVF_RXD_QW1_PTYPE_SHIFT)];
1854 if (pkt_flags & PKT_RX_RSS_HASH)
1855 mb->hash.rss = rte_le_to_cpu_32(
1856 rxdp[j].wb.qword0.hi_dword.rss);
1858 if (pkt_flags & PKT_RX_FDIR)
1859 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1861 mb->ol_flags |= pkt_flags;
1864 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1865 rxq->rx_stage[i + j] = rxep[j];
1867 if (nb_dd != IAVF_LOOK_AHEAD)
1871 /* Clear software ring entries */
1872 for (i = 0; i < nb_rx; i++)
1873 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1878 static inline uint16_t
1879 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1880 struct rte_mbuf **rx_pkts,
1884 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1886 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1888 for (i = 0; i < nb_pkts; i++)
1889 rx_pkts[i] = stage[i];
1891 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1892 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1898 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1900 volatile union iavf_rx_desc *rxdp;
1901 struct rte_mbuf **rxep;
1902 struct rte_mbuf *mb;
1903 uint16_t alloc_idx, i;
1907 /* Allocate buffers in bulk */
1908 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1909 (rxq->rx_free_thresh - 1));
1910 rxep = &rxq->sw_ring[alloc_idx];
1911 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1912 rxq->rx_free_thresh);
1913 if (unlikely(diag != 0)) {
1914 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1918 rxdp = &rxq->rx_ring[alloc_idx];
1919 for (i = 0; i < rxq->rx_free_thresh; i++) {
1920 if (likely(i < (rxq->rx_free_thresh - 1)))
1921 /* Prefetch next mbuf */
1922 rte_prefetch0(rxep[i + 1]);
1925 rte_mbuf_refcnt_set(mb, 1);
1927 mb->data_off = RTE_PKTMBUF_HEADROOM;
1929 mb->port = rxq->port_id;
1930 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1931 rxdp[i].read.hdr_addr = 0;
1932 rxdp[i].read.pkt_addr = dma_addr;
1935 /* Update rx tail register */
1937 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1939 rxq->rx_free_trigger =
1940 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1941 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1942 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1947 static inline uint16_t
1948 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1950 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1956 if (rxq->rx_nb_avail)
1957 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1959 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1960 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1962 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1963 rxq->rx_next_avail = 0;
1964 rxq->rx_nb_avail = nb_rx;
1965 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1967 if (rxq->rx_tail > rxq->rx_free_trigger) {
1968 if (iavf_rx_alloc_bufs(rxq) != 0) {
1971 /* TODO: count rx_mbuf_alloc_failed here */
1973 rxq->rx_nb_avail = 0;
1974 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1975 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1976 rxq->sw_ring[j] = rxq->rx_stage[i];
1982 if (rxq->rx_tail >= rxq->nb_rx_desc)
1985 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1986 rxq->port_id, rxq->queue_id,
1987 rxq->rx_tail, nb_rx);
1989 if (rxq->rx_nb_avail)
1990 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1996 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1997 struct rte_mbuf **rx_pkts,
2000 uint16_t nb_rx = 0, n, count;
2002 if (unlikely(nb_pkts == 0))
2005 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2006 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2009 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2010 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2011 nb_rx = (uint16_t)(nb_rx + count);
2012 nb_pkts = (uint16_t)(nb_pkts - count);
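/*
 * Worked example (illustrative): assuming IAVF_RX_MAX_BURST is 32, a request
 * for 100 packets is served by rx_recv_pkts() in chunks of 32 + 32 + 32 + 4,
 * stopping early whenever a chunk returns fewer packets than requested.
 */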
2021 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2023 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2024 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2025 uint16_t nb_tx_desc = txq->nb_tx_desc;
2026 uint16_t desc_to_clean_to;
2027 uint16_t nb_tx_to_clean;
2029 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2031 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2032 if (desc_to_clean_to >= nb_tx_desc)
2033 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2035 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2036 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2037 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2038 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2039 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2040 "(port=%d queue=%d)", desc_to_clean_to,
2041 txq->port_id, txq->queue_id);
2045 if (last_desc_cleaned > desc_to_clean_to)
2046 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2049 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2052 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2054 txq->last_desc_cleaned = desc_to_clean_to;
2055 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2060 /* Check if the context descriptor is needed for TX offloading */
2061 static inline uint16_t
2062 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2064 if (flags & PKT_TX_TCP_SEG)
2066 if (flags & PKT_TX_VLAN_PKT &&
2067 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2073 iavf_txd_enable_checksum(uint64_t ol_flags,
2075 uint32_t *td_offset,
2076 union iavf_tx_offload tx_offload)
2079 *td_offset |= (tx_offload.l2_len >> 1) <<
2080 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2082 /* Enable L3 checksum offloads */
2083 if (ol_flags & PKT_TX_IP_CKSUM) {
2084 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2085 *td_offset |= (tx_offload.l3_len >> 2) <<
2086 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2087 } else if (ol_flags & PKT_TX_IPV4) {
2088 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2089 *td_offset |= (tx_offload.l3_len >> 2) <<
2090 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2091 } else if (ol_flags & PKT_TX_IPV6) {
2092 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2093 *td_offset |= (tx_offload.l3_len >> 2) <<
2094 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2097 if (ol_flags & PKT_TX_TCP_SEG) {
2098 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2099 *td_offset |= (tx_offload.l4_len >> 2) <<
2100 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2104 /* Enable L4 checksum offloads */
2105 switch (ol_flags & PKT_TX_L4_MASK) {
2106 case PKT_TX_TCP_CKSUM:
2107 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2108 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2109 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2111 case PKT_TX_SCTP_CKSUM:
2112 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2113 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2114 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2116 case PKT_TX_UDP_CKSUM:
2117 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2118 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2119 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2126 /* set TSO context descriptor
2127 * supports IP -> L4 and IP -> IP -> L4
2129 static inline uint64_t
2130 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2132 uint64_t ctx_desc = 0;
2133 uint32_t cd_cmd, hdr_len, cd_tso_len;
2135 if (!tx_offload.l4_len) {
2136 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2140 hdr_len = tx_offload.l2_len +
2144 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2145 cd_tso_len = mbuf->pkt_len - hdr_len;
2146 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2147 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2148 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
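/*
 * Worked example (illustrative): for a TSO mbuf with l2_len = 14,
 * l3_len = 20, l4_len = 20, pkt_len = 9014 and tso_segsz = 1448, the header
 * length is 54, so the context descriptor carries TSO_LEN = 9014 - 54 = 8960
 * and MSS = 1448.
 */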
2153 /* Construct the tx flags */
2154 static inline uint64_t
2155 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2158 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2159 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2160 ((uint64_t)td_offset <<
2161 IAVF_TXD_QW1_OFFSET_SHIFT) |
2163 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2164 ((uint64_t)td_tag <<
2165 IAVF_TXD_QW1_L2TAG1_SHIFT));
2170 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2172 volatile struct iavf_tx_desc *txd;
2173 volatile struct iavf_tx_desc *txr;
2174 struct iavf_tx_queue *txq;
2175 struct iavf_tx_entry *sw_ring;
2176 struct iavf_tx_entry *txe, *txn;
2177 struct rte_mbuf *tx_pkt;
2178 struct rte_mbuf *m_seg;
2189 uint64_t buf_dma_addr;
2190 uint16_t cd_l2tag2 = 0;
2191 union iavf_tx_offload tx_offload = {0};
2194 sw_ring = txq->sw_ring;
2196 tx_id = txq->tx_tail;
2197 txe = &sw_ring[tx_id];
2199 /* Check if the descriptor ring needs to be cleaned. */
2200 if (txq->nb_free < txq->free_thresh)
2201 (void)iavf_xmit_cleanup(txq);
2203 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2208 tx_pkt = *tx_pkts++;
2209 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2211 ol_flags = tx_pkt->ol_flags;
2212 tx_offload.l2_len = tx_pkt->l2_len;
2213 tx_offload.l3_len = tx_pkt->l3_len;
2214 tx_offload.l4_len = tx_pkt->l4_len;
2215 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2216 /* Calculate the number of context descriptors needed. */
2217 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2219 /* The number of descriptors that must be allocated for
2220 * a packet equals the number of segments of that
2221 * packet plus 1 context descriptor if needed.
2223 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2224 tx_last = (uint16_t)(tx_id + nb_used - 1);
2227 if (tx_last >= txq->nb_tx_desc)
2228 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2230 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2231 " tx_first=%u tx_last=%u",
2232 txq->port_id, txq->queue_id, tx_id, tx_last);
2234 if (nb_used > txq->nb_free) {
2235 if (iavf_xmit_cleanup(txq)) {
2240 if (unlikely(nb_used > txq->rs_thresh)) {
2241 while (nb_used > txq->nb_free) {
2242 if (iavf_xmit_cleanup(txq)) {
2251 /* Descriptor based VLAN insertion */
2252 if (ol_flags & PKT_TX_VLAN_PKT &&
2253 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2254 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2255 td_tag = tx_pkt->vlan_tci;
2258 /* According to datasheet, the bit2 is reserved and must be
2263 /* Enable checksum offloading */
2264 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2265 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2266 &td_offset, tx_offload);
2269 /* Setup TX context descriptor if required */
2270 uint64_t cd_type_cmd_tso_mss =
2271 IAVF_TX_DESC_DTYPE_CONTEXT;
2272 volatile struct iavf_tx_context_desc *ctx_txd =
2273 (volatile struct iavf_tx_context_desc *)
2276 /* Clear QW0, or the previous writeback value
2277 * may impact the next write. */
2279 *(volatile uint64_t *)ctx_txd = 0;
2281 txn = &sw_ring[txe->next_id];
2282 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2284 rte_pktmbuf_free_seg(txe->mbuf);
2289 if (ol_flags & PKT_TX_TCP_SEG)
2290 cd_type_cmd_tso_mss |=
2291 iavf_set_tso_ctx(tx_pkt, tx_offload);
2293 if (ol_flags & PKT_TX_VLAN_PKT &&
2294 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2295 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2296 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2297 cd_l2tag2 = tx_pkt->vlan_tci;
2300 ctx_txd->type_cmd_tso_mss =
2301 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2302 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2304 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2305 txe->last_id = tx_last;
2306 tx_id = txe->next_id;
2313 txn = &sw_ring[txe->next_id];
2316 rte_pktmbuf_free_seg(txe->mbuf);
2319 /* Setup TX Descriptor */
2320 slen = m_seg->data_len;
2321 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2322 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2323 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2328 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2329 txe->last_id = tx_last;
2330 tx_id = txe->next_id;
2332 m_seg = m_seg->next;
2335 /* The last packet data descriptor needs End Of Packet (EOP) */
2336 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2337 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2338 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2340 if (txq->nb_used >= txq->rs_thresh) {
2341 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2342 "%4u (port=%d queue=%d)",
2343 tx_last, txq->port_id, txq->queue_id);
2345 td_cmd |= IAVF_TX_DESC_CMD_RS;
2347 /* Update txq RS bit counters */
2351 txd->cmd_type_offset_bsz |=
2352 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2353 IAVF_TXD_QW1_CMD_SHIFT);
2354 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2360 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2361 txq->port_id, txq->queue_id, tx_id, nb_tx);
2363 IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
2364 txq->tx_tail = tx_id;
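/* Doorbell: the queue tail register is written once per burst, after
 * all descriptors for the burst have been filled in, using a relaxed
 * write-combining store; the software tail is cached in tx_tail.
 */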
2369 /* Check if the packet with VLAN user priority is transmitted in the correct queue. */
2373 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2375 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2376 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2379 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2381 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2382 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot be transmitted in queue %u\n",
2390 /* TX prep functions */
2392 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2398 struct iavf_tx_queue *txq = tx_queue;
2399 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2400 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2402 for (i = 0; i < nb_pkts; i++) {
2404 ol_flags = m->ol_flags;
2406 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2407 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2408 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2412 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2413 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2414 /* An MSS outside this range is considered malicious */
2419 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2420 rte_errno = ENOTSUP;
2424 #ifdef RTE_ETHDEV_DEBUG_TX
2425 ret = rte_validate_tx_offload(m);
2431 ret = rte_net_intel_cksum_prepare(m);
2437 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2438 ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
2439 ret = iavf_check_vlan_up2tc(txq, m);
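/* With the QOS capability negotiated, packets carrying a VLAN tag are
 * also checked against the queue's TC priority map (see
 * iavf_check_vlan_up2tc() above) before being accepted for transmit.
 */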
2450 /* Choose the Rx function */
2452 iavf_set_rx_function(struct rte_eth_dev *dev)
2454 struct iavf_adapter *adapter =
2455 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2456 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2459 struct iavf_rx_queue *rxq;
2462 bool use_avx2 = false;
2463 bool use_avx512 = false;
2464 bool use_flex = false;
2466 check_ret = iavf_rx_vec_dev_check(dev);
2467 if (check_ret >= 0 &&
2468 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2469 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2470 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2471 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2474 #ifdef CC_AVX512_SUPPORT
2475 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2476 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2477 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2481 if (vf->vf_res->vf_cap_flags &
2482 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
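/* use_flex selects the flexible Rx descriptor (flex_rxd) variants of
 * the burst functions chosen below; it requires the PF to advertise
 * VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
 */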
2485 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2486 rxq = dev->data->rx_queues[i];
2487 (void)iavf_rxq_vec_setup(rxq);
2490 if (dev->data->scattered_rx) {
2493 "Using %sVector Scattered Rx (port %d).",
2494 use_avx2 ? "avx2 " : "",
2495 dev->data->port_id);
2497 if (check_ret == IAVF_VECTOR_PATH)
2499 "Using AVX512 Vector Scattered Rx (port %d).",
2500 dev->data->port_id);
2503 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2504 dev->data->port_id);
2507 dev->rx_pkt_burst = use_avx2 ?
2508 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2509 iavf_recv_scattered_pkts_vec_flex_rxd;
2510 #ifdef CC_AVX512_SUPPORT
2512 if (check_ret == IAVF_VECTOR_PATH)
2514 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2517 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2521 dev->rx_pkt_burst = use_avx2 ?
2522 iavf_recv_scattered_pkts_vec_avx2 :
2523 iavf_recv_scattered_pkts_vec;
2524 #ifdef CC_AVX512_SUPPORT
2526 if (check_ret == IAVF_VECTOR_PATH)
2528 iavf_recv_scattered_pkts_vec_avx512;
2531 iavf_recv_scattered_pkts_vec_avx512_offload;
2537 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2538 use_avx2 ? "avx2 " : "",
2539 dev->data->port_id);
2541 if (check_ret == IAVF_VECTOR_PATH)
2543 "Using AVX512 Vector Rx (port %d).",
2544 dev->data->port_id);
2547 "Using AVX512 OFFLOAD Vector Rx (port %d).",
2548 dev->data->port_id);
2551 dev->rx_pkt_burst = use_avx2 ?
2552 iavf_recv_pkts_vec_avx2_flex_rxd :
2553 iavf_recv_pkts_vec_flex_rxd;
2554 #ifdef CC_AVX512_SUPPORT
2556 if (check_ret == IAVF_VECTOR_PATH)
2558 iavf_recv_pkts_vec_avx512_flex_rxd;
2561 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
2565 dev->rx_pkt_burst = use_avx2 ?
2566 iavf_recv_pkts_vec_avx2 :
2568 #ifdef CC_AVX512_SUPPORT
2570 if (check_ret == IAVF_VECTOR_PATH)
2572 iavf_recv_pkts_vec_avx512;
2575 iavf_recv_pkts_vec_avx512_offload;
2585 if (dev->data->scattered_rx) {
2586 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2587 dev->data->port_id);
2588 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2589 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2591 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2592 } else if (adapter->rx_bulk_alloc_allowed) {
2593 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2594 dev->data->port_id);
2595 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2597 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2598 dev->data->port_id);
2599 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2600 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2602 dev->rx_pkt_burst = iavf_recv_pkts;
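/* Rx path selection order: AVX512 (when compiled in and supported),
 * then AVX2, then SSE for the vector paths; otherwise the scattered,
 * bulk-allocation or basic scalar paths are used, with the scattered
 * and basic paths switching to their flex-descriptor variants when
 * VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is negotiated.
 */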
2606 /* Choose the Tx function */
2608 iavf_set_tx_function(struct rte_eth_dev *dev)
2611 struct iavf_tx_queue *txq;
2614 bool use_sse = false;
2615 bool use_avx2 = false;
2616 bool use_avx512 = false;
2618 check_ret = iavf_tx_vec_dev_check(dev);
2620 if (check_ret >= 0 &&
2621 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2622 /* SSE and AVX2 do not support the offload path yet. */
2623 if (check_ret == IAVF_VECTOR_PATH) {
2625 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2626 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2627 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2630 #ifdef CC_AVX512_SUPPORT
2631 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2632 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2633 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2637 if (!use_sse && !use_avx2 && !use_avx512)
2641 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2642 use_avx2 ? "avx2 " : "",
2643 dev->data->port_id);
2644 dev->tx_pkt_burst = use_avx2 ?
2645 iavf_xmit_pkts_vec_avx2 :
2648 dev->tx_pkt_prepare = NULL;
2649 #ifdef CC_AVX512_SUPPORT
2651 if (check_ret == IAVF_VECTOR_PATH) {
2652 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2653 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
2654 dev->data->port_id);
2656 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
2657 dev->tx_pkt_prepare = iavf_prep_pkts;
2658 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
2659 dev->data->port_id);
2664 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2665 txq = dev->data->tx_queues[i];
2668 #ifdef CC_AVX512_SUPPORT
2670 iavf_txq_vec_setup_avx512(txq);
2672 iavf_txq_vec_setup(txq);
2674 iavf_txq_vec_setup(txq);
2683 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2684 dev->data->port_id);
2685 dev->tx_pkt_burst = iavf_xmit_pkts;
2686 dev->tx_pkt_prepare = iavf_prep_pkts;
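/* The vector Tx paths skip the prepare stage (tx_pkt_prepare = NULL),
 * except the AVX512 offload path which keeps iavf_prep_pkts; the basic
 * path always pairs iavf_xmit_pkts with iavf_prep_pkts.
 */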
2690 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2693 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2694 uint16_t i, tx_last, tx_id;
2695 uint16_t nb_tx_free_last;
2696 uint16_t nb_tx_to_clean;
2699 /* Start freeing mbufs from the entry following tx_tail */
2700 tx_last = txq->tx_tail;
2701 tx_id = swr_ring[tx_last].next_id;
2703 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2706 nb_tx_to_clean = txq->nb_free;
2707 nb_tx_free_last = txq->nb_free;
2709 free_cnt = txq->nb_tx_desc;
2711 /* Loop through swr_ring to count the number of
2712 * freeable mbufs and packets. */
2714 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2715 for (i = 0; i < nb_tx_to_clean &&
2716 pkt_cnt < free_cnt &&
2717 tx_id != tx_last; i++) {
2718 if (swr_ring[tx_id].mbuf != NULL) {
2719 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2720 swr_ring[tx_id].mbuf = NULL;
2723 /* If this is the last segment in the packet,
2724 * increment the packet count. */
2726 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2729 tx_id = swr_ring[tx_id].next_id;
2732 if (txq->rs_thresh > txq->nb_tx_desc -
2733 txq->nb_free || tx_id == tx_last)
2736 if (pkt_cnt < free_cnt) {
2737 if (iavf_xmit_cleanup(txq))
2740 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2741 nb_tx_free_last = txq->nb_free;
2745 return (int)pkt_cnt;
2749 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2751 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2753 return iavf_tx_done_cleanup_full(q, free_cnt);
2757 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2758 struct rte_eth_rxq_info *qinfo)
2760 struct iavf_rx_queue *rxq;
2762 rxq = dev->data->rx_queues[queue_id];
2764 qinfo->mp = rxq->mp;
2765 qinfo->scattered_rx = dev->data->scattered_rx;
2766 qinfo->nb_desc = rxq->nb_rx_desc;
2768 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2769 qinfo->conf.rx_drop_en = true;
2770 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2774 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2775 struct rte_eth_txq_info *qinfo)
2777 struct iavf_tx_queue *txq;
2779 txq = dev->data->tx_queues[queue_id];
2781 qinfo->nb_desc = txq->nb_tx_desc;
2783 qinfo->conf.tx_free_thresh = txq->free_thresh;
2784 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2785 qinfo->conf.offloads = txq->offloads;
2786 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2789 /* Get the number of used descriptors of an Rx queue */
2791 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2793 #define IAVF_RXQ_SCAN_INTERVAL 4
2794 volatile union iavf_rx_desc *rxdp;
2795 struct iavf_rx_queue *rxq;
2798 rxq = dev->data->rx_queues[queue_id];
2799 rxdp = &rxq->rx_ring[rxq->rx_tail];
2801 while ((desc < rxq->nb_rx_desc) &&
2802 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2803 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2804 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2805 /* Check the DD bit of one Rx descriptor in each group of 4,
2806 * to avoid checking too frequently and degrading performance. */
2809 desc += IAVF_RXQ_SCAN_INTERVAL;
2810 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2811 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2812 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2813 desc - rxq->nb_rx_desc]);
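/* The scan advances in steps of IAVF_RXQ_SCAN_INTERVAL, so the count
 * returned is a multiple of 4 and only an approximation of the exact
 * number of used descriptors.
 */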
2820 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2822 struct iavf_rx_queue *rxq = rx_queue;
2823 volatile uint64_t *status;
2827 if (unlikely(offset >= rxq->nb_rx_desc))
2830 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2831 return RTE_ETH_RX_DESC_UNAVAIL;
2833 desc = rxq->rx_tail + offset;
2834 if (desc >= rxq->nb_rx_desc)
2835 desc -= rxq->nb_rx_desc;
2837 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2838 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2839 << IAVF_RXD_QW1_STATUS_SHIFT);
2841 return RTE_ETH_RX_DESC_DONE;
2843 return RTE_ETH_RX_DESC_AVAIL;
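/* Summary: offsets beyond the usable ring (nb_rx_desc - nb_rx_hold)
 * report UNAVAIL, descriptors with the DD bit set report DONE, and the
 * remainder report AVAIL (still owned by the hardware).
 */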
2847 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2849 struct iavf_tx_queue *txq = tx_queue;
2850 volatile uint64_t *status;
2851 uint64_t mask, expect;
2854 if (unlikely(offset >= txq->nb_tx_desc))
2857 desc = txq->tx_tail + offset;
2858 /* Go to the next descriptor that has the RS bit set */
2859 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2861 if (desc >= txq->nb_tx_desc) {
2862 desc -= txq->nb_tx_desc;
2863 if (desc >= txq->nb_tx_desc)
2864 desc -= txq->nb_tx_desc;
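/* Example: with rs_thresh = 32 and tx_tail + offset = 40, desc rounds
 * up to 64, the next descriptor carrying an RS bit; its completion
 * status then stands for the whole preceding group of descriptors.
 */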
2867 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2868 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2869 expect = rte_cpu_to_le_64(
2870 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2871 if ((*status & mask) == expect)
2872 return RTE_ETH_TX_DESC_DONE;
2874 return RTE_ETH_TX_DESC_FULL;
2878 iavf_get_default_ptype_table(void)
2880 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2881 __rte_cache_aligned = {
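/* Each entry maps a hardware packet-type index reported in the Rx
 * descriptor to its RTE_PTYPE_* combination; indexes not listed stay
 * 0 (RTE_PTYPE_UNKNOWN).
 */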
2884 [1] = RTE_PTYPE_L2_ETHER,
2885 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2886 /* [3] - [5] reserved */
2887 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2888 /* [7] - [10] reserved */
2889 [11] = RTE_PTYPE_L2_ETHER_ARP,
2890 /* [12] - [21] reserved */
2892 /* Non tunneled IPv4 */
2893 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2895 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2896 RTE_PTYPE_L4_NONFRAG,
2897 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2900 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2902 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2904 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2908 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2909 RTE_PTYPE_TUNNEL_IP |
2910 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2911 RTE_PTYPE_INNER_L4_FRAG,
2912 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2913 RTE_PTYPE_TUNNEL_IP |
2914 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2915 RTE_PTYPE_INNER_L4_NONFRAG,
2916 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2917 RTE_PTYPE_TUNNEL_IP |
2918 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2919 RTE_PTYPE_INNER_L4_UDP,
2921 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2922 RTE_PTYPE_TUNNEL_IP |
2923 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2924 RTE_PTYPE_INNER_L4_TCP,
2925 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2926 RTE_PTYPE_TUNNEL_IP |
2927 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2928 RTE_PTYPE_INNER_L4_SCTP,
2929 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2930 RTE_PTYPE_TUNNEL_IP |
2931 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2932 RTE_PTYPE_INNER_L4_ICMP,
2935 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2936 RTE_PTYPE_TUNNEL_IP |
2937 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2938 RTE_PTYPE_INNER_L4_FRAG,
2939 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2940 RTE_PTYPE_TUNNEL_IP |
2941 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2942 RTE_PTYPE_INNER_L4_NONFRAG,
2943 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2944 RTE_PTYPE_TUNNEL_IP |
2945 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2946 RTE_PTYPE_INNER_L4_UDP,
2948 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2949 RTE_PTYPE_TUNNEL_IP |
2950 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2951 RTE_PTYPE_INNER_L4_TCP,
2952 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2953 RTE_PTYPE_TUNNEL_IP |
2954 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2955 RTE_PTYPE_INNER_L4_SCTP,
2956 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2957 RTE_PTYPE_TUNNEL_IP |
2958 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2959 RTE_PTYPE_INNER_L4_ICMP,
2961 /* IPv4 --> GRE/Teredo/VXLAN */
2962 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2963 RTE_PTYPE_TUNNEL_GRENAT,
2965 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2966 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2967 RTE_PTYPE_TUNNEL_GRENAT |
2968 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2969 RTE_PTYPE_INNER_L4_FRAG,
2970 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2971 RTE_PTYPE_TUNNEL_GRENAT |
2972 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2973 RTE_PTYPE_INNER_L4_NONFRAG,
2974 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2975 RTE_PTYPE_TUNNEL_GRENAT |
2976 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2977 RTE_PTYPE_INNER_L4_UDP,
2979 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2980 RTE_PTYPE_TUNNEL_GRENAT |
2981 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2982 RTE_PTYPE_INNER_L4_TCP,
2983 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2984 RTE_PTYPE_TUNNEL_GRENAT |
2985 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2986 RTE_PTYPE_INNER_L4_SCTP,
2987 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2988 RTE_PTYPE_TUNNEL_GRENAT |
2989 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2990 RTE_PTYPE_INNER_L4_ICMP,
2992 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2993 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2994 RTE_PTYPE_TUNNEL_GRENAT |
2995 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2996 RTE_PTYPE_INNER_L4_FRAG,
2997 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2998 RTE_PTYPE_TUNNEL_GRENAT |
2999 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3000 RTE_PTYPE_INNER_L4_NONFRAG,
3001 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3002 RTE_PTYPE_TUNNEL_GRENAT |
3003 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3004 RTE_PTYPE_INNER_L4_UDP,
3006 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3007 RTE_PTYPE_TUNNEL_GRENAT |
3008 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3009 RTE_PTYPE_INNER_L4_TCP,
3010 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3011 RTE_PTYPE_TUNNEL_GRENAT |
3012 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3013 RTE_PTYPE_INNER_L4_SCTP,
3014 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3015 RTE_PTYPE_TUNNEL_GRENAT |
3016 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3017 RTE_PTYPE_INNER_L4_ICMP,
3019 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3020 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3021 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3023 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3024 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3025 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3026 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3027 RTE_PTYPE_INNER_L4_FRAG,
3028 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3029 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3030 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3031 RTE_PTYPE_INNER_L4_NONFRAG,
3032 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3033 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3034 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3035 RTE_PTYPE_INNER_L4_UDP,
3037 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3038 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3039 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3040 RTE_PTYPE_INNER_L4_TCP,
3041 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3042 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3043 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3044 RTE_PTYPE_INNER_L4_SCTP,
3045 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3046 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3047 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3048 RTE_PTYPE_INNER_L4_ICMP,
3050 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3051 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3052 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3053 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3054 RTE_PTYPE_INNER_L4_FRAG,
3055 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3056 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3057 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3058 RTE_PTYPE_INNER_L4_NONFRAG,
3059 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3060 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3061 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3062 RTE_PTYPE_INNER_L4_UDP,
3064 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3065 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3066 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3067 RTE_PTYPE_INNER_L4_TCP,
3068 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3069 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3070 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3071 RTE_PTYPE_INNER_L4_SCTP,
3072 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3073 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3074 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3075 RTE_PTYPE_INNER_L4_ICMP,
3076 /* [73] - [87] reserved */
3078 /* Non tunneled IPv6 */
3079 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3081 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3082 RTE_PTYPE_L4_NONFRAG,
3083 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3086 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3088 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3090 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3094 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3095 RTE_PTYPE_TUNNEL_IP |
3096 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3097 RTE_PTYPE_INNER_L4_FRAG,
3098 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3099 RTE_PTYPE_TUNNEL_IP |
3100 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3101 RTE_PTYPE_INNER_L4_NONFRAG,
3102 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3103 RTE_PTYPE_TUNNEL_IP |
3104 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3105 RTE_PTYPE_INNER_L4_UDP,
3107 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3108 RTE_PTYPE_TUNNEL_IP |
3109 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3110 RTE_PTYPE_INNER_L4_TCP,
3111 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3112 RTE_PTYPE_TUNNEL_IP |
3113 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3114 RTE_PTYPE_INNER_L4_SCTP,
3115 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3116 RTE_PTYPE_TUNNEL_IP |
3117 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3118 RTE_PTYPE_INNER_L4_ICMP,
3121 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3122 RTE_PTYPE_TUNNEL_IP |
3123 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3124 RTE_PTYPE_INNER_L4_FRAG,
3125 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3126 RTE_PTYPE_TUNNEL_IP |
3127 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3128 RTE_PTYPE_INNER_L4_NONFRAG,
3129 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3130 RTE_PTYPE_TUNNEL_IP |
3131 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3132 RTE_PTYPE_INNER_L4_UDP,
3133 /* [105] reserved */
3134 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3135 RTE_PTYPE_TUNNEL_IP |
3136 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3137 RTE_PTYPE_INNER_L4_TCP,
3138 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3139 RTE_PTYPE_TUNNEL_IP |
3140 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3141 RTE_PTYPE_INNER_L4_SCTP,
3142 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3143 RTE_PTYPE_TUNNEL_IP |
3144 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3145 RTE_PTYPE_INNER_L4_ICMP,
3147 /* IPv6 --> GRE/Teredo/VXLAN */
3148 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3149 RTE_PTYPE_TUNNEL_GRENAT,
3151 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3152 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3153 RTE_PTYPE_TUNNEL_GRENAT |
3154 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3155 RTE_PTYPE_INNER_L4_FRAG,
3156 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3157 RTE_PTYPE_TUNNEL_GRENAT |
3158 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3159 RTE_PTYPE_INNER_L4_NONFRAG,
3160 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3161 RTE_PTYPE_TUNNEL_GRENAT |
3162 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3163 RTE_PTYPE_INNER_L4_UDP,
3164 /* [113] reserved */
3165 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3166 RTE_PTYPE_TUNNEL_GRENAT |
3167 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3168 RTE_PTYPE_INNER_L4_TCP,
3169 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3170 RTE_PTYPE_TUNNEL_GRENAT |
3171 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3172 RTE_PTYPE_INNER_L4_SCTP,
3173 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3174 RTE_PTYPE_TUNNEL_GRENAT |
3175 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3176 RTE_PTYPE_INNER_L4_ICMP,
3178 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3179 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3180 RTE_PTYPE_TUNNEL_GRENAT |
3181 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3182 RTE_PTYPE_INNER_L4_FRAG,
3183 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3184 RTE_PTYPE_TUNNEL_GRENAT |
3185 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3186 RTE_PTYPE_INNER_L4_NONFRAG,
3187 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3188 RTE_PTYPE_TUNNEL_GRENAT |
3189 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3190 RTE_PTYPE_INNER_L4_UDP,
3191 /* [120] reserved */
3192 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3193 RTE_PTYPE_TUNNEL_GRENAT |
3194 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3195 RTE_PTYPE_INNER_L4_TCP,
3196 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3197 RTE_PTYPE_TUNNEL_GRENAT |
3198 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3199 RTE_PTYPE_INNER_L4_SCTP,
3200 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3201 RTE_PTYPE_TUNNEL_GRENAT |
3202 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3203 RTE_PTYPE_INNER_L4_ICMP,
3205 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3206 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3207 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3209 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3210 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3211 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3212 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3213 RTE_PTYPE_INNER_L4_FRAG,
3214 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3215 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3216 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3217 RTE_PTYPE_INNER_L4_NONFRAG,
3218 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3219 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3221 RTE_PTYPE_INNER_L4_UDP,
3222 /* [128] reserved */
3223 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3224 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3226 RTE_PTYPE_INNER_L4_TCP,
3227 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3228 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3229 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3230 RTE_PTYPE_INNER_L4_SCTP,
3231 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3232 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3234 RTE_PTYPE_INNER_L4_ICMP,
3236 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3237 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3238 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3239 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3240 RTE_PTYPE_INNER_L4_FRAG,
3241 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3242 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3243 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3244 RTE_PTYPE_INNER_L4_NONFRAG,
3245 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3246 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3247 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3248 RTE_PTYPE_INNER_L4_UDP,
3249 /* [135] reserved */
3250 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3251 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3252 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3253 RTE_PTYPE_INNER_L4_TCP,
3254 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3255 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3257 RTE_PTYPE_INNER_L4_SCTP,
3258 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3259 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3261 RTE_PTYPE_INNER_L4_ICMP,
3262 /* [139] - [299] reserved */
3265 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3266 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3268 /* PPPoE --> IPv4 */
3269 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3270 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3272 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3273 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3274 RTE_PTYPE_L4_NONFRAG,
3275 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3276 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3278 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3279 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3281 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3282 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3284 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3285 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3288 /* PPPoE --> IPv6 */
3289 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3290 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3292 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3293 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3294 RTE_PTYPE_L4_NONFRAG,
3295 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3296 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3298 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3299 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3301 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3302 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3304 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3305 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3307 /* [314] - [324] reserved */
3309 /* IPv4/IPv6 --> GTPC/GTPU */
3310 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3311 RTE_PTYPE_TUNNEL_GTPC,
3312 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_GTPC,
3314 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3315 RTE_PTYPE_TUNNEL_GTPC,
3316 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GTPC,
3318 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3319 RTE_PTYPE_TUNNEL_GTPU,
3320 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_GTPU,
3323 /* IPv4 --> GTPU --> IPv4 */
3324 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3325 RTE_PTYPE_TUNNEL_GTPU |
3326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3327 RTE_PTYPE_INNER_L4_FRAG,
3328 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3329 RTE_PTYPE_TUNNEL_GTPU |
3330 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3331 RTE_PTYPE_INNER_L4_NONFRAG,
3332 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3333 RTE_PTYPE_TUNNEL_GTPU |
3334 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3335 RTE_PTYPE_INNER_L4_UDP,
3336 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3337 RTE_PTYPE_TUNNEL_GTPU |
3338 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3339 RTE_PTYPE_INNER_L4_TCP,
3340 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3341 RTE_PTYPE_TUNNEL_GTPU |
3342 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3343 RTE_PTYPE_INNER_L4_ICMP,
3345 /* IPv6 --> GTPU --> IPv4 */
3346 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3347 RTE_PTYPE_TUNNEL_GTPU |
3348 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3349 RTE_PTYPE_INNER_L4_FRAG,
3350 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3351 RTE_PTYPE_TUNNEL_GTPU |
3352 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3353 RTE_PTYPE_INNER_L4_NONFRAG,
3354 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3355 RTE_PTYPE_TUNNEL_GTPU |
3356 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3357 RTE_PTYPE_INNER_L4_UDP,
3358 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3359 RTE_PTYPE_TUNNEL_GTPU |
3360 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_INNER_L4_TCP,
3362 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3363 RTE_PTYPE_TUNNEL_GTPU |
3364 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_INNER_L4_ICMP,
3367 /* IPv4 --> GTPU --> IPv6 */
3368 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3369 RTE_PTYPE_TUNNEL_GTPU |
3370 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3371 RTE_PTYPE_INNER_L4_FRAG,
3372 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3373 RTE_PTYPE_TUNNEL_GTPU |
3374 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3375 RTE_PTYPE_INNER_L4_NONFRAG,
3376 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3377 RTE_PTYPE_TUNNEL_GTPU |
3378 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3379 RTE_PTYPE_INNER_L4_UDP,
3380 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3381 RTE_PTYPE_TUNNEL_GTPU |
3382 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3383 RTE_PTYPE_INNER_L4_TCP,
3384 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3385 RTE_PTYPE_TUNNEL_GTPU |
3386 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3387 RTE_PTYPE_INNER_L4_ICMP,
3389 /* IPv6 --> GTPU --> IPv6 */
3390 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3391 RTE_PTYPE_TUNNEL_GTPU |
3392 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3393 RTE_PTYPE_INNER_L4_FRAG,
3394 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3395 RTE_PTYPE_TUNNEL_GTPU |
3396 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3397 RTE_PTYPE_INNER_L4_NONFRAG,
3398 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3399 RTE_PTYPE_TUNNEL_GTPU |
3400 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3401 RTE_PTYPE_INNER_L4_UDP,
3402 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3403 RTE_PTYPE_TUNNEL_GTPU |
3404 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3405 RTE_PTYPE_INNER_L4_TCP,
3406 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3407 RTE_PTYPE_TUNNEL_GTPU |
3408 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3409 RTE_PTYPE_INNER_L4_ICMP,
3411 /* IPv4 --> UDP ECPRI */
3412 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3414 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3416 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3418 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3420 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3422 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3424 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3426 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3428 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3430 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3433 /* IPV6 --> UDP ECPRI */
3434 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3436 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3438 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3440 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3442 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3444 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3446 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3448 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3450 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3452 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3454 /* All others reserved */