1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
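/* Map a protocol extraction type to the flexible Rx descriptor RXDID that
 * carries its metadata; unknown types fall back to the OVS descriptor
 * format.
 */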
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
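/* Power-monitor comparison callback: return -1 (stop waiting) once the DD
 * bit of the watched status/length qword is set, i.e. a new descriptor has
 * been written back.
 */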
61 iavf_monitor_callback(const uint64_t value,
62 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
64 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
66 * we expect the DD bit to be set to 1 if this descriptor was already
69 return (value & m) == m ? -1 : 0;
73 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
75 struct iavf_rx_queue *rxq = rx_queue;
76 volatile union iavf_rx_desc *rxdp;
80 rxdp = &rxq->rx_ring[desc];
81 /* watch for changes in status bit */
82 pmc->addr = &rxdp->wb.qword1.status_error_len;
84 /* comparison callback */
85 pmc->fn = iavf_monitor_callback;
87 /* registers are 64-bit */
88 pmc->size = sizeof(uint64_t);
94 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
96 /* The following constraints must be satisfied:
97 * thresh < rxq->nb_rx_desc
99 if (thresh >= nb_desc) {
100 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
108 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
109 uint16_t tx_free_thresh)
111 /* TX descriptors will have their RS bit set after tx_rs_thresh
112 * descriptors have been used. The TX descriptor ring will be cleaned
113 * after tx_free_thresh descriptors are used or if the number of
114 * descriptors required to transmit a packet is greater than the
115 * number of free TX descriptors.
117 * The following constraints must be satisfied:
118 * - tx_rs_thresh must be less than the size of the ring minus 2.
119 * - tx_free_thresh must be less than the size of the ring minus 3.
120 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
121 * - tx_rs_thresh must be a divisor of the ring size.
123 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
124 * race condition, hence the maximum threshold constraints. When set
125 * to zero use default values.
127 if (tx_rs_thresh >= (nb_desc - 2)) {
128 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
129 "number of TX descriptors (%u) minus 2",
130 tx_rs_thresh, nb_desc);
133 if (tx_free_thresh >= (nb_desc - 3)) {
134 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
135 "number of TX descriptors (%u) minus 3.",
136 tx_free_thresh, nb_desc);
139 if (tx_rs_thresh > tx_free_thresh) {
140 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
141 "equal to tx_free_thresh (%u).",
142 tx_rs_thresh, tx_free_thresh);
145 if ((nb_desc % tx_rs_thresh) != 0) {
146 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
147 "number of TX descriptors (%u).",
148 tx_rs_thresh, nb_desc);
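/* Vector Rx is only allowed when the free threshold is at least one vector
 * burst and evenly divides the ring size.
 */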
156 check_rx_vec_allow(struct iavf_rx_queue *rxq)
158 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
159 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
160 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
164 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
169 check_tx_vec_allow(struct iavf_tx_queue *txq)
171 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
172 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
173 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
174 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
177 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
182 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
186 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
187 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
188 "rxq->rx_free_thresh=%d, "
189 "IAVF_RX_MAX_BURST=%d",
190 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
192 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
193 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
194 "rxq->nb_rx_desc=%d, "
195 "rxq->rx_free_thresh=%d",
196 rxq->nb_rx_desc, rxq->rx_free_thresh);
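/* Return an Rx queue to its initial state: zero the descriptor ring, point
 * the trailing software ring entries at the fake mbuf, and rewind all queue
 * indexes.
 */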
203 reset_rx_queue(struct iavf_rx_queue *rxq)
211 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
213 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
214 ((volatile char *)rxq->rx_ring)[i] = 0;
216 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
218 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
219 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
222 rxq->rx_nb_avail = 0;
223 rxq->rx_next_avail = 0;
224 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
228 rxq->pkt_first_seg = NULL;
229 rxq->pkt_last_seg = NULL;
231 rxq->rxrearm_start = 0;
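/* Return a Tx queue to its initial state: mark every descriptor as done,
 * link the software ring entries into a ring, and reset the cleanup and RS
 * bookkeeping.
 */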
235 reset_tx_queue(struct iavf_tx_queue *txq)
237 struct iavf_tx_entry *txe;
242 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
247 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
248 for (i = 0; i < size; i++)
249 ((volatile char *)txq->tx_ring)[i] = 0;
251 prev = (uint16_t)(txq->nb_tx_desc - 1);
252 for (i = 0; i < txq->nb_tx_desc; i++) {
253 txq->tx_ring[i].cmd_type_offset_bsz =
254 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
257 txe[prev].next_id = i;
264 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
265 txq->nb_free = txq->nb_tx_desc - 1;
267 txq->next_dd = txq->rs_thresh - 1;
268 txq->next_rs = txq->rs_thresh - 1;
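/* Allocate one mbuf per descriptor and post its buffer address to the
 * hardware Rx ring.
 */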
272 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
274 volatile union iavf_rx_desc *rxd;
275 struct rte_mbuf *mbuf = NULL;
279 for (i = 0; i < rxq->nb_rx_desc; i++) {
280 mbuf = rte_mbuf_raw_alloc(rxq->mp);
281 if (unlikely(!mbuf)) {
282 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
286 rte_mbuf_refcnt_set(mbuf, 1);
288 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
290 mbuf->port = rxq->port_id;
293 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
295 rxd = &rxq->rx_ring[i];
296 rxd->read.pkt_addr = dma_addr;
297 rxd->read.hdr_addr = 0;
298 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
303 rxq->sw_ring[i] = mbuf;
310 release_rxq_mbufs(struct iavf_rx_queue *rxq)
317 for (i = 0; i < rxq->nb_rx_desc; i++) {
318 if (rxq->sw_ring[i]) {
319 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
320 rxq->sw_ring[i] = NULL;
325 if (rxq->rx_nb_avail == 0)
327 for (i = 0; i < rxq->rx_nb_avail; i++) {
328 struct rte_mbuf *mbuf;
330 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
331 rte_pktmbuf_free_seg(mbuf);
333 rxq->rx_nb_avail = 0;
337 release_txq_mbufs(struct iavf_tx_queue *txq)
341 if (!txq || !txq->sw_ring) {
342 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
346 for (i = 0; i < txq->nb_tx_desc; i++) {
347 if (txq->sw_ring[i].mbuf) {
348 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
349 txq->sw_ring[i].mbuf = NULL;
354 static const struct iavf_rxq_ops def_rxq_ops = {
355 .release_mbufs = release_rxq_mbufs,
358 static const struct iavf_txq_ops def_txq_ops = {
359 .release_mbufs = release_txq_mbufs,
363 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
365 volatile union iavf_rx_flex_desc *rxdp)
367 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
368 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
373 if (desc->flow_id != 0xFFFFFFFF) {
374 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
375 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
378 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
379 stat_err = rte_le_to_cpu_16(desc->status_error0);
380 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
381 mb->ol_flags |= PKT_RX_RSS_HASH;
382 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
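/* AUX v1 descriptor format: in addition to the RSS hash and FDIR ID,
 * protocol extraction metadata is read from the aux0/aux1 fields when the
 * corresponding STATUS1 valid bits are set.
 */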
388 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
390 volatile union iavf_rx_flex_desc *rxdp)
392 volatile struct iavf_32b_rx_flex_desc_comms *desc =
393 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
396 stat_err = rte_le_to_cpu_16(desc->status_error0);
397 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
398 mb->ol_flags |= PKT_RX_RSS_HASH;
399 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
402 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
403 if (desc->flow_id != 0xFFFFFFFF) {
404 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
405 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
408 if (rxq->xtr_ol_flag) {
409 uint32_t metadata = 0;
411 stat_err = rte_le_to_cpu_16(desc->status_error1);
413 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
414 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
416 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
418 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
421 mb->ol_flags |= rxq->xtr_ol_flag;
423 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
430 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
432 volatile union iavf_rx_flex_desc *rxdp)
434 volatile struct iavf_32b_rx_flex_desc_comms *desc =
435 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
438 stat_err = rte_le_to_cpu_16(desc->status_error0);
439 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
440 mb->ol_flags |= PKT_RX_RSS_HASH;
441 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
444 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
445 if (desc->flow_id != 0xFFFFFFFF) {
446 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
447 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
450 if (rxq->xtr_ol_flag) {
451 uint32_t metadata = 0;
453 if (desc->flex_ts.flex.aux0 != 0xFFFF)
454 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
455 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
456 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
459 mb->ol_flags |= rxq->xtr_ol_flag;
461 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
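/* Choose the descriptor-to-mbuf translation routine and the protocol
 * extraction ol_flag that match the queue's RXDID.
 */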
468 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
471 case IAVF_RXDID_COMMS_AUX_VLAN:
472 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
473 rxq->rxd_to_pkt_fields =
474 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
476 case IAVF_RXDID_COMMS_AUX_IPV4:
477 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
478 rxq->rxd_to_pkt_fields =
479 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
481 case IAVF_RXDID_COMMS_AUX_IPV6:
482 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
483 rxq->rxd_to_pkt_fields =
484 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
486 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
488 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
489 rxq->rxd_to_pkt_fields =
490 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
492 case IAVF_RXDID_COMMS_AUX_TCP:
493 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
494 rxq->rxd_to_pkt_fields =
495 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
497 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
499 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
500 rxq->rxd_to_pkt_fields =
501 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
503 case IAVF_RXDID_COMMS_OVS_1:
504 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
507 /* update this according to the RXDID for FLEX_DESC_NONE */
508 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
512 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
513 rxq->xtr_ol_flag = 0;
517 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
518 uint16_t nb_desc, unsigned int socket_id,
519 const struct rte_eth_rxconf *rx_conf,
520 struct rte_mempool *mp)
522 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
523 struct iavf_adapter *ad =
524 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
525 struct iavf_info *vf =
526 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
527 struct iavf_vsi *vsi = &vf->vsi;
528 struct iavf_rx_queue *rxq;
529 const struct rte_memzone *mz;
533 uint16_t rx_free_thresh;
536 PMD_INIT_FUNC_TRACE();
538 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
540 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
541 nb_desc > IAVF_MAX_RING_DESC ||
542 nb_desc < IAVF_MIN_RING_DESC) {
543 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
548 /* Check free threshold */
549 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
550 IAVF_DEFAULT_RX_FREE_THRESH :
551 rx_conf->rx_free_thresh;
552 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
555 /* Free memory if needed */
556 if (dev->data->rx_queues[queue_idx]) {
557 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
558 dev->data->rx_queues[queue_idx] = NULL;
561 /* Allocate the rx queue data structure */
562 rxq = rte_zmalloc_socket("iavf rxq",
563 sizeof(struct iavf_rx_queue),
567 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
568 "rx queue data structure");
572 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
573 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
575 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
576 rxq->proto_xtr = proto_xtr;
578 rxq->rxdid = IAVF_RXDID_LEGACY_1;
579 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
582 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
583 struct virtchnl_vlan_supported_caps *stripping_support =
584 &vf->vlan_v2_caps.offloads.stripping_support;
585 uint32_t stripping_cap;
587 if (stripping_support->outer)
588 stripping_cap = stripping_support->outer;
590 stripping_cap = stripping_support->inner;
592 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
593 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
594 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
595 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
597 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
600 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
603 rxq->nb_rx_desc = nb_desc;
604 rxq->rx_free_thresh = rx_free_thresh;
605 rxq->queue_id = queue_idx;
606 rxq->port_id = dev->data->port_id;
607 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
610 rxq->offloads = offloads;
612 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
613 rxq->crc_len = RTE_ETHER_CRC_LEN;
617 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
618 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
620 /* Allocate the software ring. */
621 len = nb_desc + IAVF_RX_MAX_BURST;
623 rte_zmalloc_socket("iavf rx sw ring",
624 sizeof(struct rte_mbuf *) * len,
628 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
633 /* Allocate the maximum number of RX ring hardware descriptors with
634 * a little more to support bulk allocation.
636 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
637 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
639 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
640 ring_size, IAVF_RING_BASE_ALIGN,
643 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
644 rte_free(rxq->sw_ring);
648 /* Zero all the descriptors in the ring. */
649 memset(mz->addr, 0, ring_size);
650 rxq->rx_ring_phys_addr = mz->iova;
651 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
656 dev->data->rx_queues[queue_idx] = rxq;
657 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
658 rxq->ops = &def_rxq_ops;
660 if (check_rx_bulk_allow(rxq) == true) {
661 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
662 "satisfied. Rx Burst Bulk Alloc function will be "
663 "used on port=%d, queue=%d.",
664 rxq->port_id, rxq->queue_id);
666 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
667 "not satisfied, Scattered Rx is requested "
668 "on port=%d, queue=%d.",
669 rxq->port_id, rxq->queue_id);
670 ad->rx_bulk_alloc_allowed = false;
673 if (check_rx_vec_allow(rxq) == false)
674 ad->rx_vec_allowed = false;
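/* Set up a Tx queue: validate the RS/free thresholds, pick the VLAN tag
 * insertion location supported by the PF, and allocate the software ring
 * and the DMA memory for the descriptor ring.
 */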
680 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
683 unsigned int socket_id,
684 const struct rte_eth_txconf *tx_conf)
686 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
687 struct iavf_info *vf =
688 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
689 struct iavf_tx_queue *txq;
690 const struct rte_memzone *mz;
692 uint16_t tx_rs_thresh, tx_free_thresh;
695 PMD_INIT_FUNC_TRACE();
697 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
699 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
700 nb_desc > IAVF_MAX_RING_DESC ||
701 nb_desc < IAVF_MIN_RING_DESC) {
702 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
707 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
708 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
709 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
710 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
711 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
714 /* Free memory if needed. */
715 if (dev->data->tx_queues[queue_idx]) {
716 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
717 dev->data->tx_queues[queue_idx] = NULL;
720 /* Allocate the TX queue data structure. */
721 txq = rte_zmalloc_socket("iavf txq",
722 sizeof(struct iavf_tx_queue),
726 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
727 "tx queue structure");
731 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
732 struct virtchnl_vlan_supported_caps *insertion_support =
733 &vf->vlan_v2_caps.offloads.insertion_support;
734 uint32_t insertion_cap;
736 if (insertion_support->outer)
737 insertion_cap = insertion_support->outer;
739 insertion_cap = insertion_support->inner;
741 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
742 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
743 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
744 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
746 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
749 txq->nb_tx_desc = nb_desc;
750 txq->rs_thresh = tx_rs_thresh;
751 txq->free_thresh = tx_free_thresh;
752 txq->queue_id = queue_idx;
753 txq->port_id = dev->data->port_id;
754 txq->offloads = offloads;
755 txq->tx_deferred_start = tx_conf->tx_deferred_start;
757 /* Allocate software ring */
759 rte_zmalloc_socket("iavf tx sw ring",
760 sizeof(struct iavf_tx_entry) * nb_desc,
764 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
769 /* Allocate TX hardware ring descriptors. */
770 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
771 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
772 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
773 ring_size, IAVF_RING_BASE_ALIGN,
776 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
777 rte_free(txq->sw_ring);
781 txq->tx_ring_phys_addr = mz->iova;
782 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
787 dev->data->tx_queues[queue_idx] = txq;
788 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
789 txq->ops = &def_txq_ops;
791 if (check_tx_vec_allow(txq) == false) {
792 struct iavf_adapter *ad =
793 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
794 ad->tx_vec_allowed = false;
797 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
798 vf->tm_conf.committed) {
800 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
801 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
802 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
803 vf->qtc_map[tc].queue_count))
806 if (tc >= vf->qos_cap->num_elem) {
807 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
817 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
819 struct iavf_adapter *adapter =
820 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
821 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
822 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
823 struct iavf_rx_queue *rxq;
826 PMD_DRV_FUNC_TRACE();
828 if (rx_queue_id >= dev->data->nb_rx_queues)
831 rxq = dev->data->rx_queues[rx_queue_id];
833 err = alloc_rxq_mbufs(rxq);
835 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
841 /* Init the RX tail register. */
842 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
843 IAVF_WRITE_FLUSH(hw);
845 /* Ready to switch the queue on */
847 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
849 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
852 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
855 dev->data->rx_queue_state[rx_queue_id] =
856 RTE_ETH_QUEUE_STATE_STARTED;
862 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
864 struct iavf_adapter *adapter =
865 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
866 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
867 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
868 struct iavf_tx_queue *txq;
871 PMD_DRV_FUNC_TRACE();
873 if (tx_queue_id >= dev->data->nb_tx_queues)
876 txq = dev->data->tx_queues[tx_queue_id];
878 /* Init the TX tail register. */
879 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
880 IAVF_WRITE_FLUSH(hw);
882 /* Ready to switch the queue on */
884 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
886 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
889 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
892 dev->data->tx_queue_state[tx_queue_id] =
893 RTE_ETH_QUEUE_STATE_STARTED;
899 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
901 struct iavf_adapter *adapter =
902 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
903 struct iavf_rx_queue *rxq;
906 PMD_DRV_FUNC_TRACE();
908 if (rx_queue_id >= dev->data->nb_rx_queues)
911 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
913 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
918 rxq = dev->data->rx_queues[rx_queue_id];
919 rxq->ops->release_mbufs(rxq);
921 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
927 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
929 struct iavf_adapter *adapter =
930 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
931 struct iavf_tx_queue *txq;
934 PMD_DRV_FUNC_TRACE();
936 if (tx_queue_id >= dev->data->nb_tx_queues)
939 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
941 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
946 txq = dev->data->tx_queues[tx_queue_id];
947 txq->ops->release_mbufs(txq);
949 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
955 iavf_dev_rx_queue_release(void *rxq)
957 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
962 q->ops->release_mbufs(q);
963 rte_free(q->sw_ring);
964 rte_memzone_free(q->mz);
969 iavf_dev_tx_queue_release(void *txq)
971 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
976 q->ops->release_mbufs(q);
977 rte_free(q->sw_ring);
978 rte_memzone_free(q->mz);
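/* Disable all Rx and Tx queues via the PF, then release the mbufs still
 * held by the rings and reset the queues to their initial state.
 */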
983 iavf_stop_queues(struct rte_eth_dev *dev)
985 struct iavf_adapter *adapter =
986 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
987 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
988 struct iavf_rx_queue *rxq;
989 struct iavf_tx_queue *txq;
992 /* Stop All queues */
993 if (!vf->lv_enabled) {
994 ret = iavf_disable_queues(adapter);
996 PMD_DRV_LOG(WARNING, "Failed to stop queues");
998 ret = iavf_disable_queues_lv(adapter);
1000 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
1004 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1006 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1007 txq = dev->data->tx_queues[i];
1010 txq->ops->release_mbufs(txq);
1011 reset_tx_queue(txq);
1012 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1014 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1015 rxq = dev->data->rx_queues[i];
1018 rxq->ops->release_mbufs(rxq);
1019 reset_rx_queue(rxq);
1020 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
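/* Extract the stripped VLAN tag from a legacy Rx descriptor when the
 * L2TAG1 present bit is set.
 */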
1033 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1035 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1036 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1037 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1039 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1046 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1047 volatile union iavf_rx_flex_desc *rxdp,
1050 uint16_t vlan_tci = 0;
1052 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
1053 rte_le_to_cpu_64(rxdp->wb.status_error0) &
1054 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
1055 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
1057 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1058 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
1059 rte_le_to_cpu_16(rxdp->wb.status_error1) &
1060 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
1061 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1065 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1066 mb->vlan_tci = vlan_tci;
1070 /* Translate the rx descriptor status and error fields to pkt flags */
1071 static inline uint64_t
1072 iavf_rxd_to_pkt_flags(uint64_t qword)
1075 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1077 #define IAVF_RX_ERR_BITS 0x3f
1079 /* Check if RSS_HASH */
1080 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1081 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1082 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1084 /* Check if FDIR Match */
1085 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1088 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1089 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1093 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1094 flags |= PKT_RX_IP_CKSUM_BAD;
1096 flags |= PKT_RX_IP_CKSUM_GOOD;
1098 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1099 flags |= PKT_RX_L4_CKSUM_BAD;
1101 flags |= PKT_RX_L4_CKSUM_GOOD;
1103 /* TODO: Oversize error bit is not processed here */
1108 static inline uint64_t
1109 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1112 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1115 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1116 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1117 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1119 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1121 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1122 flags |= PKT_RX_FDIR_ID;
1126 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1127 flags |= PKT_RX_FDIR_ID;
1132 #define IAVF_RX_FLEX_ERR0_BITS \
1133 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1134 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1135 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1136 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1137 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1138 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1140 /* Rx L3/L4 checksum */
1141 static inline uint64_t
1142 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1146 /* check if HW has decoded the packet and checksum */
1147 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1150 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1151 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1155 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1156 flags |= PKT_RX_IP_CKSUM_BAD;
1158 flags |= PKT_RX_IP_CKSUM_GOOD;
1160 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1161 flags |= PKT_RX_L4_CKSUM_BAD;
1163 flags |= PKT_RX_L4_CKSUM_GOOD;
1165 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1166 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1171 /* If the number of free RX descriptors is greater than the RX free
1172 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1173 * register. Update the RDT with the value of the last processed RX
1174 * descriptor minus 1, to guarantee that the RDT register is never
1175 * equal to the RDH register, which creates a "full" ring situation
1176 * from the hardware point of view.
1179 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1181 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1183 if (nb_hold > rxq->rx_free_thresh) {
1185 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1186 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1187 rx_id = (uint16_t)((rx_id == 0) ?
1188 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1189 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1192 rxq->nb_rx_hold = nb_hold;
1195 /* implement recv_pkts */
1197 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1199 volatile union iavf_rx_desc *rx_ring;
1200 volatile union iavf_rx_desc *rxdp;
1201 struct iavf_rx_queue *rxq;
1202 union iavf_rx_desc rxd;
1203 struct rte_mbuf *rxe;
1204 struct rte_eth_dev *dev;
1205 struct rte_mbuf *rxm;
1206 struct rte_mbuf *nmb;
1210 uint16_t rx_packet_len;
1211 uint16_t rx_id, nb_hold;
1214 const uint32_t *ptype_tbl;
1219 rx_id = rxq->rx_tail;
1220 rx_ring = rxq->rx_ring;
1221 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1223 while (nb_rx < nb_pkts) {
1224 rxdp = &rx_ring[rx_id];
1225 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1226 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1227 IAVF_RXD_QW1_STATUS_SHIFT;
1229 /* Check the DD bit first */
1230 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1232 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1234 nmb = rte_mbuf_raw_alloc(rxq->mp);
1235 if (unlikely(!nmb)) {
1236 dev = &rte_eth_devices[rxq->port_id];
1237 dev->data->rx_mbuf_alloc_failed++;
1238 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1239 "queue_id=%u", rxq->port_id, rxq->queue_id);
1245 rxe = rxq->sw_ring[rx_id];
1246 rxq->sw_ring[rx_id] = nmb;
1248 if (unlikely(rx_id == rxq->nb_rx_desc))
1251 /* Prefetch next mbuf */
1252 rte_prefetch0(rxq->sw_ring[rx_id]);
1254 /* When the next RX descriptor is on a cache line boundary,
1255 * prefetch the next 4 RX descriptors and next 8 pointers
1258 if ((rx_id & 0x3) == 0) {
1259 rte_prefetch0(&rx_ring[rx_id]);
1260 rte_prefetch0(rxq->sw_ring[rx_id]);
1264 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1265 rxdp->read.hdr_addr = 0;
1266 rxdp->read.pkt_addr = dma_addr;
1268 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1269 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1271 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1272 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1275 rxm->pkt_len = rx_packet_len;
1276 rxm->data_len = rx_packet_len;
1277 rxm->port = rxq->port_id;
1279 iavf_rxd_to_vlan_tci(rxm, &rxd);
1280 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1282 ptype_tbl[(uint8_t)((qword1 &
1283 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1285 if (pkt_flags & PKT_RX_RSS_HASH)
1287 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1289 if (pkt_flags & PKT_RX_FDIR)
1290 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1292 rxm->ol_flags |= pkt_flags;
1294 rx_pkts[nb_rx++] = rxm;
1296 rxq->rx_tail = rx_id;
1298 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1303 /* implement recv_pkts for flexible Rx descriptor */
1305 iavf_recv_pkts_flex_rxd(void *rx_queue,
1306 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1308 volatile union iavf_rx_desc *rx_ring;
1309 volatile union iavf_rx_flex_desc *rxdp;
1310 struct iavf_rx_queue *rxq;
1311 union iavf_rx_flex_desc rxd;
1312 struct rte_mbuf *rxe;
1313 struct rte_eth_dev *dev;
1314 struct rte_mbuf *rxm;
1315 struct rte_mbuf *nmb;
1317 uint16_t rx_stat_err0;
1318 uint16_t rx_packet_len;
1319 uint16_t rx_id, nb_hold;
1322 const uint32_t *ptype_tbl;
1327 rx_id = rxq->rx_tail;
1328 rx_ring = rxq->rx_ring;
1329 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1331 while (nb_rx < nb_pkts) {
1332 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1333 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1335 /* Check the DD bit first */
1336 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1338 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1340 nmb = rte_mbuf_raw_alloc(rxq->mp);
1341 if (unlikely(!nmb)) {
1342 dev = &rte_eth_devices[rxq->port_id];
1343 dev->data->rx_mbuf_alloc_failed++;
1344 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1345 "queue_id=%u", rxq->port_id, rxq->queue_id);
1351 rxe = rxq->sw_ring[rx_id];
1352 rxq->sw_ring[rx_id] = nmb;
1354 if (unlikely(rx_id == rxq->nb_rx_desc))
1357 /* Prefetch next mbuf */
1358 rte_prefetch0(rxq->sw_ring[rx_id]);
1360 /* When the next RX descriptor is on a cache line boundary,
1361 * prefetch the next 4 RX descriptors and next 8 pointers
1364 if ((rx_id & 0x3) == 0) {
1365 rte_prefetch0(&rx_ring[rx_id]);
1366 rte_prefetch0(rxq->sw_ring[rx_id]);
1370 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1371 rxdp->read.hdr_addr = 0;
1372 rxdp->read.pkt_addr = dma_addr;
1374 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1375 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1377 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1378 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1381 rxm->pkt_len = rx_packet_len;
1382 rxm->data_len = rx_packet_len;
1383 rxm->port = rxq->port_id;
1385 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1386 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1387 iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
1388 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1389 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1390 rxm->ol_flags |= pkt_flags;
1392 rx_pkts[nb_rx++] = rxm;
1394 rxq->rx_tail = rx_id;
1396 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1401 /* implement recv_scattered_pkts for flexible Rx descriptor */
1403 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1406 struct iavf_rx_queue *rxq = rx_queue;
1407 union iavf_rx_flex_desc rxd;
1408 struct rte_mbuf *rxe;
1409 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1410 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1411 struct rte_mbuf *nmb, *rxm;
1412 uint16_t rx_id = rxq->rx_tail;
1413 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1414 struct rte_eth_dev *dev;
1415 uint16_t rx_stat_err0;
1419 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1420 volatile union iavf_rx_flex_desc *rxdp;
1421 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1423 while (nb_rx < nb_pkts) {
1424 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1425 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1427 /* Check the DD bit */
1428 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1430 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1432 nmb = rte_mbuf_raw_alloc(rxq->mp);
1433 if (unlikely(!nmb)) {
1434 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1435 "queue_id=%u", rxq->port_id, rxq->queue_id);
1436 dev = &rte_eth_devices[rxq->port_id];
1437 dev->data->rx_mbuf_alloc_failed++;
1443 rxe = rxq->sw_ring[rx_id];
1444 rxq->sw_ring[rx_id] = nmb;
1446 if (rx_id == rxq->nb_rx_desc)
1449 /* Prefetch next mbuf */
1450 rte_prefetch0(rxq->sw_ring[rx_id]);
1452 /* When the next RX descriptor is on a cache line boundary,
1453 * prefetch the next 4 RX descriptors and next 8 pointers
1456 if ((rx_id & 0x3) == 0) {
1457 rte_prefetch0(&rx_ring[rx_id]);
1458 rte_prefetch0(rxq->sw_ring[rx_id]);
1463 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1465 /* Set data buffer address and data length of the mbuf */
1466 rxdp->read.hdr_addr = 0;
1467 rxdp->read.pkt_addr = dma_addr;
1468 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1469 IAVF_RX_FLX_DESC_PKT_LEN_M;
1470 rxm->data_len = rx_packet_len;
1471 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1473 /* If this is the first buffer of the received packet, set the
1474 * pointer to the first mbuf of the packet and initialize its
1475 * context. Otherwise, update the total length and the number
1476 * of segments of the current scattered packet, and update the
1477 * pointer to the last mbuf of the current packet.
1481 first_seg->nb_segs = 1;
1482 first_seg->pkt_len = rx_packet_len;
1484 first_seg->pkt_len =
1485 (uint16_t)(first_seg->pkt_len +
1487 first_seg->nb_segs++;
1488 last_seg->next = rxm;
1491 /* If this is not the last buffer of the received packet,
1492 * update the pointer to the last mbuf of the current scattered
1493 * packet and continue to parse the RX ring.
1495 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1500 /* This is the last buffer of the received packet. If the CRC
1501 * is not stripped by the hardware:
1502 * - Subtract the CRC length from the total packet length.
1503 * - If the last buffer only contains the whole CRC or a part
1504 * of it, free the mbuf associated to the last buffer. If part
1505 * of the CRC is also contained in the previous mbuf, subtract
1506 * the length of that CRC part from the data length of the
1510 if (unlikely(rxq->crc_len > 0)) {
1511 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1512 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1513 rte_pktmbuf_free_seg(rxm);
1514 first_seg->nb_segs--;
1515 last_seg->data_len =
1516 (uint16_t)(last_seg->data_len -
1517 (RTE_ETHER_CRC_LEN - rx_packet_len));
1518 last_seg->next = NULL;
1520 rxm->data_len = (uint16_t)(rx_packet_len -
1525 first_seg->port = rxq->port_id;
1526 first_seg->ol_flags = 0;
1527 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1528 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1529 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
1530 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1531 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1533 first_seg->ol_flags |= pkt_flags;
1535 /* Prefetch data of first segment, if configured to do so. */
1536 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1537 first_seg->data_off));
1538 rx_pkts[nb_rx++] = first_seg;
1542 /* Record index of the next RX descriptor to probe. */
1543 rxq->rx_tail = rx_id;
1544 rxq->pkt_first_seg = first_seg;
1545 rxq->pkt_last_seg = last_seg;
1547 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1552 /* implement recv_scattered_pkts */
1554 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1557 struct iavf_rx_queue *rxq = rx_queue;
1558 union iavf_rx_desc rxd;
1559 struct rte_mbuf *rxe;
1560 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1561 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1562 struct rte_mbuf *nmb, *rxm;
1563 uint16_t rx_id = rxq->rx_tail;
1564 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1565 struct rte_eth_dev *dev;
1571 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1572 volatile union iavf_rx_desc *rxdp;
1573 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1575 while (nb_rx < nb_pkts) {
1576 rxdp = &rx_ring[rx_id];
1577 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1578 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1579 IAVF_RXD_QW1_STATUS_SHIFT;
1581 /* Check the DD bit */
1582 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1584 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1586 nmb = rte_mbuf_raw_alloc(rxq->mp);
1587 if (unlikely(!nmb)) {
1588 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1589 "queue_id=%u", rxq->port_id, rxq->queue_id);
1590 dev = &rte_eth_devices[rxq->port_id];
1591 dev->data->rx_mbuf_alloc_failed++;
1597 rxe = rxq->sw_ring[rx_id];
1598 rxq->sw_ring[rx_id] = nmb;
1600 if (rx_id == rxq->nb_rx_desc)
1603 /* Prefetch next mbuf */
1604 rte_prefetch0(rxq->sw_ring[rx_id]);
1606 /* When the next RX descriptor is on a cache line boundary,
1607 * prefetch the next 4 RX descriptors and next 8 pointers
1610 if ((rx_id & 0x3) == 0) {
1611 rte_prefetch0(&rx_ring[rx_id]);
1612 rte_prefetch0(rxq->sw_ring[rx_id]);
1617 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1619 /* Set data buffer address and data length of the mbuf */
1620 rxdp->read.hdr_addr = 0;
1621 rxdp->read.pkt_addr = dma_addr;
1622 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1623 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1624 rxm->data_len = rx_packet_len;
1625 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1627 /* If this is the first buffer of the received packet, set the
1628 * pointer to the first mbuf of the packet and initialize its
1629 * context. Otherwise, update the total length and the number
1630 * of segments of the current scattered packet, and update the
1631 * pointer to the last mbuf of the current packet.
1635 first_seg->nb_segs = 1;
1636 first_seg->pkt_len = rx_packet_len;
1638 first_seg->pkt_len =
1639 (uint16_t)(first_seg->pkt_len +
1641 first_seg->nb_segs++;
1642 last_seg->next = rxm;
1645 /* If this is not the last buffer of the received packet,
1646 * update the pointer to the last mbuf of the current scattered
1647 * packet and continue to parse the RX ring.
1649 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1654 /* This is the last buffer of the received packet. If the CRC
1655 * is not stripped by the hardware:
1656 * - Subtract the CRC length from the total packet length.
1657 * - If the last buffer only contains the whole CRC or a part
1658 * of it, free the mbuf associated to the last buffer. If part
1659 * of the CRC is also contained in the previous mbuf, subtract
1660 * the length of that CRC part from the data length of the
1664 if (unlikely(rxq->crc_len > 0)) {
1665 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1666 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1667 rte_pktmbuf_free_seg(rxm);
1668 first_seg->nb_segs--;
1669 last_seg->data_len =
1670 (uint16_t)(last_seg->data_len -
1671 (RTE_ETHER_CRC_LEN - rx_packet_len));
1672 last_seg->next = NULL;
1674 rxm->data_len = (uint16_t)(rx_packet_len -
1678 first_seg->port = rxq->port_id;
1679 first_seg->ol_flags = 0;
1680 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1681 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1682 first_seg->packet_type =
1683 ptype_tbl[(uint8_t)((qword1 &
1684 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1686 if (pkt_flags & PKT_RX_RSS_HASH)
1687 first_seg->hash.rss =
1688 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1690 if (pkt_flags & PKT_RX_FDIR)
1691 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1693 first_seg->ol_flags |= pkt_flags;
1695 /* Prefetch data of first segment, if configured to do so. */
1696 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1697 first_seg->data_off));
1698 rx_pkts[nb_rx++] = first_seg;
1702 /* Record index of the next RX descriptor to probe. */
1703 rxq->rx_tail = rx_id;
1704 rxq->pkt_first_seg = first_seg;
1705 rxq->pkt_last_seg = last_seg;
1707 iavf_update_rx_tail(rxq, nb_hold, rx_id);
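/* Bulk receive helpers: the ring is scanned IAVF_LOOK_AHEAD descriptors at
 * a time and completed mbufs are staged before being handed to the
 * application.
 */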
1712 #define IAVF_LOOK_AHEAD 8
1714 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1716 volatile union iavf_rx_flex_desc *rxdp;
1717 struct rte_mbuf **rxep;
1718 struct rte_mbuf *mb;
1721 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1722 int32_t i, j, nb_rx = 0;
1724 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1726 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1727 rxep = &rxq->sw_ring[rxq->rx_tail];
1729 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1731 /* Make sure there is at least 1 packet to receive */
1732 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1735 /* Scan LOOK_AHEAD descriptors at a time to determine which
1736 * descriptors reference packets that are ready to be received.
1738 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1739 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1740 /* Read desc statuses backwards to avoid race condition */
1741 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1742 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1746 /* Compute how many status bits were set */
1747 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1748 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1752 /* Translate descriptor info to mbuf parameters */
1753 for (j = 0; j < nb_dd; j++) {
1754 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1756 i * IAVF_LOOK_AHEAD + j);
1759 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1760 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1761 mb->data_len = pkt_len;
1762 mb->pkt_len = pkt_len;
1765 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1766 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1767 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
1768 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1769 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1770 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1772 mb->ol_flags |= pkt_flags;
1775 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1776 rxq->rx_stage[i + j] = rxep[j];
1778 if (nb_dd != IAVF_LOOK_AHEAD)
1782 /* Clear software ring entries */
1783 for (i = 0; i < nb_rx; i++)
1784 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1790 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1792 volatile union iavf_rx_desc *rxdp;
1793 struct rte_mbuf **rxep;
1794 struct rte_mbuf *mb;
1798 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1799 int32_t i, j, nb_rx = 0;
1801 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1803 rxdp = &rxq->rx_ring[rxq->rx_tail];
1804 rxep = &rxq->sw_ring[rxq->rx_tail];
1806 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1807 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1808 IAVF_RXD_QW1_STATUS_SHIFT;
1810 /* Make sure there is at least 1 packet to receive */
1811 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1814 /* Scan LOOK_AHEAD descriptors at a time to determine which
1815 * descriptors reference packets that are ready to be received.
1817 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1818 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1819 /* Read desc statuses backwards to avoid race condition */
1820 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1821 qword1 = rte_le_to_cpu_64(
1822 rxdp[j].wb.qword1.status_error_len);
1823 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1824 IAVF_RXD_QW1_STATUS_SHIFT;
1829 /* Compute how many status bits were set */
1830 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1831 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1835 /* Translate descriptor info to mbuf parameters */
1836 for (j = 0; j < nb_dd; j++) {
1837 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1838 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1841 qword1 = rte_le_to_cpu_64
1842 (rxdp[j].wb.qword1.status_error_len);
1843 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1844 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1845 mb->data_len = pkt_len;
1846 mb->pkt_len = pkt_len;
1848 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1849 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1851 ptype_tbl[(uint8_t)((qword1 &
1852 IAVF_RXD_QW1_PTYPE_MASK) >>
1853 IAVF_RXD_QW1_PTYPE_SHIFT)];
1855 if (pkt_flags & PKT_RX_RSS_HASH)
1856 mb->hash.rss = rte_le_to_cpu_32(
1857 rxdp[j].wb.qword0.hi_dword.rss);
1859 if (pkt_flags & PKT_RX_FDIR)
1860 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1862 mb->ol_flags |= pkt_flags;
1865 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1866 rxq->rx_stage[i + j] = rxep[j];
1868 if (nb_dd != IAVF_LOOK_AHEAD)
1872 /* Clear software ring entries */
1873 for (i = 0; i < nb_rx; i++)
1874 rxq->sw_ring[rxq->rx_tail + i] = NULL;
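/* Hand up to nb_pkts staged mbufs to the caller and advance the stage
 * indexes accordingly.
 */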
1879 static inline uint16_t
1880 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1881 struct rte_mbuf **rx_pkts,
1885 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1887 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1889 for (i = 0; i < nb_pkts; i++)
1890 rx_pkts[i] = stage[i];
1892 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1893 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
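/* Refill the rx_free_thresh descriptors ending at the free trigger with a
 * single bulk allocation from the mempool, then update the tail register.
 */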
1899 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1901 volatile union iavf_rx_desc *rxdp;
1902 struct rte_mbuf **rxep;
1903 struct rte_mbuf *mb;
1904 uint16_t alloc_idx, i;
1908 /* Allocate buffers in bulk */
1909 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1910 (rxq->rx_free_thresh - 1));
1911 rxep = &rxq->sw_ring[alloc_idx];
1912 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1913 rxq->rx_free_thresh);
1914 if (unlikely(diag != 0)) {
1915 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1919 rxdp = &rxq->rx_ring[alloc_idx];
1920 for (i = 0; i < rxq->rx_free_thresh; i++) {
1921 if (likely(i < (rxq->rx_free_thresh - 1)))
1922 /* Prefetch next mbuf */
1923 rte_prefetch0(rxep[i + 1]);
1926 rte_mbuf_refcnt_set(mb, 1);
1928 mb->data_off = RTE_PKTMBUF_HEADROOM;
1930 mb->port = rxq->port_id;
1931 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1932 rxdp[i].read.hdr_addr = 0;
1933 rxdp[i].read.pkt_addr = dma_addr;
1936 /* Update rx tail register */
1938 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1940 rxq->rx_free_trigger =
1941 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1942 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1943 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1948 static inline uint16_t
1949 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1951 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1957 if (rxq->rx_nb_avail)
1958 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1960 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1961 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1963 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1964 rxq->rx_next_avail = 0;
1965 rxq->rx_nb_avail = nb_rx;
1966 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1968 if (rxq->rx_tail > rxq->rx_free_trigger) {
1969 if (iavf_rx_alloc_bufs(rxq) != 0) {
1972 /* TODO: count rx_mbuf_alloc_failed here */
1974 rxq->rx_nb_avail = 0;
1975 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1976 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1977 rxq->sw_ring[j] = rxq->rx_stage[i];
1983 if (rxq->rx_tail >= rxq->nb_rx_desc)
1986 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1987 rxq->port_id, rxq->queue_id,
1988 rxq->rx_tail, nb_rx);
1990 if (rxq->rx_nb_avail)
1991 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1997 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1998 struct rte_mbuf **rx_pkts,
2001 uint16_t nb_rx = 0, n, count;
2003 if (unlikely(nb_pkts == 0))
2006 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2007 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2010 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2011 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2012 nb_rx = (uint16_t)(nb_rx + count);
2013 nb_pkts = (uint16_t)(nb_pkts - count);
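/* Reclaim completed Tx descriptors: check the done status of the
 * descriptor rs_thresh entries past the last cleaned one and credit the
 * freed slots back to nb_free.
 */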
2022 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2024 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2025 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2026 uint16_t nb_tx_desc = txq->nb_tx_desc;
2027 uint16_t desc_to_clean_to;
2028 uint16_t nb_tx_to_clean;
2030 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2032 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2033 if (desc_to_clean_to >= nb_tx_desc)
2034 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2036 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2037 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2038 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2039 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2040 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2041 "(port=%d queue=%d)", desc_to_clean_to,
2042 txq->port_id, txq->queue_id);
2046 if (last_desc_cleaned > desc_to_clean_to)
2047 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2050 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2053 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2055 txq->last_desc_cleaned = desc_to_clean_to;
2056 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2061 /* Check if the context descriptor is needed for TX offloading */
2062 static inline uint16_t
2063 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2065 if (flags & PKT_TX_TCP_SEG)
2067 if (flags & PKT_TX_VLAN_PKT &&
2068 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2074 iavf_txd_enable_checksum(uint64_t ol_flags,
2076 uint32_t *td_offset,
2077 union iavf_tx_offload tx_offload)
2080 *td_offset |= (tx_offload.l2_len >> 1) <<
2081 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2083 /* Enable L3 checksum offloads */
2084 if (ol_flags & PKT_TX_IP_CKSUM) {
2085 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2086 *td_offset |= (tx_offload.l3_len >> 2) <<
2087 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2088 } else if (ol_flags & PKT_TX_IPV4) {
2089 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2090 *td_offset |= (tx_offload.l3_len >> 2) <<
2091 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2092 } else if (ol_flags & PKT_TX_IPV6) {
2093 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2094 *td_offset |= (tx_offload.l3_len >> 2) <<
2095 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2098 if (ol_flags & PKT_TX_TCP_SEG) {
2099 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2100 *td_offset |= (tx_offload.l4_len >> 2) <<
2101 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2105 /* Enable L4 checksum offloads */
2106 switch (ol_flags & PKT_TX_L4_MASK) {
2107 case PKT_TX_TCP_CKSUM:
2108 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2109 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2110 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2112 case PKT_TX_SCTP_CKSUM:
2113 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2114 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2115 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2117 case PKT_TX_UDP_CKSUM:
2118 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2119 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2120 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2127 /* set TSO context descriptor
2128 * support IP -> L4 and IP -> IP -> L4
2130 static inline uint64_t
2131 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2133 uint64_t ctx_desc = 0;
2134 uint32_t cd_cmd, hdr_len, cd_tso_len;
2136 if (!tx_offload.l4_len) {
2137 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2141 hdr_len = tx_offload.l2_len +
2145 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2146 cd_tso_len = mbuf->pkt_len - hdr_len;
2147 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2148 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2149 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
2154 /* Construct the tx flags */
2155 static inline uint64_t
2156 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2159 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2160 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2161 ((uint64_t)td_offset <<
2162 IAVF_TXD_QW1_OFFSET_SHIFT) |
2164 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2165 ((uint64_t)td_tag <<
2166 IAVF_TXD_QW1_L2TAG1_SHIFT));
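/* Transmit a burst of packets: each packet consumes one data descriptor
 * per segment plus an optional context descriptor for TSO or L2TAG2 VLAN
 * insertion.
 */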
2171 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2173 volatile struct iavf_tx_desc *txd;
2174 volatile struct iavf_tx_desc *txr;
2175 struct iavf_tx_queue *txq;
2176 struct iavf_tx_entry *sw_ring;
2177 struct iavf_tx_entry *txe, *txn;
2178 struct rte_mbuf *tx_pkt;
2179 struct rte_mbuf *m_seg;
2190 uint64_t buf_dma_addr;
2191 uint16_t cd_l2tag2 = 0;
2192 union iavf_tx_offload tx_offload = {0};
2195 sw_ring = txq->sw_ring;
2197 tx_id = txq->tx_tail;
2198 txe = &sw_ring[tx_id];
2200 /* Check if the descriptor ring needs to be cleaned. */
2201 if (txq->nb_free < txq->free_thresh)
2202 (void)iavf_xmit_cleanup(txq);
2204 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2209 tx_pkt = *tx_pkts++;
2210 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2212 ol_flags = tx_pkt->ol_flags;
2213 tx_offload.l2_len = tx_pkt->l2_len;
2214 tx_offload.l3_len = tx_pkt->l3_len;
2215 tx_offload.l4_len = tx_pkt->l4_len;
2216 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2217 /* Calculate the number of context descriptors needed. */
2218 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2220 /* The number of descriptors that must be allocated for
2221 * a packet equals the number of segments of that
2222 * packet, plus 1 context descriptor if needed.
2224 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2225 tx_last = (uint16_t)(tx_id + nb_used - 1);
2228 if (tx_last >= txq->nb_tx_desc)
2229 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2231 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2232 " tx_first=%u tx_last=%u",
2233 txq->port_id, txq->queue_id, tx_id, tx_last);
2235 if (nb_used > txq->nb_free) {
2236 if (iavf_xmit_cleanup(txq)) {
2241 if (unlikely(nb_used > txq->rs_thresh)) {
2242 while (nb_used > txq->nb_free) {
2243 if (iavf_xmit_cleanup(txq)) {
2252 /* Descriptor based VLAN insertion */
2253 if (ol_flags & PKT_TX_VLAN_PKT &&
2254 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2255 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2256 td_tag = tx_pkt->vlan_tci;
2259 /* According to the datasheet, bit 2 is reserved and must be
2264 /* Enable checksum offloading */
2265 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2266 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2267 &td_offset, tx_offload);
2270 /* Setup TX context descriptor if required */
2271 uint64_t cd_type_cmd_tso_mss =
2272 IAVF_TX_DESC_DTYPE_CONTEXT;
2273 volatile struct iavf_tx_context_desc *ctx_txd =
2274 (volatile struct iavf_tx_context_desc *)
2277 /* clear QW0, otherwise the previous writeback value
2278 * may affect the next write
2280 *(volatile uint64_t *)ctx_txd = 0;
2282 txn = &sw_ring[txe->next_id];
2283 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2285 rte_pktmbuf_free_seg(txe->mbuf);
2290 if (ol_flags & PKT_TX_TCP_SEG)
2291 cd_type_cmd_tso_mss |=
2292 iavf_set_tso_ctx(tx_pkt, tx_offload);
2294 if (ol_flags & PKT_TX_VLAN_PKT &&
2295 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2296 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2297 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2298 cd_l2tag2 = tx_pkt->vlan_tci;
2301 ctx_txd->type_cmd_tso_mss =
2302 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2303 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2305 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2306 txe->last_id = tx_last;
2307 tx_id = txe->next_id;
2314 txn = &sw_ring[txe->next_id];
2317 rte_pktmbuf_free_seg(txe->mbuf);
2320 /* Setup TX Descriptor */
2321 slen = m_seg->data_len;
2322 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2323 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2324 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2329 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2330 txe->last_id = tx_last;
2331 tx_id = txe->next_id;
2333 m_seg = m_seg->next;
2336 /* The last packet data descriptor needs End Of Packet (EOP) */
2337 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2338 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2339 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2341 if (txq->nb_used >= txq->rs_thresh) {
2342 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2343 "%4u (port=%d queue=%d)",
2344 tx_last, txq->port_id, txq->queue_id);
2346 td_cmd |= IAVF_TX_DESC_CMD_RS;
2348 /* Update txq RS bit counters */
2352 txd->cmd_type_offset_bsz |=
2353 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2354 IAVF_TXD_QW1_CMD_SHIFT);
2355 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2361 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2362 txq->port_id, txq->queue_id, tx_id, nb_tx);
2364 IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
2365 txq->tx_tail = tx_id;
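/* Illustrative sketch, not part of the driver: how an application drives the
 * burst function above through the ethdev API once the basic Tx path has
 * been selected.  Assumes a configured and started iavf port; the function
 * name is hypothetical.
 */
static void __rte_unused
iavf_example_tx_burst_loop(uint16_t port_id, uint16_t queue_id,
			   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/* rte_eth_tx_burst() dispatches to the PMD's tx_pkt_burst callback;
	 * keep retrying until the whole burst has been queued.
	 */
	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);
}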
2370 /* Check if a packet with VLAN user priority is transmitted in the correct queue. */
2374 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2376 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2377 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2380 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2382 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2383 PMD_TX_LOG(ERR, "packet with VLAN PCP %u cannot be transmitted in queue %u\n",
2391 /* TX prep functions */
2393 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2399 struct iavf_tx_queue *txq = tx_queue;
2400 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2401 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2403 for (i = 0; i < nb_pkts; i++) {
2405 ol_flags = m->ol_flags;
2407 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2408 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2409 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2413 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2414 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2415 /* An MSS outside this range is considered malicious */
2420 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2421 rte_errno = ENOTSUP;
2425 #ifdef RTE_ETHDEV_DEBUG_TX
2426 ret = rte_validate_tx_offload(m);
2432 ret = rte_net_intel_cksum_prepare(m);
2438 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2439 ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
2440 ret = iavf_check_vlan_up2tc(txq, m);
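/* Illustrative sketch, not part of the driver: running the prepare stage
 * above from the application before transmitting, so the segment-count,
 * TSO MSS and offload checks reject bad packets early.  Assumes a
 * configured and started port; the wrapper name is hypothetical.
 */
static uint16_t __rte_unused
iavf_example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			      struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* rte_eth_tx_prepare() calls the PMD's tx_pkt_prepare callback */
	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* Only packets accepted by the prepare stage are handed to Tx */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
}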
2451 /* choose rx function */
2453 iavf_set_rx_function(struct rte_eth_dev *dev)
2455 struct iavf_adapter *adapter =
2456 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2457 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2460 struct iavf_rx_queue *rxq;
2463 bool use_avx2 = false;
2464 bool use_avx512 = false;
2465 bool use_flex = false;
2467 check_ret = iavf_rx_vec_dev_check(dev);
2468 if (check_ret >= 0 &&
2469 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2470 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2471 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2472 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2475 #ifdef CC_AVX512_SUPPORT
2476 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2477 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2478 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2482 if (vf->vf_res->vf_cap_flags &
2483 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2486 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2487 rxq = dev->data->rx_queues[i];
2488 (void)iavf_rxq_vec_setup(rxq);
2491 if (dev->data->scattered_rx) {
2494 "Using %sVector Scattered Rx (port %d).",
2495 use_avx2 ? "avx2 " : "",
2496 dev->data->port_id);
2498 if (check_ret == IAVF_VECTOR_PATH)
2500 "Using AVX512 Vector Scattered Rx (port %d).",
2501 dev->data->port_id);
2504 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2505 dev->data->port_id);
2508 dev->rx_pkt_burst = use_avx2 ?
2509 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2510 iavf_recv_scattered_pkts_vec_flex_rxd;
2511 #ifdef CC_AVX512_SUPPORT
2513 if (check_ret == IAVF_VECTOR_PATH)
2515 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2518 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2522 dev->rx_pkt_burst = use_avx2 ?
2523 iavf_recv_scattered_pkts_vec_avx2 :
2524 iavf_recv_scattered_pkts_vec;
2525 #ifdef CC_AVX512_SUPPORT
2527 if (check_ret == IAVF_VECTOR_PATH)
2529 iavf_recv_scattered_pkts_vec_avx512;
2532 iavf_recv_scattered_pkts_vec_avx512_offload;
2538 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2539 use_avx2 ? "avx2 " : "",
2540 dev->data->port_id);
2542 if (check_ret == IAVF_VECTOR_PATH)
2544 "Using AVX512 Vector Rx (port %d).",
2545 dev->data->port_id);
2548 "Using AVX512 OFFLOAD Vector Rx (port %d).",
2549 dev->data->port_id);
2552 dev->rx_pkt_burst = use_avx2 ?
2553 iavf_recv_pkts_vec_avx2_flex_rxd :
2554 iavf_recv_pkts_vec_flex_rxd;
2555 #ifdef CC_AVX512_SUPPORT
2557 if (check_ret == IAVF_VECTOR_PATH)
2559 iavf_recv_pkts_vec_avx512_flex_rxd;
2562 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
2566 dev->rx_pkt_burst = use_avx2 ?
2567 iavf_recv_pkts_vec_avx2 :
2569 #ifdef CC_AVX512_SUPPORT
2571 if (check_ret == IAVF_VECTOR_PATH)
2573 iavf_recv_pkts_vec_avx512;
2576 iavf_recv_pkts_vec_avx512_offload;
2586 if (dev->data->scattered_rx) {
2587 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2588 dev->data->port_id);
2589 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2590 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2592 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2593 } else if (adapter->rx_bulk_alloc_allowed) {
2594 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2595 dev->data->port_id);
2596 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2598 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2599 dev->data->port_id);
2600 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2601 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2603 dev->rx_pkt_burst = iavf_recv_pkts;
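/* Illustrative sketch, not part of the driver: an application can steer the
 * Rx path selection above by capping the permitted SIMD width before the
 * device is configured (same effect as the --force-max-simd-bitwidth EAL
 * option).  Uses rte_vect_set_max_simd_bitwidth() from rte_vect.h, which the
 * selection code above already relies on; the call may be rejected if EAL
 * has already locked the value.  The helper name is hypothetical.
 */
static void __rte_unused
iavf_example_force_sse_rx_path(void)
{
	/* A 128-bit cap rules out the AVX2 and AVX512 Rx burst functions */
	if (rte_vect_set_max_simd_bitwidth(RTE_VECT_SIMD_128) != 0)
		PMD_DRV_LOG(WARNING, "could not cap SIMD bitwidth");
}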
2607 /* choose tx function */
2609 iavf_set_tx_function(struct rte_eth_dev *dev)
2612 struct iavf_tx_queue *txq;
2615 bool use_sse = false;
2616 bool use_avx2 = false;
2617 bool use_avx512 = false;
2619 check_ret = iavf_tx_vec_dev_check(dev);
2621 if (check_ret >= 0 &&
2622 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2623 /* SSE and AVX2 do not support the offload path yet. */
2624 if (check_ret == IAVF_VECTOR_PATH) {
2626 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2627 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2628 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2631 #ifdef CC_AVX512_SUPPORT
2632 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2633 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2634 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2638 if (!use_sse && !use_avx2 && !use_avx512)
2642 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2643 use_avx2 ? "avx2 " : "",
2644 dev->data->port_id);
2645 dev->tx_pkt_burst = use_avx2 ?
2646 iavf_xmit_pkts_vec_avx2 :
2649 dev->tx_pkt_prepare = NULL;
2650 #ifdef CC_AVX512_SUPPORT
2652 if (check_ret == IAVF_VECTOR_PATH) {
2653 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2654 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
2655 dev->data->port_id);
2657 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
2658 dev->tx_pkt_prepare = iavf_prep_pkts;
2659 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
2660 dev->data->port_id);
2665 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2666 txq = dev->data->tx_queues[i];
2669 #ifdef CC_AVX512_SUPPORT
2671 iavf_txq_vec_setup_avx512(txq);
2673 iavf_txq_vec_setup(txq);
2675 iavf_txq_vec_setup(txq);
2684 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2685 dev->data->port_id);
2686 dev->tx_pkt_burst = iavf_xmit_pkts;
2687 dev->tx_pkt_prepare = iavf_prep_pkts;
2691 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2694 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2695 uint16_t i, tx_last, tx_id;
2696 uint16_t nb_tx_free_last;
2697 uint16_t nb_tx_to_clean;
2700 /* Start freeing mbufs from the descriptor following tx_tail */
2701 tx_last = txq->tx_tail;
2702 tx_id = swr_ring[tx_last].next_id;
2704 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2707 nb_tx_to_clean = txq->nb_free;
2708 nb_tx_free_last = txq->nb_free;
2710 free_cnt = txq->nb_tx_desc;
2712 /* Loop through swr_ring to count the number of
2713 * freeable mbufs and packets.
2715 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2716 for (i = 0; i < nb_tx_to_clean &&
2717 pkt_cnt < free_cnt &&
2718 tx_id != tx_last; i++) {
2719 if (swr_ring[tx_id].mbuf != NULL) {
2720 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2721 swr_ring[tx_id].mbuf = NULL;
2724 * last segment in the packet,
2725 * increment packet count
2727 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2730 tx_id = swr_ring[tx_id].next_id;
2733 if (txq->rs_thresh > txq->nb_tx_desc -
2734 txq->nb_free || tx_id == tx_last)
2737 if (pkt_cnt < free_cnt) {
2738 if (iavf_xmit_cleanup(txq))
2741 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2742 nb_tx_free_last = txq->nb_free;
2746 return (int)pkt_cnt;
2750 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2752 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2754 return iavf_tx_done_cleanup_full(q, free_cnt);
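/* Illustrative sketch, not part of the driver: reclaiming completed Tx mbufs
 * from application context; the ethdev call below reaches
 * iavf_dev_tx_done_cleanup() through the tx_done_cleanup dev op.  The helper
 * name is hypothetical.
 */
static int __rte_unused
iavf_example_reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* free_cnt == 0 asks for all completed packets to be freed */
	return rte_eth_tx_done_cleanup(port_id, queue_id, 0);
}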
2758 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2759 struct rte_eth_rxq_info *qinfo)
2761 struct iavf_rx_queue *rxq;
2763 rxq = dev->data->rx_queues[queue_id];
2765 qinfo->mp = rxq->mp;
2766 qinfo->scattered_rx = dev->data->scattered_rx;
2767 qinfo->nb_desc = rxq->nb_rx_desc;
2769 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2770 qinfo->conf.rx_drop_en = true;
2771 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2775 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2776 struct rte_eth_txq_info *qinfo)
2778 struct iavf_tx_queue *txq;
2780 txq = dev->data->tx_queues[queue_id];
2782 qinfo->nb_desc = txq->nb_tx_desc;
2784 qinfo->conf.tx_free_thresh = txq->free_thresh;
2785 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2786 qinfo->conf.offloads = txq->offloads;
2787 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
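/* Illustrative sketch, not part of the driver: reading back the Tx queue
 * thresholds reported above through the public API.  The helper name is
 * hypothetical.
 */
static void __rte_unused
iavf_example_dump_txq_conf(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	PMD_DRV_LOG(DEBUG, "txq %u: %u descriptors, rs_thresh %u, free_thresh %u",
		    queue_id, qinfo.nb_desc,
		    qinfo.conf.tx_rs_thresh, qinfo.conf.tx_free_thresh);
}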
2790 /* Get the number of used descriptors of a rx queue */
2792 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2794 #define IAVF_RXQ_SCAN_INTERVAL 4
2795 volatile union iavf_rx_desc *rxdp;
2796 struct iavf_rx_queue *rxq;
2799 rxq = dev->data->rx_queues[queue_id];
2800 rxdp = &rxq->rx_ring[rxq->rx_tail];
2802 while ((desc < rxq->nb_rx_desc) &&
2803 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2804 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2805 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2806 /* Check the DD bit of every fourth Rx descriptor to avoid
2807 * polling too frequently and degrading performance.
2810 desc += IAVF_RXQ_SCAN_INTERVAL;
2811 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2812 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2813 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2814 desc - rxq->nb_rx_desc]);
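/* Illustrative sketch, not part of the driver: polling Rx queue occupancy
 * from the application; rte_eth_rx_queue_count() lands in the DD-bit
 * scanning loop above.  The helper name is hypothetical.
 */
static void __rte_unused
iavf_example_log_rxq_fill(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used >= 0)
		PMD_DRV_LOG(DEBUG, "port %u rxq %u holds %d used descriptors",
			    port_id, queue_id, used);
}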
2821 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2823 struct iavf_rx_queue *rxq = rx_queue;
2824 volatile uint64_t *status;
2828 if (unlikely(offset >= rxq->nb_rx_desc))
2831 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2832 return RTE_ETH_RX_DESC_UNAVAIL;
2834 desc = rxq->rx_tail + offset;
2835 if (desc >= rxq->nb_rx_desc)
2836 desc -= rxq->nb_rx_desc;
2838 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2839 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2840 << IAVF_RXD_QW1_STATUS_SHIFT);
2842 return RTE_ETH_RX_DESC_DONE;
2844 return RTE_ETH_RX_DESC_AVAIL;
2848 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2850 struct iavf_tx_queue *txq = tx_queue;
2851 volatile uint64_t *status;
2852 uint64_t mask, expect;
2855 if (unlikely(offset >= txq->nb_tx_desc))
2858 desc = txq->tx_tail + offset;
2859 /* go to next desc that has the RS bit */
2860 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2862 if (desc >= txq->nb_tx_desc) {
2863 desc -= txq->nb_tx_desc;
2864 if (desc >= txq->nb_tx_desc)
2865 desc -= txq->nb_tx_desc;
2868 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2869 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2870 expect = rte_cpu_to_le_64(
2871 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2872 if ((*status & mask) == expect)
2873 return RTE_ETH_TX_DESC_DONE;
2875 return RTE_ETH_TX_DESC_FULL;
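/* Illustrative sketch, not part of the driver: checking a single descriptor
 * through the ethdev wrappers that call the two status helpers above.  The
 * helper name is hypothetical.
 */
static bool __rte_unused
iavf_example_tx_slot_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* DONE means the slot at tx_tail + offset has been written back */
	return rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_TX_DESC_DONE;
}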
2879 iavf_get_default_ptype_table(void)
2881 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2882 __rte_cache_aligned = {
2885 [1] = RTE_PTYPE_L2_ETHER,
2886 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2887 /* [3] - [5] reserved */
2888 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2889 /* [7] - [10] reserved */
2890 [11] = RTE_PTYPE_L2_ETHER_ARP,
2891 /* [12] - [21] reserved */
2893 /* Non tunneled IPv4 */
2894 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2896 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2897 RTE_PTYPE_L4_NONFRAG,
2898 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2901 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2903 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2905 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2909 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2910 RTE_PTYPE_TUNNEL_IP |
2911 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2912 RTE_PTYPE_INNER_L4_FRAG,
2913 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2914 RTE_PTYPE_TUNNEL_IP |
2915 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2916 RTE_PTYPE_INNER_L4_NONFRAG,
2917 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2918 RTE_PTYPE_TUNNEL_IP |
2919 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2920 RTE_PTYPE_INNER_L4_UDP,
2922 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2923 RTE_PTYPE_TUNNEL_IP |
2924 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2925 RTE_PTYPE_INNER_L4_TCP,
2926 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2927 RTE_PTYPE_TUNNEL_IP |
2928 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2929 RTE_PTYPE_INNER_L4_SCTP,
2930 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2931 RTE_PTYPE_TUNNEL_IP |
2932 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2933 RTE_PTYPE_INNER_L4_ICMP,
2936 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2937 RTE_PTYPE_TUNNEL_IP |
2938 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2939 RTE_PTYPE_INNER_L4_FRAG,
2940 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2941 RTE_PTYPE_TUNNEL_IP |
2942 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2943 RTE_PTYPE_INNER_L4_NONFRAG,
2944 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2945 RTE_PTYPE_TUNNEL_IP |
2946 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2947 RTE_PTYPE_INNER_L4_UDP,
2949 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2950 RTE_PTYPE_TUNNEL_IP |
2951 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2952 RTE_PTYPE_INNER_L4_TCP,
2953 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2954 RTE_PTYPE_TUNNEL_IP |
2955 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2956 RTE_PTYPE_INNER_L4_SCTP,
2957 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2958 RTE_PTYPE_TUNNEL_IP |
2959 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2960 RTE_PTYPE_INNER_L4_ICMP,
2962 /* IPv4 --> GRE/Teredo/VXLAN */
2963 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2964 RTE_PTYPE_TUNNEL_GRENAT,
2966 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2967 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2968 RTE_PTYPE_TUNNEL_GRENAT |
2969 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2970 RTE_PTYPE_INNER_L4_FRAG,
2971 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2972 RTE_PTYPE_TUNNEL_GRENAT |
2973 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2974 RTE_PTYPE_INNER_L4_NONFRAG,
2975 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2976 RTE_PTYPE_TUNNEL_GRENAT |
2977 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2978 RTE_PTYPE_INNER_L4_UDP,
2980 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2981 RTE_PTYPE_TUNNEL_GRENAT |
2982 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2983 RTE_PTYPE_INNER_L4_TCP,
2984 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2985 RTE_PTYPE_TUNNEL_GRENAT |
2986 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2987 RTE_PTYPE_INNER_L4_SCTP,
2988 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2989 RTE_PTYPE_TUNNEL_GRENAT |
2990 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2991 RTE_PTYPE_INNER_L4_ICMP,
2993 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2994 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2995 RTE_PTYPE_TUNNEL_GRENAT |
2996 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2997 RTE_PTYPE_INNER_L4_FRAG,
2998 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2999 RTE_PTYPE_TUNNEL_GRENAT |
3000 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3001 RTE_PTYPE_INNER_L4_NONFRAG,
3002 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3003 RTE_PTYPE_TUNNEL_GRENAT |
3004 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3005 RTE_PTYPE_INNER_L4_UDP,
3007 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3008 RTE_PTYPE_TUNNEL_GRENAT |
3009 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3010 RTE_PTYPE_INNER_L4_TCP,
3011 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3012 RTE_PTYPE_TUNNEL_GRENAT |
3013 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3014 RTE_PTYPE_INNER_L4_SCTP,
3015 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3016 RTE_PTYPE_TUNNEL_GRENAT |
3017 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3018 RTE_PTYPE_INNER_L4_ICMP,
3020 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3021 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3022 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3024 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3025 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3026 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3027 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3028 RTE_PTYPE_INNER_L4_FRAG,
3029 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3030 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3031 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3032 RTE_PTYPE_INNER_L4_NONFRAG,
3033 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3034 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3035 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3036 RTE_PTYPE_INNER_L4_UDP,
3038 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3039 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3040 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3041 RTE_PTYPE_INNER_L4_TCP,
3042 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3043 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3044 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3045 RTE_PTYPE_INNER_L4_SCTP,
3046 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3047 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3048 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3049 RTE_PTYPE_INNER_L4_ICMP,
3051 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3052 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3053 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3054 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3055 RTE_PTYPE_INNER_L4_FRAG,
3056 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3057 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3058 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3059 RTE_PTYPE_INNER_L4_NONFRAG,
3060 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3061 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3062 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3063 RTE_PTYPE_INNER_L4_UDP,
3065 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3066 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3067 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3068 RTE_PTYPE_INNER_L4_TCP,
3069 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3070 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3071 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3072 RTE_PTYPE_INNER_L4_SCTP,
3073 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3074 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3075 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3076 RTE_PTYPE_INNER_L4_ICMP,
3077 /* [73] - [87] reserved */
3079 /* Non tunneled IPv6 */
3080 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3082 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3083 RTE_PTYPE_L4_NONFRAG,
3084 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3087 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3089 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3091 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3095 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3096 RTE_PTYPE_TUNNEL_IP |
3097 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3098 RTE_PTYPE_INNER_L4_FRAG,
3099 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3100 RTE_PTYPE_TUNNEL_IP |
3101 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3102 RTE_PTYPE_INNER_L4_NONFRAG,
3103 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3104 RTE_PTYPE_TUNNEL_IP |
3105 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3106 RTE_PTYPE_INNER_L4_UDP,
3108 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3109 RTE_PTYPE_TUNNEL_IP |
3110 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3111 RTE_PTYPE_INNER_L4_TCP,
3112 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3113 RTE_PTYPE_TUNNEL_IP |
3114 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3115 RTE_PTYPE_INNER_L4_SCTP,
3116 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3117 RTE_PTYPE_TUNNEL_IP |
3118 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3119 RTE_PTYPE_INNER_L4_ICMP,
3122 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3123 RTE_PTYPE_TUNNEL_IP |
3124 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3125 RTE_PTYPE_INNER_L4_FRAG,
3126 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3127 RTE_PTYPE_TUNNEL_IP |
3128 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3129 RTE_PTYPE_INNER_L4_NONFRAG,
3130 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3131 RTE_PTYPE_TUNNEL_IP |
3132 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3133 RTE_PTYPE_INNER_L4_UDP,
3134 /* [105] reserved */
3135 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3136 RTE_PTYPE_TUNNEL_IP |
3137 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3138 RTE_PTYPE_INNER_L4_TCP,
3139 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3140 RTE_PTYPE_TUNNEL_IP |
3141 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3142 RTE_PTYPE_INNER_L4_SCTP,
3143 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3144 RTE_PTYPE_TUNNEL_IP |
3145 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3146 RTE_PTYPE_INNER_L4_ICMP,
3148 /* IPv6 --> GRE/Teredo/VXLAN */
3149 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3150 RTE_PTYPE_TUNNEL_GRENAT,
3152 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3153 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3154 RTE_PTYPE_TUNNEL_GRENAT |
3155 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3156 RTE_PTYPE_INNER_L4_FRAG,
3157 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3158 RTE_PTYPE_TUNNEL_GRENAT |
3159 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3160 RTE_PTYPE_INNER_L4_NONFRAG,
3161 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3162 RTE_PTYPE_TUNNEL_GRENAT |
3163 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3164 RTE_PTYPE_INNER_L4_UDP,
3165 /* [113] reserved */
3166 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3167 RTE_PTYPE_TUNNEL_GRENAT |
3168 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3169 RTE_PTYPE_INNER_L4_TCP,
3170 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3171 RTE_PTYPE_TUNNEL_GRENAT |
3172 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3173 RTE_PTYPE_INNER_L4_SCTP,
3174 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3175 RTE_PTYPE_TUNNEL_GRENAT |
3176 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3177 RTE_PTYPE_INNER_L4_ICMP,
3179 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3180 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3181 RTE_PTYPE_TUNNEL_GRENAT |
3182 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3183 RTE_PTYPE_INNER_L4_FRAG,
3184 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3185 RTE_PTYPE_TUNNEL_GRENAT |
3186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3187 RTE_PTYPE_INNER_L4_NONFRAG,
3188 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3189 RTE_PTYPE_TUNNEL_GRENAT |
3190 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3191 RTE_PTYPE_INNER_L4_UDP,
3192 /* [120] reserved */
3193 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3194 RTE_PTYPE_TUNNEL_GRENAT |
3195 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3196 RTE_PTYPE_INNER_L4_TCP,
3197 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3198 RTE_PTYPE_TUNNEL_GRENAT |
3199 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3200 RTE_PTYPE_INNER_L4_SCTP,
3201 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3202 RTE_PTYPE_TUNNEL_GRENAT |
3203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3204 RTE_PTYPE_INNER_L4_ICMP,
3206 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3207 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3208 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3210 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3211 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3212 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3214 RTE_PTYPE_INNER_L4_FRAG,
3215 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3216 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3217 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3218 RTE_PTYPE_INNER_L4_NONFRAG,
3219 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3220 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3221 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3222 RTE_PTYPE_INNER_L4_UDP,
3223 /* [128] reserved */
3224 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3225 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3227 RTE_PTYPE_INNER_L4_TCP,
3228 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3229 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3230 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3231 RTE_PTYPE_INNER_L4_SCTP,
3232 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3233 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3234 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3235 RTE_PTYPE_INNER_L4_ICMP,
3237 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3238 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3239 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3240 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3241 RTE_PTYPE_INNER_L4_FRAG,
3242 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3244 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3245 RTE_PTYPE_INNER_L4_NONFRAG,
3246 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3247 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3249 RTE_PTYPE_INNER_L4_UDP,
3250 /* [135] reserved */
3251 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3252 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3254 RTE_PTYPE_INNER_L4_TCP,
3255 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3256 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3257 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3258 RTE_PTYPE_INNER_L4_SCTP,
3259 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3260 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3261 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3262 RTE_PTYPE_INNER_L4_ICMP,
3263 /* [139] - [299] reserved */
3266 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3267 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3269 /* PPPoE --> IPv4 */
3270 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3271 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3273 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3274 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3275 RTE_PTYPE_L4_NONFRAG,
3276 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3277 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3279 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3280 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3282 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3283 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3285 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3286 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3289 /* PPPoE --> IPv6 */
3290 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3291 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3293 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3294 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3295 RTE_PTYPE_L4_NONFRAG,
3296 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3297 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3299 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3300 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3302 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3303 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3305 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3306 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3308 /* [314] - [324] reserved */
3310 /* IPv4/IPv6 --> GTPC/GTPU */
3311 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3312 RTE_PTYPE_TUNNEL_GTPC,
3313 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3314 RTE_PTYPE_TUNNEL_GTPC,
3315 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3316 RTE_PTYPE_TUNNEL_GTPC,
3317 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3318 RTE_PTYPE_TUNNEL_GTPC,
3319 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3320 RTE_PTYPE_TUNNEL_GTPU,
3321 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3322 RTE_PTYPE_TUNNEL_GTPU,
3324 /* IPv4 --> GTPU --> IPv4 */
3325 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3326 RTE_PTYPE_TUNNEL_GTPU |
3327 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3328 RTE_PTYPE_INNER_L4_FRAG,
3329 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3330 RTE_PTYPE_TUNNEL_GTPU |
3331 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3332 RTE_PTYPE_INNER_L4_NONFRAG,
3333 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3334 RTE_PTYPE_TUNNEL_GTPU |
3335 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3336 RTE_PTYPE_INNER_L4_UDP,
3337 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3338 RTE_PTYPE_TUNNEL_GTPU |
3339 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3340 RTE_PTYPE_INNER_L4_TCP,
3341 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3342 RTE_PTYPE_TUNNEL_GTPU |
3343 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3344 RTE_PTYPE_INNER_L4_ICMP,
3346 /* IPv6 --> GTPU --> IPv4 */
3347 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3348 RTE_PTYPE_TUNNEL_GTPU |
3349 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3350 RTE_PTYPE_INNER_L4_FRAG,
3351 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3352 RTE_PTYPE_TUNNEL_GTPU |
3353 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3354 RTE_PTYPE_INNER_L4_NONFRAG,
3355 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3356 RTE_PTYPE_TUNNEL_GTPU |
3357 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3358 RTE_PTYPE_INNER_L4_UDP,
3359 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3360 RTE_PTYPE_TUNNEL_GTPU |
3361 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3362 RTE_PTYPE_INNER_L4_TCP,
3363 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3364 RTE_PTYPE_TUNNEL_GTPU |
3365 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3366 RTE_PTYPE_INNER_L4_ICMP,
3368 /* IPv4 --> GTPU --> IPv6 */
3369 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3370 RTE_PTYPE_TUNNEL_GTPU |
3371 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3372 RTE_PTYPE_INNER_L4_FRAG,
3373 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3374 RTE_PTYPE_TUNNEL_GTPU |
3375 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3376 RTE_PTYPE_INNER_L4_NONFRAG,
3377 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3378 RTE_PTYPE_TUNNEL_GTPU |
3379 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3380 RTE_PTYPE_INNER_L4_UDP,
3381 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3382 RTE_PTYPE_TUNNEL_GTPU |
3383 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3384 RTE_PTYPE_INNER_L4_TCP,
3385 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3386 RTE_PTYPE_TUNNEL_GTPU |
3387 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3388 RTE_PTYPE_INNER_L4_ICMP,
3390 /* IPv6 --> GTPU --> IPv6 */
3391 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3392 RTE_PTYPE_TUNNEL_GTPU |
3393 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3394 RTE_PTYPE_INNER_L4_FRAG,
3395 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3396 RTE_PTYPE_TUNNEL_GTPU |
3397 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3398 RTE_PTYPE_INNER_L4_NONFRAG,
3399 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3400 RTE_PTYPE_TUNNEL_GTPU |
3401 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3402 RTE_PTYPE_INNER_L4_UDP,
3403 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3404 RTE_PTYPE_TUNNEL_GTPU |
3405 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3406 RTE_PTYPE_INNER_L4_TCP,
3407 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3408 RTE_PTYPE_TUNNEL_GTPU |
3409 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3410 RTE_PTYPE_INNER_L4_ICMP,
3412 /* IPv4 --> UDP ECPRI */
3413 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3415 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3417 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3419 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3421 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3423 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3425 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3427 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3429 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3431 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3434 /* IPV6 --> UDP ECPRI */
3435 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3437 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3439 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3441 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3443 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3445 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3447 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3449 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3451 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3453 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3455 /* All others reserved */