1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
61 iavf_monitor_callback(const uint64_t value,
62 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
64 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
66 * we expect the DD bit to be set to 1 if this descriptor was already
69 return (value & m) == m ? -1 : 0;
73 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
75 struct iavf_rx_queue *rxq = rx_queue;
76 volatile union iavf_rx_desc *rxdp;
80 rxdp = &rxq->rx_ring[desc];
81 /* watch for changes in status bit */
82 pmc->addr = &rxdp->wb.qword1.status_error_len;
84 /* comparison callback */
85 pmc->fn = iavf_monitor_callback;
88 /* the monitored descriptor status word is 64-bit */
88 pmc->size = sizeof(uint64_t);
94 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
96 /* The following constraints must be satisfied:
97 * thresh < rxq->nb_rx_desc
99 if (thresh >= nb_desc) {
100 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
108 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
109 uint16_t tx_free_thresh)
111 /* TX descriptors will have their RS bit set after tx_rs_thresh
112 * descriptors have been used. The TX descriptor ring will be cleaned
113 * after tx_free_thresh descriptors are used or if the number of
114 * descriptors required to transmit a packet is greater than the
115 * number of free TX descriptors.
117 * The following constraints must be satisfied:
118 * - tx_rs_thresh must be less than the size of the ring minus 2.
119 * - tx_free_thresh must be less than the size of the ring minus 3.
120 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
121 * - tx_rs_thresh must be a divisor of the ring size.
123 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
124 * race condition, hence the maximum threshold constraints. When set
125 * to zero use default values.
127 if (tx_rs_thresh >= (nb_desc - 2)) {
128 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
129 "number of TX descriptors (%u) minus 2",
130 tx_rs_thresh, nb_desc);
133 if (tx_free_thresh >= (nb_desc - 3)) {
134 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
135 "number of TX descriptors (%u) minus 3.",
136 tx_free_thresh, nb_desc);
139 if (tx_rs_thresh > tx_free_thresh) {
140 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
141 "equal to tx_free_thresh (%u).",
142 tx_rs_thresh, tx_free_thresh);
145 if ((nb_desc % tx_rs_thresh) != 0) {
146 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
147 "number of TX descriptors (%u).",
148 tx_rs_thresh, nb_desc);
156 check_rx_vec_allow(struct iavf_rx_queue *rxq)
158 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
159 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
160 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
164 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
169 check_tx_vec_allow(struct iavf_tx_queue *txq)
171 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
172 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
173 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
174 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
177 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
182 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
186 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
187 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
188 "rxq->rx_free_thresh=%d, "
189 "IAVF_RX_MAX_BURST=%d",
190 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
192 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
193 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
194 "rxq->nb_rx_desc=%d, "
195 "rxq->rx_free_thresh=%d",
196 rxq->nb_rx_desc, rxq->rx_free_thresh);
203 reset_rx_queue(struct iavf_rx_queue *rxq)
211 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
213 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
214 ((volatile char *)rxq->rx_ring)[i] = 0;
216 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
218 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
219 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
222 rxq->rx_nb_avail = 0;
223 rxq->rx_next_avail = 0;
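/* rx_free_trigger is the last index of the next descriptor group to be refilled on the bulk-alloc Rx path. */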
224 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
229 if (rxq->pkt_first_seg != NULL)
230 rte_pktmbuf_free(rxq->pkt_first_seg);
232 rxq->pkt_first_seg = NULL;
233 rxq->pkt_last_seg = NULL;
235 rxq->rxrearm_start = 0;
239 reset_tx_queue(struct iavf_tx_queue *txq)
241 struct iavf_tx_entry *txe;
246 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
251 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
252 for (i = 0; i < size; i++)
253 ((volatile char *)txq->tx_ring)[i] = 0;
255 prev = (uint16_t)(txq->nb_tx_desc - 1);
256 for (i = 0; i < txq->nb_tx_desc; i++) {
257 txq->tx_ring[i].cmd_type_offset_bsz =
258 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
261 txe[prev].next_id = i;
268 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
269 txq->nb_free = txq->nb_tx_desc - 1;
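/* next_dd: first descriptor expected to report completion; next_rs: descriptor that will carry the next RS bit. */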
271 txq->next_dd = txq->rs_thresh - 1;
272 txq->next_rs = txq->rs_thresh - 1;
276 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
278 volatile union iavf_rx_desc *rxd;
279 struct rte_mbuf *mbuf = NULL;
283 for (i = 0; i < rxq->nb_rx_desc; i++) {
284 mbuf = rte_mbuf_raw_alloc(rxq->mp);
285 if (unlikely(!mbuf)) {
286 for (j = 0; j < i; j++) {
287 rte_pktmbuf_free_seg(rxq->sw_ring[j]);
288 rxq->sw_ring[j] = NULL;
290 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
294 rte_mbuf_refcnt_set(mbuf, 1);
296 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
298 mbuf->port = rxq->port_id;
301 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
303 rxd = &rxq->rx_ring[i];
304 rxd->read.pkt_addr = dma_addr;
305 rxd->read.hdr_addr = 0;
306 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
311 rxq->sw_ring[i] = mbuf;
318 release_rxq_mbufs(struct iavf_rx_queue *rxq)
325 for (i = 0; i < rxq->nb_rx_desc; i++) {
326 if (rxq->sw_ring[i]) {
327 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
328 rxq->sw_ring[i] = NULL;
333 if (rxq->rx_nb_avail == 0)
335 for (i = 0; i < rxq->rx_nb_avail; i++) {
336 struct rte_mbuf *mbuf;
338 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
339 rte_pktmbuf_free_seg(mbuf);
341 rxq->rx_nb_avail = 0;
345 release_txq_mbufs(struct iavf_tx_queue *txq)
349 if (!txq || !txq->sw_ring) {
350 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
354 for (i = 0; i < txq->nb_tx_desc; i++) {
355 if (txq->sw_ring[i].mbuf) {
356 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
357 txq->sw_ring[i].mbuf = NULL;
362 static const struct iavf_rxq_ops def_rxq_ops = {
363 .release_mbufs = release_rxq_mbufs,
366 static const struct iavf_txq_ops def_txq_ops = {
367 .release_mbufs = release_txq_mbufs,
371 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
373 volatile union iavf_rx_flex_desc *rxdp)
375 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
376 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
377 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
381 if (desc->flow_id != 0xFFFFFFFF) {
382 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
383 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
386 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
387 stat_err = rte_le_to_cpu_16(desc->status_error0);
388 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
389 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
390 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
396 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
398 volatile union iavf_rx_flex_desc *rxdp)
400 volatile struct iavf_32b_rx_flex_desc_comms *desc =
401 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
404 stat_err = rte_le_to_cpu_16(desc->status_error0);
405 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
406 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
407 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
410 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
411 if (desc->flow_id != 0xFFFFFFFF) {
412 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
413 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
416 if (rxq->xtr_ol_flag) {
417 uint32_t metadata = 0;
419 stat_err = rte_le_to_cpu_16(desc->status_error1);
421 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
422 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
424 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
426 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
429 mb->ol_flags |= rxq->xtr_ol_flag;
431 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
438 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
440 volatile union iavf_rx_flex_desc *rxdp)
442 volatile struct iavf_32b_rx_flex_desc_comms *desc =
443 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
446 stat_err = rte_le_to_cpu_16(desc->status_error0);
447 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
448 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
449 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
452 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
453 if (desc->flow_id != 0xFFFFFFFF) {
454 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
455 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
458 if (rxq->xtr_ol_flag) {
459 uint32_t metadata = 0;
461 if (desc->flex_ts.flex.aux0 != 0xFFFF)
462 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
463 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
464 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
467 mb->ol_flags |= rxq->xtr_ol_flag;
469 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
476 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
479 case IAVF_RXDID_COMMS_AUX_VLAN:
480 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
481 rxq->rxd_to_pkt_fields =
482 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
484 case IAVF_RXDID_COMMS_AUX_IPV4:
485 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
486 rxq->rxd_to_pkt_fields =
487 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
489 case IAVF_RXDID_COMMS_AUX_IPV6:
490 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
491 rxq->rxd_to_pkt_fields =
492 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
494 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
496 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
497 rxq->rxd_to_pkt_fields =
498 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
500 case IAVF_RXDID_COMMS_AUX_TCP:
501 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
502 rxq->rxd_to_pkt_fields =
503 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
505 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
507 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
508 rxq->rxd_to_pkt_fields =
509 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
511 case IAVF_RXDID_COMMS_OVS_1:
512 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
515 /* update this according to the RXDID for FLEX_DESC_NONE */
516 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
520 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
521 rxq->xtr_ol_flag = 0;
525 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
526 uint16_t nb_desc, unsigned int socket_id,
527 const struct rte_eth_rxconf *rx_conf,
528 struct rte_mempool *mp)
530 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
531 struct iavf_adapter *ad =
532 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
533 struct iavf_info *vf =
534 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
535 struct iavf_vsi *vsi = &vf->vsi;
536 struct iavf_rx_queue *rxq;
537 const struct rte_memzone *mz;
541 uint16_t rx_free_thresh;
544 PMD_INIT_FUNC_TRACE();
546 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
548 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
549 nb_desc > IAVF_MAX_RING_DESC ||
550 nb_desc < IAVF_MIN_RING_DESC) {
551 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
556 /* Check free threshold */
557 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
558 IAVF_DEFAULT_RX_FREE_THRESH :
559 rx_conf->rx_free_thresh;
560 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
563 /* Free memory if needed */
564 if (dev->data->rx_queues[queue_idx]) {
565 iavf_dev_rx_queue_release(dev, queue_idx);
566 dev->data->rx_queues[queue_idx] = NULL;
569 /* Allocate the rx queue data structure */
570 rxq = rte_zmalloc_socket("iavf rxq",
571 sizeof(struct iavf_rx_queue),
575 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
576 "rx queue data structure");
580 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
581 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
583 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
584 rxq->proto_xtr = proto_xtr;
586 rxq->rxdid = IAVF_RXDID_LEGACY_1;
587 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
590 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
591 struct virtchnl_vlan_supported_caps *stripping_support =
592 &vf->vlan_v2_caps.offloads.stripping_support;
593 uint32_t stripping_cap;
595 if (stripping_support->outer)
596 stripping_cap = stripping_support->outer;
598 stripping_cap = stripping_support->inner;
600 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
601 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
602 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
603 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
605 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
608 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
611 rxq->nb_rx_desc = nb_desc;
612 rxq->rx_free_thresh = rx_free_thresh;
613 rxq->queue_id = queue_idx;
614 rxq->port_id = dev->data->port_id;
615 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
618 rxq->offloads = offloads;
620 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
621 rxq->crc_len = RTE_ETHER_CRC_LEN;
625 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
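/* Round the data buffer size down to the granularity required by the Rx queue context. */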
626 rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
628 /* Allocate the software ring. */
629 len = nb_desc + IAVF_RX_MAX_BURST;
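/* The extra IAVF_RX_MAX_BURST entries hold the fake_mbuf sentinels used by the bulk-alloc Rx path. */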
631 rte_zmalloc_socket("iavf rx sw ring",
632 sizeof(struct rte_mbuf *) * len,
636 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
641 /* Allocate the maximum number of RX ring hardware descriptors with
642 * a little more to support bulk allocation.
644 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
645 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
647 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
648 ring_size, IAVF_RING_BASE_ALIGN,
651 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
652 rte_free(rxq->sw_ring);
656 /* Zero all the descriptors in the ring. */
657 memset(mz->addr, 0, ring_size);
658 rxq->rx_ring_phys_addr = mz->iova;
659 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
664 dev->data->rx_queues[queue_idx] = rxq;
665 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
666 rxq->ops = &def_rxq_ops;
668 if (check_rx_bulk_allow(rxq) == true) {
669 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
670 "satisfied. Rx Burst Bulk Alloc function will be "
671 "used on port=%d, queue=%d.",
672 rxq->port_id, rxq->queue_id);
674 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
675 "not satisfied, Scattered Rx is requested "
676 "on port=%d, queue=%d.",
677 rxq->port_id, rxq->queue_id);
678 ad->rx_bulk_alloc_allowed = false;
681 if (check_rx_vec_allow(rxq) == false)
682 ad->rx_vec_allowed = false;
688 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
691 unsigned int socket_id,
692 const struct rte_eth_txconf *tx_conf)
694 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
695 struct iavf_info *vf =
696 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
697 struct iavf_tx_queue *txq;
698 const struct rte_memzone *mz;
700 uint16_t tx_rs_thresh, tx_free_thresh;
703 PMD_INIT_FUNC_TRACE();
705 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
707 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
708 nb_desc > IAVF_MAX_RING_DESC ||
709 nb_desc < IAVF_MIN_RING_DESC) {
710 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
715 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
716 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
717 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
718 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
719 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
722 /* Free memory if needed. */
723 if (dev->data->tx_queues[queue_idx]) {
724 iavf_dev_tx_queue_release(dev, queue_idx);
725 dev->data->tx_queues[queue_idx] = NULL;
728 /* Allocate the TX queue data structure. */
729 txq = rte_zmalloc_socket("iavf txq",
730 sizeof(struct iavf_tx_queue),
734 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
735 "tx queue structure");
739 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
740 struct virtchnl_vlan_supported_caps *insertion_support =
741 &vf->vlan_v2_caps.offloads.insertion_support;
742 uint32_t insertion_cap;
744 if (insertion_support->outer)
745 insertion_cap = insertion_support->outer;
747 insertion_cap = insertion_support->inner;
749 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
750 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
751 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
752 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
754 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
757 txq->nb_tx_desc = nb_desc;
758 txq->rs_thresh = tx_rs_thresh;
759 txq->free_thresh = tx_free_thresh;
760 txq->queue_id = queue_idx;
761 txq->port_id = dev->data->port_id;
762 txq->offloads = offloads;
763 txq->tx_deferred_start = tx_conf->tx_deferred_start;
765 /* Allocate software ring */
767 rte_zmalloc_socket("iavf tx sw ring",
768 sizeof(struct iavf_tx_entry) * nb_desc,
772 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
777 /* Allocate TX hardware ring descriptors. */
778 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
779 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
780 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
781 ring_size, IAVF_RING_BASE_ALIGN,
784 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
785 rte_free(txq->sw_ring);
789 txq->tx_ring_phys_addr = mz->iova;
790 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
795 dev->data->tx_queues[queue_idx] = txq;
796 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
797 txq->ops = &def_txq_ops;
799 if (check_tx_vec_allow(txq) == false) {
800 struct iavf_adapter *ad =
801 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
802 ad->tx_vec_allowed = false;
805 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
806 vf->tm_conf.committed) {
808 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
809 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
810 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
811 vf->qtc_map[tc].queue_count))
814 if (tc >= vf->qos_cap->num_elem) {
815 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
825 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
827 struct iavf_adapter *adapter =
828 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
829 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
830 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
831 struct iavf_rx_queue *rxq;
834 PMD_DRV_FUNC_TRACE();
836 if (rx_queue_id >= dev->data->nb_rx_queues)
839 rxq = dev->data->rx_queues[rx_queue_id];
841 err = alloc_rxq_mbufs(rxq);
843 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
849 /* Init the RX tail register. */
850 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
851 IAVF_WRITE_FLUSH(hw);
853 /* Ready to switch the queue on */
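/* vf->lv_enabled selects the large-VF (_lv) variant of the virtchnl queue enable message. */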
855 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
857 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
860 release_rxq_mbufs(rxq);
861 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
864 dev->data->rx_queue_state[rx_queue_id] =
865 RTE_ETH_QUEUE_STATE_STARTED;
872 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
874 struct iavf_adapter *adapter =
875 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
876 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
877 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
878 struct iavf_tx_queue *txq;
881 PMD_DRV_FUNC_TRACE();
883 if (tx_queue_id >= dev->data->nb_tx_queues)
886 txq = dev->data->tx_queues[tx_queue_id];
888 /* Init the TX tail register. */
889 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
890 IAVF_WRITE_FLUSH(hw);
892 /* Ready to switch the queue on */
894 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
896 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
899 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
902 dev->data->tx_queue_state[tx_queue_id] =
903 RTE_ETH_QUEUE_STATE_STARTED;
909 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
911 struct iavf_adapter *adapter =
912 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
913 struct iavf_rx_queue *rxq;
916 PMD_DRV_FUNC_TRACE();
918 if (rx_queue_id >= dev->data->nb_rx_queues)
921 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
923 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
928 rxq = dev->data->rx_queues[rx_queue_id];
929 rxq->ops->release_mbufs(rxq);
931 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
937 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
939 struct iavf_adapter *adapter =
940 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
941 struct iavf_tx_queue *txq;
944 PMD_DRV_FUNC_TRACE();
946 if (tx_queue_id >= dev->data->nb_tx_queues)
949 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
951 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
956 txq = dev->data->tx_queues[tx_queue_id];
957 txq->ops->release_mbufs(txq);
959 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
965 iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
967 struct iavf_rx_queue *q = dev->data->rx_queues[qid];
972 q->ops->release_mbufs(q);
973 rte_free(q->sw_ring);
974 rte_memzone_free(q->mz);
979 iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
981 struct iavf_tx_queue *q = dev->data->tx_queues[qid];
986 q->ops->release_mbufs(q);
987 rte_free(q->sw_ring);
988 rte_memzone_free(q->mz);
993 iavf_stop_queues(struct rte_eth_dev *dev)
995 struct iavf_adapter *adapter =
996 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
997 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
998 struct iavf_rx_queue *rxq;
999 struct iavf_tx_queue *txq;
1002 /* Stop All queues */
1003 if (!vf->lv_enabled) {
1004 ret = iavf_disable_queues(adapter);
1006 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1008 ret = iavf_disable_queues_lv(adapter);
1010 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
1014 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1016 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1017 txq = dev->data->tx_queues[i];
1020 txq->ops->release_mbufs(txq);
1021 reset_tx_queue(txq);
1022 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1024 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1025 rxq = dev->data->rx_queues[i];
1028 rxq->ops->release_mbufs(rxq);
1029 reset_rx_queue(rxq);
1030 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1034 #define IAVF_RX_FLEX_ERR0_BITS \
1035 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1036 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1037 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1038 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1039 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1040 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1043 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1045 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1046 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1047 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1049 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1056 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1057 volatile union iavf_rx_flex_desc *rxdp)
1059 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
1060 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1061 mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
1062 RTE_MBUF_F_RX_VLAN_STRIPPED;
1064 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1069 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1070 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1071 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1072 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
1073 RTE_MBUF_F_RX_QINQ |
1074 RTE_MBUF_F_RX_VLAN_STRIPPED |
1076 mb->vlan_tci_outer = mb->vlan_tci;
1077 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1078 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1079 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1080 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1082 mb->vlan_tci_outer = 0;
1087 /* Translate the rx descriptor status and error fields to pkt flags */
1088 static inline uint64_t
1089 iavf_rxd_to_pkt_flags(uint64_t qword)
1092 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1094 #define IAVF_RX_ERR_BITS 0x3f
1096 /* Check if RSS_HASH */
1097 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1098 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1099 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
1101 /* Check if FDIR Match */
1102 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1103 RTE_MBUF_F_RX_FDIR : 0);
1105 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1106 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1110 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1111 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1113 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1115 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1116 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1118 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1120 /* TODO: Oversize error bit is not processed here */
1125 static inline uint64_t
1126 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1129 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1132 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1133 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1134 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
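/* FLEXBH reports whether the flexible payload field of the descriptor holds a Flow Director filter ID. */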
1136 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1138 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1139 flags |= RTE_MBUF_F_RX_FDIR_ID;
1143 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1144 flags |= RTE_MBUF_F_RX_FDIR_ID;
1149 #define IAVF_RX_FLEX_ERR0_BITS \
1150 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1151 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1152 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1153 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1154 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1155 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1157 /* Rx L3/L4 checksum */
1158 static inline uint64_t
1159 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1163 /* check if HW has decoded the packet and checksum */
1164 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1167 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1168 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1172 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1173 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1175 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1177 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1178 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1180 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1182 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1183 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1188 /* If the number of free RX descriptors is greater than the RX free
1189 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1190 * register. Update the RDT with the value of the last processed RX
1191 * descriptor minus 1, to guarantee that the RDT register is never
1192 * equal to the RDH register, which creates a "full" ring situation
1193 * from the hardware point of view.
1196 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1198 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1200 if (nb_hold > rxq->rx_free_thresh) {
1202 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1203 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1204 rx_id = (uint16_t)((rx_id == 0) ?
1205 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1206 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1209 rxq->nb_rx_hold = nb_hold;
1212 /* implement recv_pkts */
1214 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1216 volatile union iavf_rx_desc *rx_ring;
1217 volatile union iavf_rx_desc *rxdp;
1218 struct iavf_rx_queue *rxq;
1219 union iavf_rx_desc rxd;
1220 struct rte_mbuf *rxe;
1221 struct rte_eth_dev *dev;
1222 struct rte_mbuf *rxm;
1223 struct rte_mbuf *nmb;
1227 uint16_t rx_packet_len;
1228 uint16_t rx_id, nb_hold;
1231 const uint32_t *ptype_tbl;
1236 rx_id = rxq->rx_tail;
1237 rx_ring = rxq->rx_ring;
1238 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1240 while (nb_rx < nb_pkts) {
1241 rxdp = &rx_ring[rx_id];
1242 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1243 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1244 IAVF_RXD_QW1_STATUS_SHIFT;
1246 /* Check the DD bit first */
1247 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1249 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1251 nmb = rte_mbuf_raw_alloc(rxq->mp);
1252 if (unlikely(!nmb)) {
1253 dev = &rte_eth_devices[rxq->port_id];
1254 dev->data->rx_mbuf_alloc_failed++;
1255 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1256 "queue_id=%u", rxq->port_id, rxq->queue_id);
1262 rxe = rxq->sw_ring[rx_id];
1263 rxq->sw_ring[rx_id] = nmb;
1265 if (unlikely(rx_id == rxq->nb_rx_desc))
1268 /* Prefetch next mbuf */
1269 rte_prefetch0(rxq->sw_ring[rx_id]);
1271 /* When next RX descriptor is on a cache line boundary,
1272 * prefetch the next 4 RX descriptors and next 8 pointers
1275 if ((rx_id & 0x3) == 0) {
1276 rte_prefetch0(&rx_ring[rx_id]);
1277 rte_prefetch0(rxq->sw_ring[rx_id]);
1281 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1282 rxdp->read.hdr_addr = 0;
1283 rxdp->read.pkt_addr = dma_addr;
1285 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1286 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1288 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1289 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1292 rxm->pkt_len = rx_packet_len;
1293 rxm->data_len = rx_packet_len;
1294 rxm->port = rxq->port_id;
1296 iavf_rxd_to_vlan_tci(rxm, &rxd);
1297 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1299 ptype_tbl[(uint8_t)((qword1 &
1300 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1302 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1304 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1306 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1307 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1309 rxm->ol_flags |= pkt_flags;
1311 rx_pkts[nb_rx++] = rxm;
1313 rxq->rx_tail = rx_id;
1315 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1320 /* implement recv_pkts for flexible Rx descriptor */
1322 iavf_recv_pkts_flex_rxd(void *rx_queue,
1323 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1325 volatile union iavf_rx_desc *rx_ring;
1326 volatile union iavf_rx_flex_desc *rxdp;
1327 struct iavf_rx_queue *rxq;
1328 union iavf_rx_flex_desc rxd;
1329 struct rte_mbuf *rxe;
1330 struct rte_eth_dev *dev;
1331 struct rte_mbuf *rxm;
1332 struct rte_mbuf *nmb;
1334 uint16_t rx_stat_err0;
1335 uint16_t rx_packet_len;
1336 uint16_t rx_id, nb_hold;
1339 const uint32_t *ptype_tbl;
1344 rx_id = rxq->rx_tail;
1345 rx_ring = rxq->rx_ring;
1346 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1348 while (nb_rx < nb_pkts) {
1349 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1350 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1352 /* Check the DD bit first */
1353 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1355 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1357 nmb = rte_mbuf_raw_alloc(rxq->mp);
1358 if (unlikely(!nmb)) {
1359 dev = &rte_eth_devices[rxq->port_id];
1360 dev->data->rx_mbuf_alloc_failed++;
1361 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1362 "queue_id=%u", rxq->port_id, rxq->queue_id);
1368 rxe = rxq->sw_ring[rx_id];
1369 rxq->sw_ring[rx_id] = nmb;
1371 if (unlikely(rx_id == rxq->nb_rx_desc))
1374 /* Prefetch next mbuf */
1375 rte_prefetch0(rxq->sw_ring[rx_id]);
1377 /* When next RX descriptor is on a cache line boundary,
1378 * prefetch the next 4 RX descriptors and next 8 pointers
1381 if ((rx_id & 0x3) == 0) {
1382 rte_prefetch0(&rx_ring[rx_id]);
1383 rte_prefetch0(rxq->sw_ring[rx_id]);
1387 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1388 rxdp->read.hdr_addr = 0;
1389 rxdp->read.pkt_addr = dma_addr;
1391 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1392 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1394 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1395 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1398 rxm->pkt_len = rx_packet_len;
1399 rxm->data_len = rx_packet_len;
1400 rxm->port = rxq->port_id;
1402 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1403 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1404 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1405 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1406 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1407 rxm->ol_flags |= pkt_flags;
1409 rx_pkts[nb_rx++] = rxm;
1411 rxq->rx_tail = rx_id;
1413 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1418 /* implement recv_scattered_pkts for flexible Rx descriptor */
1420 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1423 struct iavf_rx_queue *rxq = rx_queue;
1424 union iavf_rx_flex_desc rxd;
1425 struct rte_mbuf *rxe;
1426 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1427 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1428 struct rte_mbuf *nmb, *rxm;
1429 uint16_t rx_id = rxq->rx_tail;
1430 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1431 struct rte_eth_dev *dev;
1432 uint16_t rx_stat_err0;
1436 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1437 volatile union iavf_rx_flex_desc *rxdp;
1438 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1440 while (nb_rx < nb_pkts) {
1441 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1442 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1444 /* Check the DD bit */
1445 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1447 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1449 nmb = rte_mbuf_raw_alloc(rxq->mp);
1450 if (unlikely(!nmb)) {
1451 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1452 "queue_id=%u", rxq->port_id, rxq->queue_id);
1453 dev = &rte_eth_devices[rxq->port_id];
1454 dev->data->rx_mbuf_alloc_failed++;
1460 rxe = rxq->sw_ring[rx_id];
1461 rxq->sw_ring[rx_id] = nmb;
1463 if (rx_id == rxq->nb_rx_desc)
1466 /* Prefetch next mbuf */
1467 rte_prefetch0(rxq->sw_ring[rx_id]);
1469 /* When next RX descriptor is on a cache line boundary,
1470 * prefetch the next 4 RX descriptors and next 8 pointers
1473 if ((rx_id & 0x3) == 0) {
1474 rte_prefetch0(&rx_ring[rx_id]);
1475 rte_prefetch0(rxq->sw_ring[rx_id]);
1480 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1482 /* Set data buffer address and data length of the mbuf */
1483 rxdp->read.hdr_addr = 0;
1484 rxdp->read.pkt_addr = dma_addr;
1485 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1486 IAVF_RX_FLX_DESC_PKT_LEN_M;
1487 rxm->data_len = rx_packet_len;
1488 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1490 /* If this is the first buffer of the received packet, set the
1491 * pointer to the first mbuf of the packet and initialize its
1492 * context. Otherwise, update the total length and the number
1493 * of segments of the current scattered packet, and update the
1494 * pointer to the last mbuf of the current packet.
1498 first_seg->nb_segs = 1;
1499 first_seg->pkt_len = rx_packet_len;
1501 first_seg->pkt_len =
1502 (uint16_t)(first_seg->pkt_len +
1504 first_seg->nb_segs++;
1505 last_seg->next = rxm;
1508 /* If this is not the last buffer of the received packet,
1509 * update the pointer to the last mbuf of the current scattered
1510 * packet and continue to parse the RX ring.
1512 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1517 /* This is the last buffer of the received packet. If the CRC
1518 * is not stripped by the hardware:
1519 * - Subtract the CRC length from the total packet length.
1520 * - If the last buffer only contains the whole CRC or a part
1521 * of it, free the mbuf associated to the last buffer. If part
1522 * of the CRC is also contained in the previous mbuf, subtract
1523 * the length of that CRC part from the data length of the
1527 if (unlikely(rxq->crc_len > 0)) {
1528 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1529 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1530 rte_pktmbuf_free_seg(rxm);
1531 first_seg->nb_segs--;
1532 last_seg->data_len =
1533 (uint16_t)(last_seg->data_len -
1534 (RTE_ETHER_CRC_LEN - rx_packet_len));
1535 last_seg->next = NULL;
1537 rxm->data_len = (uint16_t)(rx_packet_len -
1542 first_seg->port = rxq->port_id;
1543 first_seg->ol_flags = 0;
1544 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1545 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1546 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1547 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1548 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1550 first_seg->ol_flags |= pkt_flags;
1552 /* Prefetch data of first segment, if configured to do so. */
1553 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1554 first_seg->data_off));
1555 rx_pkts[nb_rx++] = first_seg;
1559 /* Record index of the next RX descriptor to probe. */
1560 rxq->rx_tail = rx_id;
1561 rxq->pkt_first_seg = first_seg;
1562 rxq->pkt_last_seg = last_seg;
1564 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1569 /* implement recv_scattered_pkts */
1571 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1574 struct iavf_rx_queue *rxq = rx_queue;
1575 union iavf_rx_desc rxd;
1576 struct rte_mbuf *rxe;
1577 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1578 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1579 struct rte_mbuf *nmb, *rxm;
1580 uint16_t rx_id = rxq->rx_tail;
1581 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1582 struct rte_eth_dev *dev;
1588 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1589 volatile union iavf_rx_desc *rxdp;
1590 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1592 while (nb_rx < nb_pkts) {
1593 rxdp = &rx_ring[rx_id];
1594 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1595 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1596 IAVF_RXD_QW1_STATUS_SHIFT;
1598 /* Check the DD bit */
1599 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1601 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1603 nmb = rte_mbuf_raw_alloc(rxq->mp);
1604 if (unlikely(!nmb)) {
1605 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1606 "queue_id=%u", rxq->port_id, rxq->queue_id);
1607 dev = &rte_eth_devices[rxq->port_id];
1608 dev->data->rx_mbuf_alloc_failed++;
1614 rxe = rxq->sw_ring[rx_id];
1615 rxq->sw_ring[rx_id] = nmb;
1617 if (rx_id == rxq->nb_rx_desc)
1620 /* Prefetch next mbuf */
1621 rte_prefetch0(rxq->sw_ring[rx_id]);
1623 /* When next RX descriptor is on a cache line boundary,
1624 * prefetch the next 4 RX descriptors and next 8 pointers
1627 if ((rx_id & 0x3) == 0) {
1628 rte_prefetch0(&rx_ring[rx_id]);
1629 rte_prefetch0(rxq->sw_ring[rx_id]);
1634 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1636 /* Set data buffer address and data length of the mbuf */
1637 rxdp->read.hdr_addr = 0;
1638 rxdp->read.pkt_addr = dma_addr;
1639 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1640 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1641 rxm->data_len = rx_packet_len;
1642 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1644 /* If this is the first buffer of the received packet, set the
1645 * pointer to the first mbuf of the packet and initialize its
1646 * context. Otherwise, update the total length and the number
1647 * of segments of the current scattered packet, and update the
1648 * pointer to the last mbuf of the current packet.
1652 first_seg->nb_segs = 1;
1653 first_seg->pkt_len = rx_packet_len;
1655 first_seg->pkt_len =
1656 (uint16_t)(first_seg->pkt_len +
1658 first_seg->nb_segs++;
1659 last_seg->next = rxm;
1662 /* If this is not the last buffer of the received packet,
1663 * update the pointer to the last mbuf of the current scattered
1664 * packet and continue to parse the RX ring.
1666 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1671 /* This is the last buffer of the received packet. If the CRC
1672 * is not stripped by the hardware:
1673 * - Subtract the CRC length from the total packet length.
1674 * - If the last buffer only contains the whole CRC or a part
1675 * of it, free the mbuf associated to the last buffer. If part
1676 * of the CRC is also contained in the previous mbuf, subtract
1677 * the length of that CRC part from the data length of the
1681 if (unlikely(rxq->crc_len > 0)) {
1682 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1683 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1684 rte_pktmbuf_free_seg(rxm);
1685 first_seg->nb_segs--;
1686 last_seg->data_len =
1687 (uint16_t)(last_seg->data_len -
1688 (RTE_ETHER_CRC_LEN - rx_packet_len));
1689 last_seg->next = NULL;
1691 rxm->data_len = (uint16_t)(rx_packet_len -
1695 first_seg->port = rxq->port_id;
1696 first_seg->ol_flags = 0;
1697 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1698 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1699 first_seg->packet_type =
1700 ptype_tbl[(uint8_t)((qword1 &
1701 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1703 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1704 first_seg->hash.rss =
1705 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1707 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1708 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1710 first_seg->ol_flags |= pkt_flags;
1712 /* Prefetch data of first segment, if configured to do so. */
1713 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1714 first_seg->data_off));
1715 rx_pkts[nb_rx++] = first_seg;
1719 /* Record index of the next RX descriptor to probe. */
1720 rxq->rx_tail = rx_id;
1721 rxq->pkt_first_seg = first_seg;
1722 rxq->pkt_last_seg = last_seg;
1724 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1729 #define IAVF_LOOK_AHEAD 8
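/* Scan the ring IAVF_LOOK_AHEAD descriptors at a time and stage completed packets for later delivery by iavf_rx_fill_from_stage(). */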
1731 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1733 volatile union iavf_rx_flex_desc *rxdp;
1734 struct rte_mbuf **rxep;
1735 struct rte_mbuf *mb;
1738 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1739 int32_t i, j, nb_rx = 0;
1741 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1743 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1744 rxep = &rxq->sw_ring[rxq->rx_tail];
1746 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1748 /* Make sure there is at least 1 packet to receive */
1749 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1752 /* Scan LOOK_AHEAD descriptors at a time to determine which
1753 * descriptors reference packets that are ready to be received.
1755 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1756 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1757 /* Read desc statuses backwards to avoid race condition */
1758 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1759 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1763 /* Compute how many status bits were set */
1764 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1765 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1769 /* Translate descriptor info to mbuf parameters */
1770 for (j = 0; j < nb_dd; j++) {
1771 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1773 i * IAVF_LOOK_AHEAD + j);
1776 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1777 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1778 mb->data_len = pkt_len;
1779 mb->pkt_len = pkt_len;
1782 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1783 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1784 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1785 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1786 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1787 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1789 mb->ol_flags |= pkt_flags;
1792 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1793 rxq->rx_stage[i + j] = rxep[j];
1795 if (nb_dd != IAVF_LOOK_AHEAD)
1799 /* Clear software ring entries */
1800 for (i = 0; i < nb_rx; i++)
1801 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1807 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1809 volatile union iavf_rx_desc *rxdp;
1810 struct rte_mbuf **rxep;
1811 struct rte_mbuf *mb;
1815 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1816 int32_t i, j, nb_rx = 0;
1818 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1820 rxdp = &rxq->rx_ring[rxq->rx_tail];
1821 rxep = &rxq->sw_ring[rxq->rx_tail];
1823 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1824 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1825 IAVF_RXD_QW1_STATUS_SHIFT;
1827 /* Make sure there is at least 1 packet to receive */
1828 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1831 /* Scan LOOK_AHEAD descriptors at a time to determine which
1832 * descriptors reference packets that are ready to be received.
1834 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1835 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1836 /* Read desc statuses backwards to avoid race condition */
1837 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1838 qword1 = rte_le_to_cpu_64(
1839 rxdp[j].wb.qword1.status_error_len);
1840 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1841 IAVF_RXD_QW1_STATUS_SHIFT;
1846 /* Compute how many status bits were set */
1847 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1848 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1852 /* Translate descriptor info to mbuf parameters */
1853 for (j = 0; j < nb_dd; j++) {
1854 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1855 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1858 qword1 = rte_le_to_cpu_64
1859 (rxdp[j].wb.qword1.status_error_len);
1860 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1861 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1862 mb->data_len = pkt_len;
1863 mb->pkt_len = pkt_len;
1865 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1866 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1868 ptype_tbl[(uint8_t)((qword1 &
1869 IAVF_RXD_QW1_PTYPE_MASK) >>
1870 IAVF_RXD_QW1_PTYPE_SHIFT)];
1872 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1873 mb->hash.rss = rte_le_to_cpu_32(
1874 rxdp[j].wb.qword0.hi_dword.rss);
1876 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1877 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1879 mb->ol_flags |= pkt_flags;
1882 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1883 rxq->rx_stage[i + j] = rxep[j];
1885 if (nb_dd != IAVF_LOOK_AHEAD)
1889 /* Clear software ring entries */
1890 for (i = 0; i < nb_rx; i++)
1891 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1896 static inline uint16_t
1897 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1898 struct rte_mbuf **rx_pkts,
1902 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1904 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1906 for (i = 0; i < nb_pkts; i++)
1907 rx_pkts[i] = stage[i];
1909 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1910 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1916 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1918 volatile union iavf_rx_desc *rxdp;
1919 struct rte_mbuf **rxep;
1920 struct rte_mbuf *mb;
1921 uint16_t alloc_idx, i;
1925 /* Allocate buffers in bulk */
1926 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1927 (rxq->rx_free_thresh - 1));
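/* Refill the group of rx_free_thresh descriptors that ends at rx_free_trigger. */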
1928 rxep = &rxq->sw_ring[alloc_idx];
1929 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1930 rxq->rx_free_thresh);
1931 if (unlikely(diag != 0)) {
1932 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1936 rxdp = &rxq->rx_ring[alloc_idx];
1937 for (i = 0; i < rxq->rx_free_thresh; i++) {
1938 if (likely(i < (rxq->rx_free_thresh - 1)))
1939 /* Prefetch next mbuf */
1940 rte_prefetch0(rxep[i + 1]);
1943 rte_mbuf_refcnt_set(mb, 1);
1945 mb->data_off = RTE_PKTMBUF_HEADROOM;
1947 mb->port = rxq->port_id;
1948 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1949 rxdp[i].read.hdr_addr = 0;
1950 rxdp[i].read.pkt_addr = dma_addr;
1953 /* Update rx tail register */
1955 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1957 rxq->rx_free_trigger =
1958 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1959 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1960 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1965 static inline uint16_t
1966 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1968 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1974 if (rxq->rx_nb_avail)
1975 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
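/* No packets staged: scan the HW ring and stage any completed packets. */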
1977 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1978 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1980 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1981 rxq->rx_next_avail = 0;
1982 rxq->rx_nb_avail = nb_rx;
1983 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1985 if (rxq->rx_tail > rxq->rx_free_trigger) {
1986 if (iavf_rx_alloc_bufs(rxq) != 0) {
1989 /* TODO: count rx_mbuf_alloc_failed here */
1991 rxq->rx_nb_avail = 0;
1992 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1993 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1994 rxq->sw_ring[j] = rxq->rx_stage[i];
2000 if (rxq->rx_tail >= rxq->nb_rx_desc)
2003 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
2004 rxq->port_id, rxq->queue_id,
2005 rxq->rx_tail, nb_rx);
2007 if (rxq->rx_nb_avail)
2008 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
2014 iavf_recv_pkts_bulk_alloc(void *rx_queue,
2015 struct rte_mbuf **rx_pkts,
2018 uint16_t nb_rx = 0, n, count;
2020 if (unlikely(nb_pkts == 0))
2023 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2024 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2027 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2028 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2029 nb_rx = (uint16_t)(nb_rx + count);
2030 nb_pkts = (uint16_t)(nb_pkts - count);
2039 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2041 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2042 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2043 uint16_t nb_tx_desc = txq->nb_tx_desc;
2044 uint16_t desc_to_clean_to;
2045 uint16_t nb_tx_to_clean;
2047 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2049 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2050 if (desc_to_clean_to >= nb_tx_desc)
2051 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
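/* Advance to the last descriptor of the packet ending there; HW reports completion (DESC_DONE) on that descriptor. */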
2053 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2054 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2055 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2056 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2057 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2058 "(port=%d queue=%d)", desc_to_clean_to,
2059 txq->port_id, txq->queue_id);
2063 if (last_desc_cleaned > desc_to_clean_to)
2064 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2067 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2070 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2072 txq->last_desc_cleaned = desc_to_clean_to;
2073 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2081 iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
2086 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
2087 cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
2089 /* Time Sync - Currently not supported */
2091 /* Outer L2 TAG 2 Insertion - Currently not supported */
2092 /* Inner L2 TAG 2 Insertion - Currently not supported */
2098 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
2099 const struct rte_mbuf *m)
2101 uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
2102 uint64_t eip_len = 0;
2103 uint64_t eip_noinc = 0;
2104 /* Default - IP_ID is incremented in each segment of LSO */
2106 switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
2107 RTE_MBUF_F_TX_OUTER_IPV6 |
2108 RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
2109 case RTE_MBUF_F_TX_OUTER_IPV4:
2110 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
2111 eip_len = m->outer_l3_len >> 2;
2113 case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
2114 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
2115 eip_len = m->outer_l3_len >> 2;
2117 case RTE_MBUF_F_TX_OUTER_IPV6:
2118 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
2119 eip_len = m->outer_l3_len >> 2;
2123 *qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
2124 eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
2125 eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
2128 static inline uint16_t
2129 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
2132 uint64_t segmentation_field = 0;
2133 uint64_t total_length = 0;
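/* The TSO length programmed in the context descriptor covers the payload only; L2/L3/L4 headers are replicated by HW in each segment. */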
2135 total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
2137 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2138 total_length -= m->outer_l3_len;
2140 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
2141 if (!m->l4_len || !m->tso_segsz)
2142 PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
2143 m->l4_len, m->tso_segsz);
2144 if (m->tso_segsz < 88)
2145 PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
2148 segmentation_field =
2149 (((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
2150 IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
2151 (((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
2152 IAVF_TXD_CTX_QW1_MSS_MASK);
2154 *field |= segmentation_field;
2156 return total_length;
2160 struct iavf_tx_context_desc_qws {
2166 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
2167 struct rte_mbuf *m, uint16_t *tlen)
2169 volatile struct iavf_tx_context_desc_qws *desc_qws =
2170 (volatile struct iavf_tx_context_desc_qws *)desc;
2171 /* fill descriptor type field */
2172 desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
2174 /* fill command field */
2175 iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
2177 /* fill segmentation field */
2178 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
2179 *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
2183 /* fill tunnelling field */
2184 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2185 iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
2189 desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
2190 desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
2195 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
2198 uint64_t command = 0;
2199 uint64_t offset = 0;
2200 uint64_t l2tag1 = 0;
2202 *qw1 = IAVF_TX_DESC_DTYPE_DATA;
2204 command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
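/* Always request Ethernet CRC insertion from HW. */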
2206 /* Descriptor based VLAN insertion */
2207 if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
2208 command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
2209 l2tag1 |= m->vlan_tci;
2213 offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2215 /* Enable L3 checksum offloading inner */
2216 if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
2217 command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2218 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2219 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
2220 command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2221 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2222 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
2223 command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2224 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2227 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2228 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2229 offset |= (m->l4_len >> 2) <<
2230 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2233 /* Enable L4 checksum offloads */
2234 switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2235 case RTE_MBUF_F_TX_TCP_CKSUM:
2236 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2237 offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2238 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2239 break;
2240 case RTE_MBUF_F_TX_SCTP_CKSUM:
2241 command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2242 offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2243 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2244 break;
2245 case RTE_MBUF_F_TX_UDP_CKSUM:
2246 command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2247 offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2248 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2249 break;
2250 }
2252 *qw1 = rte_cpu_to_le_64((((uint64_t)command <<
2253 IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
2254 (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
2255 IAVF_TXD_DATA_QW1_OFFSET_MASK) |
2256 ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
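/* Worked example (illustrative only): for an IPv4/TCP packet with
 * l2_len = 14 and l3_len = 20 requesting RTE_MBUF_F_TX_TCP_CKSUM, the
 * offset field packs MACLEN = 14 >> 1 = 7 (2-byte words),
 * IPLEN = 20 >> 2 = 5 and L4LEN = sizeof(struct rte_tcp_hdr) >> 2 = 5
 * (4-byte words), which the hardware uses to locate the headers when
 * inserting the checksums.
 */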
2260 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
2261 struct rte_mbuf *m, uint64_t desc_template,
2262 uint16_t tlen, uint16_t ipseclen)
2264 uint32_t hdrlen = m->l2_len;
2265 uint32_t bufsz = 0;
2267 /* fill data descriptor qw1 from template */
2268 desc->cmd_type_offset_bsz = desc_template;
2270 /* set data buffer address */
2271 desc->buffer_addr = rte_mbuf_data_iova(m);
2273 /* calculate data buffer size less set header lengths */
2274 if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
2275 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2276 RTE_MBUF_F_TX_UDP_SEG))) {
2277 hdrlen += m->outer_l3_len;
2278 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2279 hdrlen += m->l3_len + m->l4_len;
2280 else
2281 hdrlen += m->l3_len;
2282 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2283 hdrlen += ipseclen;
2284 bufsz = hdrlen + tlen;
2285 } else {
2286 bufsz = m->data_len;
2287 }
2289 /* set data buffer size */
2290 desc->cmd_type_offset_bsz |=
2291 (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
2292 IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
2294 desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
2295 desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
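/* Note (illustrative only): in the common non-tunnelled, non-TSO case the
 * whole mbuf segment is described by one buffer, so bufsz = m->data_len;
 * e.g. a single-segment 1514-byte frame is programmed with buffer_addr
 * pointing at the mbuf data and 1514 in the QW1 TX_BUF_SZ field.
 */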
2301 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2303 struct iavf_tx_queue *txq = tx_queue;
2304 volatile struct iavf_tx_desc *txr = txq->tx_ring;
2305 struct iavf_tx_entry *txe_ring = txq->sw_ring;
2306 struct iavf_tx_entry *txe, *txn;
2307 struct rte_mbuf *mb, *mb_seg;
2308 uint16_t desc_idx, desc_idx_last;
2312 /* Check if the descriptor ring needs to be cleaned. */
2313 if (txq->nb_free < txq->free_thresh)
2314 iavf_xmit_cleanup(txq);
2316 desc_idx = txq->tx_tail;
2317 txe = &txe_ring[desc_idx];
2319 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
2320 iavf_dump_tx_entry_ring(txq);
2321 iavf_dump_tx_desc_ring(txq);
2322 #endif
2325 for (idx = 0; idx < nb_pkts; idx++) {
2326 volatile struct iavf_tx_desc *ddesc;
2327 uint16_t nb_desc_ctx;
2328 uint16_t nb_desc_data, nb_desc_required;
2329 uint16_t tlen = 0, ipseclen = 0;
2330 uint64_t ddesc_template = 0;
2331 uint64_t ddesc_cmd = 0;
2333 mb = tx_pkts[idx];
2335 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2337 nb_desc_data = mb->nb_segs;
2338 nb_desc_ctx = !!(mb->ol_flags &
2339 (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
2340 RTE_MBUF_F_TX_TUNNEL_MASK));
2343 * The number of descriptors that must be allocated for
2344 * a packet equals the number of segments of that packet
2345 * plus the context and IPsec descriptors, if needed.
2347 nb_desc_required = nb_desc_data + nb_desc_ctx;
2349 desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
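/* Example (illustrative only): a TSO packet carried in 3 mbuf segments
 * needs 3 data descriptors plus 1 context descriptor, so
 * nb_desc_required = 4 and desc_idx_last points at the fourth and last
 * descriptor consumed by this packet.
 */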
2351 /* wrap descriptor ring */
2352 if (desc_idx_last >= txq->nb_tx_desc)
2353 desc_idx_last =
2354 (uint16_t)(desc_idx_last - txq->nb_tx_desc);
2357 "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
2358 txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
2360 if (nb_desc_required > txq->nb_free) {
2361 if (iavf_xmit_cleanup(txq)) {
2366 if (unlikely(nb_desc_required > txq->rs_thresh)) {
2367 while (nb_desc_required > txq->nb_free) {
2368 if (iavf_xmit_cleanup(txq)) {
2377 iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
2379 /* Setup TX context descriptor if required */
2380 if (nb_desc_ctx) {
2381 volatile struct iavf_tx_context_desc *ctx_desc =
2382 (volatile struct iavf_tx_context_desc *)
2383 &txr[desc_idx];
2385 /* clear QW0, or the previous writeback value
2386 * may impact the next write
2387 */
2388 *(volatile uint64_t *)ctx_desc = 0;
2390 txn = &txe_ring[txe->next_id];
2391 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2394 rte_pktmbuf_free_seg(txe->mbuf);
2398 iavf_fill_context_desc(ctx_desc, mb, &tlen);
2399 IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
2401 txe->last_id = desc_idx_last;
2402 desc_idx = txe->next_id;
2411 ddesc = (volatile struct iavf_tx_desc *)
2414 txn = &txe_ring[txe->next_id];
2415 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2418 rte_pktmbuf_free_seg(txe->mbuf);
2421 iavf_fill_data_desc(ddesc, mb_seg,
2422 ddesc_template, tlen, ipseclen);
2424 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
2426 txe->last_id = desc_idx_last;
2427 desc_idx = txe->next_id;
2429 mb_seg = mb_seg->next;
2432 /* The last packet data descriptor needs End Of Packet (EOP) */
2433 ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
2435 txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
2436 txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
2438 if (txq->nb_used >= txq->rs_thresh) {
2439 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2440 "%4u (port=%d queue=%d)",
2441 desc_idx_last, txq->port_id, txq->queue_id);
2443 ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
2445 /* Update txq RS bit counters */
2446 txq->nb_used = 0;
2447 }
2449 ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
2450 IAVF_TXD_DATA_QW1_CMD_SHIFT);
2452 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
2458 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2459 txq->port_id, txq->queue_id, desc_idx, idx);
2461 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
2462 txq->tx_tail = desc_idx;
2464 return idx;
2465 }
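/* Usage sketch (illustrative only, not part of the driver): applications
 * reach this burst function through the generic ethdev API, e.g.
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *	if (sent < nb_pkts) {
 *		// ring was full: retry later or free the remaining mbufs
 *	}
 *
 * Applications that use TX offloads typically call rte_eth_tx_prepare()
 * first, which dispatches to iavf_prep_pkts() when that callback is
 * installed as tx_pkt_prepare.
 */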
2467 /* Check if the packet with vlan user priority is transmitted in the
2468 * correct queue.
2469 */
2470 static int
2471 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2473 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2474 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2477 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2479 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2480 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
2488 /* TX prep functions */
2490 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2496 struct iavf_tx_queue *txq = tx_queue;
2497 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2498 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2500 for (i = 0; i < nb_pkts; i++) {
2501 m = tx_pkts[i];
2502 ol_flags = m->ol_flags;
2504 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2505 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2506 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2510 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2511 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2512 /* MSS outside the range is considered malicious */
2517 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2518 rte_errno = ENOTSUP;
2522 #ifdef RTE_ETHDEV_DEBUG_TX
2523 ret = rte_validate_tx_offload(m);
2529 ret = rte_net_intel_cksum_prepare(m);
2535 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2536 ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
2537 ret = iavf_check_vlan_up2tc(txq, m);
2548 /* choose rx function */
2550 iavf_set_rx_function(struct rte_eth_dev *dev)
2552 struct iavf_adapter *adapter =
2553 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2554 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2557 struct iavf_rx_queue *rxq;
2560 bool use_avx2 = false;
2561 bool use_avx512 = false;
2562 bool use_flex = false;
2564 check_ret = iavf_rx_vec_dev_check(dev);
2565 if (check_ret >= 0 &&
2566 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2567 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2568 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2569 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2570 use_avx2 = true;
2572 #ifdef CC_AVX512_SUPPORT
2573 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2574 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2575 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2576 use_avx512 = true;
2577 #endif
2579 if (vf->vf_res->vf_cap_flags &
2580 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2581 use_flex = true;
2583 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2584 rxq = dev->data->rx_queues[i];
2585 (void)iavf_rxq_vec_setup(rxq);
2588 if (dev->data->scattered_rx) {
2591 "Using %sVector Scattered Rx (port %d).",
2592 use_avx2 ? "avx2 " : "",
2593 dev->data->port_id);
2595 if (check_ret == IAVF_VECTOR_PATH)
2597 "Using AVX512 Vector Scattered Rx (port %d).",
2598 dev->data->port_id);
2601 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2602 dev->data->port_id);
2605 dev->rx_pkt_burst = use_avx2 ?
2606 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2607 iavf_recv_scattered_pkts_vec_flex_rxd;
2608 #ifdef CC_AVX512_SUPPORT
2610 if (check_ret == IAVF_VECTOR_PATH)
2612 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2615 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2619 dev->rx_pkt_burst = use_avx2 ?
2620 iavf_recv_scattered_pkts_vec_avx2 :
2621 iavf_recv_scattered_pkts_vec;
2622 #ifdef CC_AVX512_SUPPORT
2624 if (check_ret == IAVF_VECTOR_PATH)
2626 iavf_recv_scattered_pkts_vec_avx512;
2629 iavf_recv_scattered_pkts_vec_avx512_offload;
2635 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2636 use_avx2 ? "avx2 " : "",
2637 dev->data->port_id);
2639 if (check_ret == IAVF_VECTOR_PATH)
2641 "Using AVX512 Vector Rx (port %d).",
2642 dev->data->port_id);
2645 "Using AVX512 OFFLOAD Vector Rx (port %d).",
2646 dev->data->port_id);
2649 dev->rx_pkt_burst = use_avx2 ?
2650 iavf_recv_pkts_vec_avx2_flex_rxd :
2651 iavf_recv_pkts_vec_flex_rxd;
2652 #ifdef CC_AVX512_SUPPORT
2654 if (check_ret == IAVF_VECTOR_PATH)
2656 iavf_recv_pkts_vec_avx512_flex_rxd;
2659 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
2663 dev->rx_pkt_burst = use_avx2 ?
2664 iavf_recv_pkts_vec_avx2 :
2666 #ifdef CC_AVX512_SUPPORT
2668 if (check_ret == IAVF_VECTOR_PATH)
2670 iavf_recv_pkts_vec_avx512;
2673 iavf_recv_pkts_vec_avx512_offload;
2683 if (dev->data->scattered_rx) {
2684 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2685 dev->data->port_id);
2686 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2687 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2689 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2690 } else if (adapter->rx_bulk_alloc_allowed) {
2691 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2692 dev->data->port_id);
2693 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2695 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2696 dev->data->port_id);
2697 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2698 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2700 dev->rx_pkt_burst = iavf_recv_pkts;
2704 /* choose tx function */
2706 iavf_set_tx_function(struct rte_eth_dev *dev)
2709 struct iavf_tx_queue *txq;
2712 bool use_sse = false;
2713 bool use_avx2 = false;
2714 bool use_avx512 = false;
2716 check_ret = iavf_tx_vec_dev_check(dev);
2718 if (check_ret >= 0 &&
2719 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2720 /* SSE and AVX2 do not support the offload path yet. */
2721 if (check_ret == IAVF_VECTOR_PATH) {
2722 use_sse = true;
2723 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2724 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2725 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2726 use_avx2 = true;
2727 }
2728 #ifdef CC_AVX512_SUPPORT
2729 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2730 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2731 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2732 use_avx512 = true;
2733 #endif
2735 if (!use_sse && !use_avx2 && !use_avx512)
2739 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2740 use_avx2 ? "avx2 " : "",
2741 dev->data->port_id);
2742 dev->tx_pkt_burst = use_avx2 ?
2743 iavf_xmit_pkts_vec_avx2 :
2746 dev->tx_pkt_prepare = NULL;
2747 #ifdef CC_AVX512_SUPPORT
2749 if (check_ret == IAVF_VECTOR_PATH) {
2750 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2751 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
2752 dev->data->port_id);
2754 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
2755 dev->tx_pkt_prepare = iavf_prep_pkts;
2756 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
2757 dev->data->port_id);
2762 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2763 txq = dev->data->tx_queues[i];
2766 #ifdef CC_AVX512_SUPPORT
2768 iavf_txq_vec_setup_avx512(txq);
2770 iavf_txq_vec_setup(txq);
2772 iavf_txq_vec_setup(txq);
2781 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2782 dev->data->port_id);
2783 dev->tx_pkt_burst = iavf_xmit_pkts;
2784 dev->tx_pkt_prepare = iavf_prep_pkts;
2788 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2791 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2792 uint16_t i, tx_last, tx_id;
2793 uint16_t nb_tx_free_last;
2794 uint16_t nb_tx_to_clean;
2797 /* Start freeing mbufs from the descriptor after tx_tail */
2798 tx_last = txq->tx_tail;
2799 tx_id = swr_ring[tx_last].next_id;
2801 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2802 return 0;
2804 nb_tx_to_clean = txq->nb_free;
2805 nb_tx_free_last = txq->nb_free;
2806 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2807 free_cnt = txq->nb_tx_desc;
2809 /* Loop through swr_ring to count the number of
2810 * freeable mbufs and packets.
2811 */
2812 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2813 for (i = 0; i < nb_tx_to_clean &&
2814 pkt_cnt < free_cnt &&
2815 tx_id != tx_last; i++) {
2816 if (swr_ring[tx_id].mbuf != NULL) {
2817 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2818 swr_ring[tx_id].mbuf = NULL;
2821 * last segment in the packet,
2822 * increment packet count
2824 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2827 tx_id = swr_ring[tx_id].next_id;
2830 if (txq->rs_thresh > txq->nb_tx_desc -
2831 txq->nb_free || tx_id == tx_last)
2832 break;
2834 if (pkt_cnt < free_cnt) {
2835 if (iavf_xmit_cleanup(txq))
2836 break;
2838 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2839 nb_tx_free_last = txq->nb_free;
2843 return (int)pkt_cnt;
2847 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2849 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2851 return iavf_tx_done_cleanup_full(q, free_cnt);
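/* Usage sketch (illustrative only): this callback backs the generic
 * rte_eth_tx_done_cleanup() API, e.g.
 *
 *	int n = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * where free_cnt == 0 asks the driver to free as many transmitted mbufs
 * as possible (capped here at nb_tx_desc) and n is the number of packets
 * whose mbufs were released.
 */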
2855 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2856 struct rte_eth_rxq_info *qinfo)
2858 struct iavf_rx_queue *rxq;
2860 rxq = dev->data->rx_queues[queue_id];
2862 qinfo->mp = rxq->mp;
2863 qinfo->scattered_rx = dev->data->scattered_rx;
2864 qinfo->nb_desc = rxq->nb_rx_desc;
2866 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2867 qinfo->conf.rx_drop_en = true;
2868 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2872 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2873 struct rte_eth_txq_info *qinfo)
2875 struct iavf_tx_queue *txq;
2877 txq = dev->data->tx_queues[queue_id];
2879 qinfo->nb_desc = txq->nb_tx_desc;
2881 qinfo->conf.tx_free_thresh = txq->free_thresh;
2882 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2883 qinfo->conf.offloads = txq->offloads;
2884 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2887 /* Get the number of used descriptors of a rx queue */
2889 iavf_dev_rxq_count(void *rx_queue)
2891 #define IAVF_RXQ_SCAN_INTERVAL 4
2892 volatile union iavf_rx_desc *rxdp;
2893 struct iavf_rx_queue *rxq;
2897 rxdp = &rxq->rx_ring[rxq->rx_tail];
2899 while ((desc < rxq->nb_rx_desc) &&
2900 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2901 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2902 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2903 /* Check the DD bit of every fourth RX descriptor to avoid
2904 * checking too frequently and degrading performance.
2905 */
2907 desc += IAVF_RXQ_SCAN_INTERVAL;
2908 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2909 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2910 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2911 desc - rxq->nb_rx_desc]);
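/* Note (illustrative only): because the scan advances IAVF_RXQ_SCAN_INTERVAL
 * descriptors at a time, the value reported through rte_eth_rx_queue_count()
 * is a multiple of 4 and is therefore an approximation of the exact number
 * of used descriptors.
 */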
2918 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2920 struct iavf_rx_queue *rxq = rx_queue;
2921 volatile uint64_t *status;
2925 if (unlikely(offset >= rxq->nb_rx_desc))
2926 return -EINVAL;
2928 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2929 return RTE_ETH_RX_DESC_UNAVAIL;
2931 desc = rxq->rx_tail + offset;
2932 if (desc >= rxq->nb_rx_desc)
2933 desc -= rxq->nb_rx_desc;
2935 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2936 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2937 << IAVF_RXD_QW1_STATUS_SHIFT);
2938 if (*status & mask)
2939 return RTE_ETH_RX_DESC_DONE;
2941 return RTE_ETH_RX_DESC_AVAIL;
2945 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2947 struct iavf_tx_queue *txq = tx_queue;
2948 volatile uint64_t *status;
2949 uint64_t mask, expect;
2952 if (unlikely(offset >= txq->nb_tx_desc))
2953 return -EINVAL;
2955 desc = txq->tx_tail + offset;
2956 /* go to next desc that has the RS bit */
2957 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2958 txq->rs_thresh;
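/* Worked example (illustrative only): with rs_thresh = 32 and the requested
 * offset landing on descriptor 137, the status is sampled at descriptor
 * ((137 + 31) / 32) * 32 = 160, i.e. rounded up to the next rs_thresh
 * boundary where a descriptor write-back is expected.
 */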
2959 if (desc >= txq->nb_tx_desc) {
2960 desc -= txq->nb_tx_desc;
2961 if (desc >= txq->nb_tx_desc)
2962 desc -= txq->nb_tx_desc;
2965 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2966 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2967 expect = rte_cpu_to_le_64(
2968 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2969 if ((*status & mask) == expect)
2970 return RTE_ETH_TX_DESC_DONE;
2972 return RTE_ETH_TX_DESC_FULL;
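/* Usage sketch (illustrative only): both descriptor status callbacks are
 * reached through the generic ethdev helpers, e.g.
 *
 *	if (rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_TX_DESC_DONE)
 *		// that part of the ring has been transmitted and written back
 *
 * and rte_eth_rx_descriptor_status() maps to iavf_dev_rx_desc_status()
 * above in the same way.
 */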
2975 static inline uint32_t
2976 iavf_get_default_ptype(uint16_t ptype)
2978 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2979 __rte_cache_aligned = {
2982 [1] = RTE_PTYPE_L2_ETHER,
2983 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2984 /* [3] - [5] reserved */
2985 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2986 /* [7] - [10] reserved */
2987 [11] = RTE_PTYPE_L2_ETHER_ARP,
2988 /* [12] - [21] reserved */
2990 /* Non tunneled IPv4 */
2991 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2993 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2994 RTE_PTYPE_L4_NONFRAG,
2995 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2998 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3000 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3002 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3006 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3007 RTE_PTYPE_TUNNEL_IP |
3008 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3009 RTE_PTYPE_INNER_L4_FRAG,
3010 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3011 RTE_PTYPE_TUNNEL_IP |
3012 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3013 RTE_PTYPE_INNER_L4_NONFRAG,
3014 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3015 RTE_PTYPE_TUNNEL_IP |
3016 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3017 RTE_PTYPE_INNER_L4_UDP,
3019 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3020 RTE_PTYPE_TUNNEL_IP |
3021 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3022 RTE_PTYPE_INNER_L4_TCP,
3023 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3024 RTE_PTYPE_TUNNEL_IP |
3025 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3026 RTE_PTYPE_INNER_L4_SCTP,
3027 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3028 RTE_PTYPE_TUNNEL_IP |
3029 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3030 RTE_PTYPE_INNER_L4_ICMP,
3033 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3034 RTE_PTYPE_TUNNEL_IP |
3035 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3036 RTE_PTYPE_INNER_L4_FRAG,
3037 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3038 RTE_PTYPE_TUNNEL_IP |
3039 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3040 RTE_PTYPE_INNER_L4_NONFRAG,
3041 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3042 RTE_PTYPE_TUNNEL_IP |
3043 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3044 RTE_PTYPE_INNER_L4_UDP,
3046 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3047 RTE_PTYPE_TUNNEL_IP |
3048 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3049 RTE_PTYPE_INNER_L4_TCP,
3050 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3051 RTE_PTYPE_TUNNEL_IP |
3052 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3053 RTE_PTYPE_INNER_L4_SCTP,
3054 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3055 RTE_PTYPE_TUNNEL_IP |
3056 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3057 RTE_PTYPE_INNER_L4_ICMP,
3059 /* IPv4 --> GRE/Teredo/VXLAN */
3060 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3061 RTE_PTYPE_TUNNEL_GRENAT,
3063 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3064 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3065 RTE_PTYPE_TUNNEL_GRENAT |
3066 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3067 RTE_PTYPE_INNER_L4_FRAG,
3068 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3069 RTE_PTYPE_TUNNEL_GRENAT |
3070 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3071 RTE_PTYPE_INNER_L4_NONFRAG,
3072 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3073 RTE_PTYPE_TUNNEL_GRENAT |
3074 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3075 RTE_PTYPE_INNER_L4_UDP,
3077 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3078 RTE_PTYPE_TUNNEL_GRENAT |
3079 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3080 RTE_PTYPE_INNER_L4_TCP,
3081 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3082 RTE_PTYPE_TUNNEL_GRENAT |
3083 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3084 RTE_PTYPE_INNER_L4_SCTP,
3085 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3086 RTE_PTYPE_TUNNEL_GRENAT |
3087 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3088 RTE_PTYPE_INNER_L4_ICMP,
3090 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3091 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3092 RTE_PTYPE_TUNNEL_GRENAT |
3093 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3094 RTE_PTYPE_INNER_L4_FRAG,
3095 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3096 RTE_PTYPE_TUNNEL_GRENAT |
3097 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3098 RTE_PTYPE_INNER_L4_NONFRAG,
3099 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3100 RTE_PTYPE_TUNNEL_GRENAT |
3101 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3102 RTE_PTYPE_INNER_L4_UDP,
3104 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3105 RTE_PTYPE_TUNNEL_GRENAT |
3106 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3107 RTE_PTYPE_INNER_L4_TCP,
3108 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3109 RTE_PTYPE_TUNNEL_GRENAT |
3110 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3111 RTE_PTYPE_INNER_L4_SCTP,
3112 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3113 RTE_PTYPE_TUNNEL_GRENAT |
3114 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3115 RTE_PTYPE_INNER_L4_ICMP,
3117 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3118 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3119 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3121 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3122 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3123 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3124 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3125 RTE_PTYPE_INNER_L4_FRAG,
3126 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3127 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3128 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3129 RTE_PTYPE_INNER_L4_NONFRAG,
3130 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3131 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3133 RTE_PTYPE_INNER_L4_UDP,
3135 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3136 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3137 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3138 RTE_PTYPE_INNER_L4_TCP,
3139 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3140 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3141 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3142 RTE_PTYPE_INNER_L4_SCTP,
3143 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3144 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3145 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3146 RTE_PTYPE_INNER_L4_ICMP,
3148 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3149 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3150 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3151 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3152 RTE_PTYPE_INNER_L4_FRAG,
3153 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3154 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3155 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3156 RTE_PTYPE_INNER_L4_NONFRAG,
3157 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3158 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3159 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3160 RTE_PTYPE_INNER_L4_UDP,
3162 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3163 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3164 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3165 RTE_PTYPE_INNER_L4_TCP,
3166 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3167 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3168 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3169 RTE_PTYPE_INNER_L4_SCTP,
3170 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3171 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3172 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3173 RTE_PTYPE_INNER_L4_ICMP,
3174 /* [73] - [87] reserved */
3176 /* Non tunneled IPv6 */
3177 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3179 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3180 RTE_PTYPE_L4_NONFRAG,
3181 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3184 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3186 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3188 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3192 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3193 RTE_PTYPE_TUNNEL_IP |
3194 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3195 RTE_PTYPE_INNER_L4_FRAG,
3196 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3197 RTE_PTYPE_TUNNEL_IP |
3198 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3199 RTE_PTYPE_INNER_L4_NONFRAG,
3200 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3201 RTE_PTYPE_TUNNEL_IP |
3202 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3203 RTE_PTYPE_INNER_L4_UDP,
3205 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3206 RTE_PTYPE_TUNNEL_IP |
3207 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3208 RTE_PTYPE_INNER_L4_TCP,
3209 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3210 RTE_PTYPE_TUNNEL_IP |
3211 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3212 RTE_PTYPE_INNER_L4_SCTP,
3213 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3214 RTE_PTYPE_TUNNEL_IP |
3215 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3216 RTE_PTYPE_INNER_L4_ICMP,
3219 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3220 RTE_PTYPE_TUNNEL_IP |
3221 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3222 RTE_PTYPE_INNER_L4_FRAG,
3223 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3224 RTE_PTYPE_TUNNEL_IP |
3225 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3226 RTE_PTYPE_INNER_L4_NONFRAG,
3227 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3228 RTE_PTYPE_TUNNEL_IP |
3229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3230 RTE_PTYPE_INNER_L4_UDP,
3231 /* [105] reserved */
3232 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3233 RTE_PTYPE_TUNNEL_IP |
3234 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3235 RTE_PTYPE_INNER_L4_TCP,
3236 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3237 RTE_PTYPE_TUNNEL_IP |
3238 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3239 RTE_PTYPE_INNER_L4_SCTP,
3240 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3241 RTE_PTYPE_TUNNEL_IP |
3242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3243 RTE_PTYPE_INNER_L4_ICMP,
3245 /* IPv6 --> GRE/Teredo/VXLAN */
3246 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3247 RTE_PTYPE_TUNNEL_GRENAT,
3249 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3250 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3251 RTE_PTYPE_TUNNEL_GRENAT |
3252 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3253 RTE_PTYPE_INNER_L4_FRAG,
3254 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3255 RTE_PTYPE_TUNNEL_GRENAT |
3256 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3257 RTE_PTYPE_INNER_L4_NONFRAG,
3258 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3259 RTE_PTYPE_TUNNEL_GRENAT |
3260 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3261 RTE_PTYPE_INNER_L4_UDP,
3262 /* [113] reserved */
3263 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3264 RTE_PTYPE_TUNNEL_GRENAT |
3265 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3266 RTE_PTYPE_INNER_L4_TCP,
3267 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3268 RTE_PTYPE_TUNNEL_GRENAT |
3269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3270 RTE_PTYPE_INNER_L4_SCTP,
3271 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3272 RTE_PTYPE_TUNNEL_GRENAT |
3273 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3274 RTE_PTYPE_INNER_L4_ICMP,
3276 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3277 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3278 RTE_PTYPE_TUNNEL_GRENAT |
3279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3280 RTE_PTYPE_INNER_L4_FRAG,
3281 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3282 RTE_PTYPE_TUNNEL_GRENAT |
3283 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3284 RTE_PTYPE_INNER_L4_NONFRAG,
3285 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3286 RTE_PTYPE_TUNNEL_GRENAT |
3287 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3288 RTE_PTYPE_INNER_L4_UDP,
3289 /* [120] reserved */
3290 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3291 RTE_PTYPE_TUNNEL_GRENAT |
3292 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3293 RTE_PTYPE_INNER_L4_TCP,
3294 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3295 RTE_PTYPE_TUNNEL_GRENAT |
3296 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3297 RTE_PTYPE_INNER_L4_SCTP,
3298 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3299 RTE_PTYPE_TUNNEL_GRENAT |
3300 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3301 RTE_PTYPE_INNER_L4_ICMP,
3303 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3304 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3305 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3307 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3308 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3309 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3310 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3311 RTE_PTYPE_INNER_L4_FRAG,
3312 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3314 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3315 RTE_PTYPE_INNER_L4_NONFRAG,
3316 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3318 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3319 RTE_PTYPE_INNER_L4_UDP,
3320 /* [128] reserved */
3321 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3322 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3323 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3324 RTE_PTYPE_INNER_L4_TCP,
3325 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3326 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3327 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3328 RTE_PTYPE_INNER_L4_SCTP,
3329 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3330 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3331 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3332 RTE_PTYPE_INNER_L4_ICMP,
3334 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3335 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3336 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3337 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3338 RTE_PTYPE_INNER_L4_FRAG,
3339 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3340 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3341 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3342 RTE_PTYPE_INNER_L4_NONFRAG,
3343 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3344 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3345 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3346 RTE_PTYPE_INNER_L4_UDP,
3347 /* [135] reserved */
3348 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3349 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3350 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3351 RTE_PTYPE_INNER_L4_TCP,
3352 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3353 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3354 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3355 RTE_PTYPE_INNER_L4_SCTP,
3356 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3357 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3358 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3359 RTE_PTYPE_INNER_L4_ICMP,
3360 /* [139] - [299] reserved */
3363 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3364 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3366 /* PPPoE --> IPv4 */
3367 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3368 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3370 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3371 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3372 RTE_PTYPE_L4_NONFRAG,
3373 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3374 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3376 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3377 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3379 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3380 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3382 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3383 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3386 /* PPPoE --> IPv6 */
3387 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3388 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3390 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3391 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3392 RTE_PTYPE_L4_NONFRAG,
3393 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3394 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3396 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3397 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3399 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3400 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3402 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3403 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3405 /* [314] - [324] reserved */
3407 /* IPv4/IPv6 --> GTPC/GTPU */
3408 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3409 RTE_PTYPE_TUNNEL_GTPC,
3410 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3411 RTE_PTYPE_TUNNEL_GTPC,
3412 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3413 RTE_PTYPE_TUNNEL_GTPC,
3414 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3415 RTE_PTYPE_TUNNEL_GTPC,
3416 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3417 RTE_PTYPE_TUNNEL_GTPU,
3418 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3419 RTE_PTYPE_TUNNEL_GTPU,
3421 /* IPv4 --> GTPU --> IPv4 */
3422 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3423 RTE_PTYPE_TUNNEL_GTPU |
3424 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3425 RTE_PTYPE_INNER_L4_FRAG,
3426 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3427 RTE_PTYPE_TUNNEL_GTPU |
3428 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3429 RTE_PTYPE_INNER_L4_NONFRAG,
3430 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3431 RTE_PTYPE_TUNNEL_GTPU |
3432 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3433 RTE_PTYPE_INNER_L4_UDP,
3434 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3435 RTE_PTYPE_TUNNEL_GTPU |
3436 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3437 RTE_PTYPE_INNER_L4_TCP,
3438 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3439 RTE_PTYPE_TUNNEL_GTPU |
3440 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3441 RTE_PTYPE_INNER_L4_ICMP,
3443 /* IPv6 --> GTPU --> IPv4 */
3444 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3445 RTE_PTYPE_TUNNEL_GTPU |
3446 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3447 RTE_PTYPE_INNER_L4_FRAG,
3448 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3449 RTE_PTYPE_TUNNEL_GTPU |
3450 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3451 RTE_PTYPE_INNER_L4_NONFRAG,
3452 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3453 RTE_PTYPE_TUNNEL_GTPU |
3454 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3455 RTE_PTYPE_INNER_L4_UDP,
3456 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3457 RTE_PTYPE_TUNNEL_GTPU |
3458 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3459 RTE_PTYPE_INNER_L4_TCP,
3460 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3461 RTE_PTYPE_TUNNEL_GTPU |
3462 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_INNER_L4_ICMP,
3465 /* IPv4 --> GTPU --> IPv6 */
3466 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_TUNNEL_GTPU |
3468 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3469 RTE_PTYPE_INNER_L4_FRAG,
3470 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3471 RTE_PTYPE_TUNNEL_GTPU |
3472 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3473 RTE_PTYPE_INNER_L4_NONFRAG,
3474 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3475 RTE_PTYPE_TUNNEL_GTPU |
3476 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3477 RTE_PTYPE_INNER_L4_UDP,
3478 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3479 RTE_PTYPE_TUNNEL_GTPU |
3480 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3481 RTE_PTYPE_INNER_L4_TCP,
3482 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3483 RTE_PTYPE_TUNNEL_GTPU |
3484 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3485 RTE_PTYPE_INNER_L4_ICMP,
3487 /* IPv6 --> GTPU --> IPv6 */
3488 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3489 RTE_PTYPE_TUNNEL_GTPU |
3490 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3491 RTE_PTYPE_INNER_L4_FRAG,
3492 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3493 RTE_PTYPE_TUNNEL_GTPU |
3494 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3495 RTE_PTYPE_INNER_L4_NONFRAG,
3496 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3497 RTE_PTYPE_TUNNEL_GTPU |
3498 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_INNER_L4_UDP,
3500 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3501 RTE_PTYPE_TUNNEL_GTPU |
3502 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3503 RTE_PTYPE_INNER_L4_TCP,
3504 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3505 RTE_PTYPE_TUNNEL_GTPU |
3506 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3507 RTE_PTYPE_INNER_L4_ICMP,
3509 /* IPv4 --> UDP ECPRI */
3510 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3512 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3514 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3516 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3518 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3520 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3522 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3524 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3526 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3528 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3531 /* IPV6 --> UDP ECPRI */
3532 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3534 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3536 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3538 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3540 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3542 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3544 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3546 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3548 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3550 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3552 /* All others reserved */
3555 return ptype_tbl[ptype];
3559 iavf_set_default_ptype_table(struct rte_eth_dev *dev)
3561 struct iavf_adapter *ad =
3562 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3565 for (i = 0; i < IAVF_MAX_PKT_TYPE; i++)
3566 ad->ptype_tbl[i] = iavf_get_default_ptype(i);
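/* Note (illustrative only): once initialized, the RX paths translate the
 * hardware packet type index into an mbuf packet_type with a single table
 * lookup; e.g. ad->ptype_tbl[23] yields RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG for a plain,
 * non-fragmented IPv4 packet.
 */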