1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "iavf_ipsec_crypto.h"
31 #include "rte_pmd_iavf.h"
33 /* Offset of mbuf dynamic field for protocol extraction's metadata */
34 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
36 /* Mask of mbuf dynamic flags for protocol extraction's type */
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
42 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
43 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
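/* Illustrative application-side sketch (not part of this driver): once the
 * dynfield and dynflags above are registered, an application can read the
 * extracted metadata from a received mbuf roughly as follows:
 *
 *	if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *	    (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask))
 *		metadata = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 */

/* Map a protocol extraction type to the flexible RXDID used to request it. */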
46 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
48 static uint8_t rxdid_map[] = {
49 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
50 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
51 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
52 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
53 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
54 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
55 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
57 IAVF_RXDID_COMMS_IPSEC_CRYPTO,
60 return flex_type < RTE_DIM(rxdid_map) ?
61 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
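/* rte_power_monitor() comparison callback: abort the sleep (return -1) once
 * the hardware has set the DD bit in the watched descriptor.
 */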
65 iavf_monitor_callback(const uint64_t value,
66 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
68 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
70 * we expect the DD bit to be set to 1 if this descriptor was already
73 return (value & m) == m ? -1 : 0;
77 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
79 struct iavf_rx_queue *rxq = rx_queue;
80 volatile union iavf_rx_desc *rxdp;
84 rxdp = &rxq->rx_ring[desc];
85 /* watch for changes in status bit */
86 pmc->addr = &rxdp->wb.qword1.status_error_len;
88 /* comparison callback */
89 pmc->fn = iavf_monitor_callback;
91 /* registers are 64-bit */
92 pmc->size = sizeof(uint64_t);
98 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
100 /* The following constraints must be satisfied:
101 * thresh < rxq->nb_rx_desc
103 if (thresh >= nb_desc) {
104 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
112 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
113 uint16_t tx_free_thresh)
115 /* TX descriptors will have their RS bit set after tx_rs_thresh
116 * descriptors have been used. The TX descriptor ring will be cleaned
117 * after tx_free_thresh descriptors are used or if the number of
118 * descriptors required to transmit a packet is greater than the
119 * number of free TX descriptors.
121 * The following constraints must be satisfied:
122 * - tx_rs_thresh must be less than the size of the ring minus 2.
123 * - tx_free_thresh must be less than the size of the ring minus 3.
124 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
125 * - tx_rs_thresh must be a divisor of the ring size.
127 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
128 * race condition, hence the maximum threshold constraints. When set
129 * to zero use default values.
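 * For example (illustrative values only): with nb_desc = 512, setting
 * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies all constraints:
 * 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.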
131 if (tx_rs_thresh >= (nb_desc - 2)) {
132 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
133 "number of TX descriptors (%u) minus 2",
134 tx_rs_thresh, nb_desc);
137 if (tx_free_thresh >= (nb_desc - 3)) {
138 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
139 "number of TX descriptors (%u) minus 3.",
140 tx_free_thresh, nb_desc);
143 if (tx_rs_thresh > tx_free_thresh) {
144 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
145 "equal to tx_free_thresh (%u).",
146 tx_rs_thresh, tx_free_thresh);
149 if ((nb_desc % tx_rs_thresh) != 0) {
150 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
151 "number of TX descriptors (%u).",
152 tx_rs_thresh, nb_desc);
160 check_rx_vec_allow(struct iavf_rx_queue *rxq)
162 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
163 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
164 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
168 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
173 check_tx_vec_allow(struct iavf_tx_queue *txq)
175 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
176 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
177 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
178 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
181 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
186 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
190 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
191 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
192 "rxq->rx_free_thresh=%d, "
193 "IAVF_RX_MAX_BURST=%d",
194 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
196 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
197 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
198 "rxq->nb_rx_desc=%d, "
199 "rxq->rx_free_thresh=%d",
200 rxq->nb_rx_desc, rxq->rx_free_thresh);
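/* Return an Rx queue to its post-setup state: zero the descriptor ring,
 * point the trailing software-ring entries at the fake mbuf and rewind
 * every ring index.
 */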
207 reset_rx_queue(struct iavf_rx_queue *rxq)
215 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
217 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
218 ((volatile char *)rxq->rx_ring)[i] = 0;
220 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
222 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
223 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
226 rxq->rx_nb_avail = 0;
227 rxq->rx_next_avail = 0;
228 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
233 if (rxq->pkt_first_seg != NULL)
234 rte_pktmbuf_free(rxq->pkt_first_seg);
236 rxq->pkt_first_seg = NULL;
237 rxq->pkt_last_seg = NULL;
239 rxq->rxrearm_start = 0;
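/* Return a Tx queue to its post-setup state: zero the descriptor ring,
 * mark every descriptor as done and re-link the software ring entries.
 */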
243 reset_tx_queue(struct iavf_tx_queue *txq)
245 struct iavf_tx_entry *txe;
250 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
255 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
256 for (i = 0; i < size; i++)
257 ((volatile char *)txq->tx_ring)[i] = 0;
259 prev = (uint16_t)(txq->nb_tx_desc - 1);
260 for (i = 0; i < txq->nb_tx_desc; i++) {
261 txq->tx_ring[i].cmd_type_offset_bsz =
262 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
265 txe[prev].next_id = i;
272 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
273 txq->nb_free = txq->nb_tx_desc - 1;
275 txq->next_dd = txq->rs_thresh - 1;
276 txq->next_rs = txq->rs_thresh - 1;
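/* Allocate one mbuf per Rx descriptor and program its DMA address into the
 * ring; on allocation failure, free everything allocated so far.
 */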
280 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
282 volatile union iavf_rx_desc *rxd;
283 struct rte_mbuf *mbuf = NULL;
287 for (i = 0; i < rxq->nb_rx_desc; i++) {
288 mbuf = rte_mbuf_raw_alloc(rxq->mp);
289 if (unlikely(!mbuf)) {
290 for (j = 0; j < i; j++) {
291 rte_pktmbuf_free_seg(rxq->sw_ring[j]);
292 rxq->sw_ring[j] = NULL;
294 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
298 rte_mbuf_refcnt_set(mbuf, 1);
300 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
302 mbuf->port = rxq->port_id;
305 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
307 rxd = &rxq->rx_ring[i];
308 rxd->read.pkt_addr = dma_addr;
309 rxd->read.hdr_addr = 0;
310 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
315 rxq->sw_ring[i] = mbuf;
322 release_rxq_mbufs(struct iavf_rx_queue *rxq)
329 for (i = 0; i < rxq->nb_rx_desc; i++) {
330 if (rxq->sw_ring[i]) {
331 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
332 rxq->sw_ring[i] = NULL;
337 if (rxq->rx_nb_avail == 0)
339 for (i = 0; i < rxq->rx_nb_avail; i++) {
340 struct rte_mbuf *mbuf;
342 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
343 rte_pktmbuf_free_seg(mbuf);
345 rxq->rx_nb_avail = 0;
349 release_txq_mbufs(struct iavf_tx_queue *txq)
353 if (!txq || !txq->sw_ring) {
354 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
358 for (i = 0; i < txq->nb_tx_desc; i++) {
359 if (txq->sw_ring[i].mbuf) {
360 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
361 txq->sw_ring[i].mbuf = NULL;
366 static const struct iavf_rxq_ops def_rxq_ops = {
367 .release_mbufs = release_rxq_mbufs,
370 static const struct iavf_txq_ops def_txq_ops = {
371 .release_mbufs = release_txq_mbufs,
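/* The iavf_rxd_to_pkt_fields_by_* handlers below copy the metadata carried
 * in a flexible Rx descriptor (RSS hash, FDIR ID, protocol extraction words)
 * into the matching mbuf fields and offload flags.
 */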
375 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
377 volatile union iavf_rx_flex_desc *rxdp)
379 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
380 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
381 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
385 if (desc->flow_id != 0xFFFFFFFF) {
386 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
387 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
390 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
391 stat_err = rte_le_to_cpu_16(desc->status_error0);
392 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
393 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
394 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
400 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
402 volatile union iavf_rx_flex_desc *rxdp)
404 volatile struct iavf_32b_rx_flex_desc_comms *desc =
405 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
408 stat_err = rte_le_to_cpu_16(desc->status_error0);
409 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
410 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
411 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
414 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
415 if (desc->flow_id != 0xFFFFFFFF) {
416 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
417 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
420 if (rxq->xtr_ol_flag) {
421 uint32_t metadata = 0;
423 stat_err = rte_le_to_cpu_16(desc->status_error1);
425 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
426 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
428 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
430 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
433 mb->ol_flags |= rxq->xtr_ol_flag;
435 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
442 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
444 volatile union iavf_rx_flex_desc *rxdp)
446 volatile struct iavf_32b_rx_flex_desc_comms *desc =
447 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
450 stat_err = rte_le_to_cpu_16(desc->status_error0);
451 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
452 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
453 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
456 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
457 if (desc->flow_id != 0xFFFFFFFF) {
458 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
459 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
462 if (rxq->xtr_ol_flag) {
463 uint32_t metadata = 0;
465 if (desc->flex_ts.flex.aux0 != 0xFFFF)
466 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
467 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
468 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
471 mb->ol_flags |= rxq->xtr_ol_flag;
473 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
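/* Pick the descriptor-parsing handler and protocol extraction ol_flag that
 * match the RXDID negotiated for this queue.
 */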
480 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
483 case IAVF_RXDID_COMMS_AUX_VLAN:
484 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
485 rxq->rxd_to_pkt_fields =
486 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
488 case IAVF_RXDID_COMMS_AUX_IPV4:
489 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
490 rxq->rxd_to_pkt_fields =
491 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
493 case IAVF_RXDID_COMMS_AUX_IPV6:
494 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
495 rxq->rxd_to_pkt_fields =
496 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
498 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
500 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
501 rxq->rxd_to_pkt_fields =
502 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
504 case IAVF_RXDID_COMMS_AUX_TCP:
505 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
506 rxq->rxd_to_pkt_fields =
507 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
509 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
511 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
512 rxq->rxd_to_pkt_fields =
513 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
515 case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
517 rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
518 rxq->rxd_to_pkt_fields =
519 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
521 case IAVF_RXDID_COMMS_OVS_1:
522 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
525 /* update this according to the RXDID for FLEX_DESC_NONE */
526 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
530 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
531 rxq->xtr_ol_flag = 0;
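/* Rx queue setup: validate the ring size and free threshold, allocate the
 * queue structure, software ring and DMA descriptor ring, and select the
 * descriptor format to request from the PF.
 */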
535 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
536 uint16_t nb_desc, unsigned int socket_id,
537 const struct rte_eth_rxconf *rx_conf,
538 struct rte_mempool *mp)
540 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
541 struct iavf_adapter *ad =
542 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
543 struct iavf_info *vf =
544 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
545 struct iavf_vsi *vsi = &vf->vsi;
546 struct iavf_rx_queue *rxq;
547 const struct rte_memzone *mz;
551 uint16_t rx_free_thresh;
554 PMD_INIT_FUNC_TRACE();
556 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
558 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
559 nb_desc > IAVF_MAX_RING_DESC ||
560 nb_desc < IAVF_MIN_RING_DESC) {
561 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
566 /* Check free threshold */
567 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
568 IAVF_DEFAULT_RX_FREE_THRESH :
569 rx_conf->rx_free_thresh;
570 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
573 /* Free memory if needed */
574 if (dev->data->rx_queues[queue_idx]) {
575 iavf_dev_rx_queue_release(dev, queue_idx);
576 dev->data->rx_queues[queue_idx] = NULL;
579 /* Allocate the rx queue data structure */
580 rxq = rte_zmalloc_socket("iavf rxq",
581 sizeof(struct iavf_rx_queue),
585 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
586 "rx queue data structure");
590 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
591 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
593 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
594 rxq->proto_xtr = proto_xtr;
596 rxq->rxdid = IAVF_RXDID_LEGACY_1;
597 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
600 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
601 struct virtchnl_vlan_supported_caps *stripping_support =
602 &vf->vlan_v2_caps.offloads.stripping_support;
603 uint32_t stripping_cap;
605 if (stripping_support->outer)
606 stripping_cap = stripping_support->outer;
608 stripping_cap = stripping_support->inner;
610 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
611 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
612 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
613 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
615 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
618 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
621 rxq->nb_rx_desc = nb_desc;
622 rxq->rx_free_thresh = rx_free_thresh;
623 rxq->queue_id = queue_idx;
624 rxq->port_id = dev->data->port_id;
625 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
628 rxq->offloads = offloads;
630 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
631 rxq->crc_len = RTE_ETHER_CRC_LEN;
635 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
636 rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
638 /* Allocate the software ring. */
639 len = nb_desc + IAVF_RX_MAX_BURST;
641 rte_zmalloc_socket("iavf rx sw ring",
642 sizeof(struct rte_mbuf *) * len,
646 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
651 /* Allocate the maximum number of RX ring hardware descriptors with
652 * a little more to support bulk allocation.
654 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
655 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
657 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
658 ring_size, IAVF_RING_BASE_ALIGN,
661 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
662 rte_free(rxq->sw_ring);
666 /* Zero all the descriptors in the ring. */
667 memset(mz->addr, 0, ring_size);
668 rxq->rx_ring_phys_addr = mz->iova;
669 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
674 dev->data->rx_queues[queue_idx] = rxq;
675 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
676 rxq->ops = &def_rxq_ops;
678 if (check_rx_bulk_allow(rxq) == true) {
679 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
680 "satisfied. Rx Burst Bulk Alloc function will be "
681 "used on port=%d, queue=%d.",
682 rxq->port_id, rxq->queue_id);
684 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
685 "not satisfied, Scattered Rx is requested "
686 "on port=%d, queue=%d.",
687 rxq->port_id, rxq->queue_id);
688 ad->rx_bulk_alloc_allowed = false;
691 if (check_rx_vec_allow(rxq) == false)
692 ad->rx_vec_allowed = false;
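/* Tx queue setup: validate the ring size and thresholds, allocate the queue
 * structure, software ring and DMA descriptor ring, and record the VLAN
 * insertion location and QoS TC mapping for this queue.
 */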
698 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
701 unsigned int socket_id,
702 const struct rte_eth_txconf *tx_conf)
704 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
705 struct iavf_adapter *adapter =
706 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
707 struct iavf_info *vf =
708 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
709 struct iavf_tx_queue *txq;
710 const struct rte_memzone *mz;
712 uint16_t tx_rs_thresh, tx_free_thresh;
715 PMD_INIT_FUNC_TRACE();
717 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
719 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
720 nb_desc > IAVF_MAX_RING_DESC ||
721 nb_desc < IAVF_MIN_RING_DESC) {
722 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
727 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
728 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
729 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
730 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
731 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
734 /* Free memory if needed. */
735 if (dev->data->tx_queues[queue_idx]) {
736 iavf_dev_tx_queue_release(dev, queue_idx);
737 dev->data->tx_queues[queue_idx] = NULL;
740 /* Allocate the TX queue data structure. */
741 txq = rte_zmalloc_socket("iavf txq",
742 sizeof(struct iavf_tx_queue),
746 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
747 "tx queue structure");
751 if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
752 struct virtchnl_vlan_supported_caps *insertion_support =
753 &adapter->vf.vlan_v2_caps.offloads.insertion_support;
754 uint32_t insertion_cap;
756 if (insertion_support->outer)
757 insertion_cap = insertion_support->outer;
759 insertion_cap = insertion_support->inner;
761 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
762 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
763 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
764 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
766 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
769 txq->nb_tx_desc = nb_desc;
770 txq->rs_thresh = tx_rs_thresh;
771 txq->free_thresh = tx_free_thresh;
772 txq->queue_id = queue_idx;
773 txq->port_id = dev->data->port_id;
774 txq->offloads = offloads;
775 txq->tx_deferred_start = tx_conf->tx_deferred_start;
777 if (iavf_ipsec_crypto_supported(adapter))
778 txq->ipsec_crypto_pkt_md_offset =
779 iavf_security_get_pkt_md_offset(adapter);
781 /* Allocate software ring */
783 rte_zmalloc_socket("iavf tx sw ring",
784 sizeof(struct iavf_tx_entry) * nb_desc,
788 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
793 /* Allocate TX hardware ring descriptors. */
794 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
795 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
796 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
797 ring_size, IAVF_RING_BASE_ALIGN,
800 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
801 rte_free(txq->sw_ring);
805 txq->tx_ring_phys_addr = mz->iova;
806 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
811 dev->data->tx_queues[queue_idx] = txq;
812 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
813 txq->ops = &def_txq_ops;
815 if (check_tx_vec_allow(txq) == false) {
816 struct iavf_adapter *ad =
817 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
818 ad->tx_vec_allowed = false;
821 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
822 vf->tm_conf.committed) {
824 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
825 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
826 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
827 vf->qtc_map[tc].queue_count))
830 if (tc >= vf->qos_cap->num_elem) {
831 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
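/* Start an Rx queue: fill the ring with mbufs, program the tail register and
 * ask the PF to switch the queue on.
 */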
841 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
843 struct iavf_adapter *adapter =
844 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
845 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
846 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
847 struct iavf_rx_queue *rxq;
850 PMD_DRV_FUNC_TRACE();
852 if (rx_queue_id >= dev->data->nb_rx_queues)
855 rxq = dev->data->rx_queues[rx_queue_id];
857 err = alloc_rxq_mbufs(rxq);
859 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
865 /* Init the RX tail register. */
866 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
867 IAVF_WRITE_FLUSH(hw);
869 /* Ready to switch the queue on */
871 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
873 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
876 release_rxq_mbufs(rxq);
877 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
880 dev->data->rx_queue_state[rx_queue_id] =
881 RTE_ETH_QUEUE_STATE_STARTED;
888 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
890 struct iavf_adapter *adapter =
891 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
892 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
893 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
894 struct iavf_tx_queue *txq;
897 PMD_DRV_FUNC_TRACE();
899 if (tx_queue_id >= dev->data->nb_tx_queues)
902 txq = dev->data->tx_queues[tx_queue_id];
904 /* Init the TX tail register. */
905 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
906 IAVF_WRITE_FLUSH(hw);
908 /* Ready to switch the queue on */
910 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
912 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
915 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
918 dev->data->tx_queue_state[tx_queue_id] =
919 RTE_ETH_QUEUE_STATE_STARTED;
925 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
927 struct iavf_adapter *adapter =
928 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
929 struct iavf_rx_queue *rxq;
932 PMD_DRV_FUNC_TRACE();
934 if (rx_queue_id >= dev->data->nb_rx_queues)
937 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
939 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
944 rxq = dev->data->rx_queues[rx_queue_id];
945 rxq->ops->release_mbufs(rxq);
947 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
953 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
955 struct iavf_adapter *adapter =
956 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
957 struct iavf_tx_queue *txq;
960 PMD_DRV_FUNC_TRACE();
962 if (tx_queue_id >= dev->data->nb_tx_queues)
965 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
967 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
972 txq = dev->data->tx_queues[tx_queue_id];
973 txq->ops->release_mbufs(txq);
975 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
981 iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
983 struct iavf_rx_queue *q = dev->data->rx_queues[qid];
988 q->ops->release_mbufs(q);
989 rte_free(q->sw_ring);
990 rte_memzone_free(q->mz);
995 iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
997 struct iavf_tx_queue *q = dev->data->tx_queues[qid];
1002 q->ops->release_mbufs(q);
1003 rte_free(q->sw_ring);
1004 rte_memzone_free(q->mz);
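/* Stop every Rx and Tx queue through virtchnl, then release their mbufs and
 * reset the rings to their post-setup state.
 */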
1009 iavf_stop_queues(struct rte_eth_dev *dev)
1011 struct iavf_adapter *adapter =
1012 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1013 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1014 struct iavf_rx_queue *rxq;
1015 struct iavf_tx_queue *txq;
1018 /* Stop All queues */
1019 if (!vf->lv_enabled) {
1020 ret = iavf_disable_queues(adapter);
1022 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1024 ret = iavf_disable_queues_lv(adapter);
1026 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
1030 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1032 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1033 txq = dev->data->tx_queues[i];
1036 txq->ops->release_mbufs(txq);
1037 reset_tx_queue(txq);
1038 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1040 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1041 rxq = dev->data->rx_queues[i];
1044 rxq->ops->release_mbufs(rxq);
1045 reset_rx_queue(rxq);
1046 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1050 #define IAVF_RX_FLEX_ERR0_BITS \
1051 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1052 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1053 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1054 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1055 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1056 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1059 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1061 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1062 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1063 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1065 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1072 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1073 volatile union iavf_rx_flex_desc *rxdp)
1075 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
1076 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1077 mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
1078 RTE_MBUF_F_RX_VLAN_STRIPPED;
1080 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1085 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1086 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1087 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1088 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
1089 RTE_MBUF_F_RX_QINQ |
1090 RTE_MBUF_F_RX_VLAN_STRIPPED |
1092 mb->vlan_tci_outer = mb->vlan_tci;
1093 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1094 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1095 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1096 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1098 mb->vlan_tci_outer = 0;
1104 iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
1105 volatile union iavf_rx_flex_desc *rxdp)
1107 volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
1108 (volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
1110 mb->dynfield1[0] = desc->ipsec_said &
1111 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
1115 iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
1116 volatile union iavf_rx_flex_desc *rxdp,
1117 struct iavf_ipsec_crypto_stats *stats)
1119 uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
1121 if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
1122 uint16_t ipsec_status;
1124 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
1126 ipsec_status = status1 &
1127 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
1130 if (unlikely(ipsec_status !=
1131 IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
1132 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
1134 switch (ipsec_status) {
1135 case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
1136 stats->ierrors.sad_miss++;
1138 case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
1139 stats->ierrors.not_processed++;
1141 case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
1142 stats->ierrors.icv_check++;
1144 case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
1145 stats->ierrors.ipsec_length++;
1147 case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
1148 stats->ierrors.misc++;
1152 stats->ierrors.count++;
1157 stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
1159 if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
1161 IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
1162 iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
1167 /* Translate the rx descriptor status and error fields to pkt flags */
1168 static inline uint64_t
1169 iavf_rxd_to_pkt_flags(uint64_t qword)
1172 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1174 #define IAVF_RX_ERR_BITS 0x3f
1176 /* Check if RSS_HASH */
1177 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1178 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1179 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
1181 /* Check if FDIR Match */
1182 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1183 RTE_MBUF_F_RX_FDIR : 0);
1185 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1186 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1190 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1191 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1193 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1195 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1196 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1198 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1200 /* TODO: Oversize error bit is not processed here */
1205 static inline uint64_t
1206 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1209 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1212 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1213 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1214 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1216 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1218 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1219 flags |= RTE_MBUF_F_RX_FDIR_ID;
1223 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1224 flags |= RTE_MBUF_F_RX_FDIR_ID;
1229 #define IAVF_RX_FLEX_ERR0_BITS \
1230 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1231 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1232 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1233 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1234 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1235 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1237 /* Rx L3/L4 checksum */
1238 static inline uint64_t
1239 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1243 /* check if HW has decoded the packet and checksum */
1244 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1247 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1248 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1252 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1253 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1255 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1257 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1258 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1260 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1262 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1263 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1268 /* If the number of free RX descriptors is greater than the RX free
1269 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1270 * register. Update the RDT with the value of the last processed RX
1271 * descriptor minus 1, to guarantee that the RDT register is never
1272 * equal to the RDH register, which creates a "full" ring situation
1273 * from the hardware point of view.
1276 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1278 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1280 if (nb_hold > rxq->rx_free_thresh) {
1282 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1283 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1284 rx_id = (uint16_t)((rx_id == 0) ?
1285 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1286 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1289 rxq->nb_rx_hold = nb_hold;
1292 /* implement recv_pkts */
1294 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1296 volatile union iavf_rx_desc *rx_ring;
1297 volatile union iavf_rx_desc *rxdp;
1298 struct iavf_rx_queue *rxq;
1299 union iavf_rx_desc rxd;
1300 struct rte_mbuf *rxe;
1301 struct rte_eth_dev *dev;
1302 struct rte_mbuf *rxm;
1303 struct rte_mbuf *nmb;
1307 uint16_t rx_packet_len;
1308 uint16_t rx_id, nb_hold;
1311 const uint32_t *ptype_tbl;
1316 rx_id = rxq->rx_tail;
1317 rx_ring = rxq->rx_ring;
1318 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1320 while (nb_rx < nb_pkts) {
1321 rxdp = &rx_ring[rx_id];
1322 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1323 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1324 IAVF_RXD_QW1_STATUS_SHIFT;
1326 /* Check the DD bit first */
1327 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1329 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1331 nmb = rte_mbuf_raw_alloc(rxq->mp);
1332 if (unlikely(!nmb)) {
1333 dev = &rte_eth_devices[rxq->port_id];
1334 dev->data->rx_mbuf_alloc_failed++;
1335 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1336 "queue_id=%u", rxq->port_id, rxq->queue_id);
1342 rxe = rxq->sw_ring[rx_id];
1343 rxq->sw_ring[rx_id] = nmb;
1345 if (unlikely(rx_id == rxq->nb_rx_desc))
1348 /* Prefetch next mbuf */
1349 rte_prefetch0(rxq->sw_ring[rx_id]);
1351 /* When next RX descriptor is on a cache line boundary,
1352 * prefetch the next 4 RX descriptors and next 8 pointers
1355 if ((rx_id & 0x3) == 0) {
1356 rte_prefetch0(&rx_ring[rx_id]);
1357 rte_prefetch0(rxq->sw_ring[rx_id]);
1361 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1362 rxdp->read.hdr_addr = 0;
1363 rxdp->read.pkt_addr = dma_addr;
1365 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1366 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1368 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1369 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1372 rxm->pkt_len = rx_packet_len;
1373 rxm->data_len = rx_packet_len;
1374 rxm->port = rxq->port_id;
1376 iavf_rxd_to_vlan_tci(rxm, &rxd);
1377 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1379 ptype_tbl[(uint8_t)((qword1 &
1380 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1382 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1384 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1386 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1387 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1389 rxm->ol_flags |= pkt_flags;
1391 rx_pkts[nb_rx++] = rxm;
1393 rxq->rx_tail = rx_id;
1395 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1400 /* implement recv_pkts for flexible Rx descriptor */
1402 iavf_recv_pkts_flex_rxd(void *rx_queue,
1403 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1405 volatile union iavf_rx_desc *rx_ring;
1406 volatile union iavf_rx_flex_desc *rxdp;
1407 struct iavf_rx_queue *rxq;
1408 union iavf_rx_flex_desc rxd;
1409 struct rte_mbuf *rxe;
1410 struct rte_eth_dev *dev;
1411 struct rte_mbuf *rxm;
1412 struct rte_mbuf *nmb;
1414 uint16_t rx_stat_err0;
1415 uint16_t rx_packet_len;
1416 uint16_t rx_id, nb_hold;
1419 const uint32_t *ptype_tbl;
1424 rx_id = rxq->rx_tail;
1425 rx_ring = rxq->rx_ring;
1426 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1428 while (nb_rx < nb_pkts) {
1429 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1430 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1432 /* Check the DD bit first */
1433 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1435 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1437 nmb = rte_mbuf_raw_alloc(rxq->mp);
1438 if (unlikely(!nmb)) {
1439 dev = &rte_eth_devices[rxq->port_id];
1440 dev->data->rx_mbuf_alloc_failed++;
1441 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1442 "queue_id=%u", rxq->port_id, rxq->queue_id);
1448 rxe = rxq->sw_ring[rx_id];
1449 rxq->sw_ring[rx_id] = nmb;
1451 if (unlikely(rx_id == rxq->nb_rx_desc))
1454 /* Prefetch next mbuf */
1455 rte_prefetch0(rxq->sw_ring[rx_id]);
1457 /* When next RX descriptor is on a cache line boundary,
1458 * prefetch the next 4 RX descriptors and next 8 pointers
1461 if ((rx_id & 0x3) == 0) {
1462 rte_prefetch0(&rx_ring[rx_id]);
1463 rte_prefetch0(rxq->sw_ring[rx_id]);
1467 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1468 rxdp->read.hdr_addr = 0;
1469 rxdp->read.pkt_addr = dma_addr;
1471 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1472 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1474 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1475 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1478 rxm->pkt_len = rx_packet_len;
1479 rxm->data_len = rx_packet_len;
1480 rxm->port = rxq->port_id;
1482 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1483 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1484 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1485 iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
1486 &rxq->stats.ipsec_crypto);
1487 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1488 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1489 rxm->ol_flags |= pkt_flags;
1491 rx_pkts[nb_rx++] = rxm;
1493 rxq->rx_tail = rx_id;
1495 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1500 /* implement recv_scattered_pkts for flexible Rx descriptor */
1502 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1505 struct iavf_rx_queue *rxq = rx_queue;
1506 union iavf_rx_flex_desc rxd;
1507 struct rte_mbuf *rxe;
1508 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1509 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1510 struct rte_mbuf *nmb, *rxm;
1511 uint16_t rx_id = rxq->rx_tail;
1512 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1513 struct rte_eth_dev *dev;
1514 uint16_t rx_stat_err0;
1518 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1519 volatile union iavf_rx_flex_desc *rxdp;
1520 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1522 while (nb_rx < nb_pkts) {
1523 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1524 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1526 /* Check the DD bit */
1527 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1529 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1531 nmb = rte_mbuf_raw_alloc(rxq->mp);
1532 if (unlikely(!nmb)) {
1533 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1534 "queue_id=%u", rxq->port_id, rxq->queue_id);
1535 dev = &rte_eth_devices[rxq->port_id];
1536 dev->data->rx_mbuf_alloc_failed++;
1542 rxe = rxq->sw_ring[rx_id];
1543 rxq->sw_ring[rx_id] = nmb;
1545 if (rx_id == rxq->nb_rx_desc)
1548 /* Prefetch next mbuf */
1549 rte_prefetch0(rxq->sw_ring[rx_id]);
1551 /* When next RX descriptor is on a cache line boundary,
1552 * prefetch the next 4 RX descriptors and next 8 pointers
1555 if ((rx_id & 0x3) == 0) {
1556 rte_prefetch0(&rx_ring[rx_id]);
1557 rte_prefetch0(rxq->sw_ring[rx_id]);
1562 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1564 /* Set data buffer address and data length of the mbuf */
1565 rxdp->read.hdr_addr = 0;
1566 rxdp->read.pkt_addr = dma_addr;
1567 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1568 IAVF_RX_FLX_DESC_PKT_LEN_M;
1569 rxm->data_len = rx_packet_len;
1570 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1572 /* If this is the first buffer of the received packet, set the
1573 * pointer to the first mbuf of the packet and initialize its
1574 * context. Otherwise, update the total length and the number
1575 * of segments of the current scattered packet, and update the
1576 * pointer to the last mbuf of the current packet.
1580 first_seg->nb_segs = 1;
1581 first_seg->pkt_len = rx_packet_len;
1583 first_seg->pkt_len =
1584 (uint16_t)(first_seg->pkt_len +
1586 first_seg->nb_segs++;
1587 last_seg->next = rxm;
1590 /* If this is not the last buffer of the received packet,
1591 * update the pointer to the last mbuf of the current scattered
1592 * packet and continue to parse the RX ring.
1594 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1599 /* This is the last buffer of the received packet. If the CRC
1600 * is not stripped by the hardware:
1601 * - Subtract the CRC length from the total packet length.
1602 * - If the last buffer only contains the whole CRC or a part
1603 * of it, free the mbuf associated to the last buffer. If part
1604 * of the CRC is also contained in the previous mbuf, subtract
1605 * the length of that CRC part from the data length of the
1609 if (unlikely(rxq->crc_len > 0)) {
1610 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1611 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1612 rte_pktmbuf_free_seg(rxm);
1613 first_seg->nb_segs--;
1614 last_seg->data_len =
1615 (uint16_t)(last_seg->data_len -
1616 (RTE_ETHER_CRC_LEN - rx_packet_len));
1617 last_seg->next = NULL;
1619 rxm->data_len = (uint16_t)(rx_packet_len -
1624 first_seg->port = rxq->port_id;
1625 first_seg->ol_flags = 0;
1626 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1627 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1628 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1629 iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
1630 &rxq->stats.ipsec_crypto);
1631 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1632 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1634 first_seg->ol_flags |= pkt_flags;
1636 /* Prefetch data of first segment, if configured to do so. */
1637 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1638 first_seg->data_off));
1639 rx_pkts[nb_rx++] = first_seg;
1643 /* Record index of the next RX descriptor to probe. */
1644 rxq->rx_tail = rx_id;
1645 rxq->pkt_first_seg = first_seg;
1646 rxq->pkt_last_seg = last_seg;
1648 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1653 /* implement recv_scattered_pkts */
1655 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1658 struct iavf_rx_queue *rxq = rx_queue;
1659 union iavf_rx_desc rxd;
1660 struct rte_mbuf *rxe;
1661 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1662 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1663 struct rte_mbuf *nmb, *rxm;
1664 uint16_t rx_id = rxq->rx_tail;
1665 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1666 struct rte_eth_dev *dev;
1672 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1673 volatile union iavf_rx_desc *rxdp;
1674 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1676 while (nb_rx < nb_pkts) {
1677 rxdp = &rx_ring[rx_id];
1678 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1679 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1680 IAVF_RXD_QW1_STATUS_SHIFT;
1682 /* Check the DD bit */
1683 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1685 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1687 nmb = rte_mbuf_raw_alloc(rxq->mp);
1688 if (unlikely(!nmb)) {
1689 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1690 "queue_id=%u", rxq->port_id, rxq->queue_id);
1691 dev = &rte_eth_devices[rxq->port_id];
1692 dev->data->rx_mbuf_alloc_failed++;
1698 rxe = rxq->sw_ring[rx_id];
1699 rxq->sw_ring[rx_id] = nmb;
1701 if (rx_id == rxq->nb_rx_desc)
1704 /* Prefetch next mbuf */
1705 rte_prefetch0(rxq->sw_ring[rx_id]);
1707 /* When next RX descriptor is on a cache line boundary,
1708 * prefetch the next 4 RX descriptors and next 8 pointers
1711 if ((rx_id & 0x3) == 0) {
1712 rte_prefetch0(&rx_ring[rx_id]);
1713 rte_prefetch0(rxq->sw_ring[rx_id]);
1718 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1720 /* Set data buffer address and data length of the mbuf */
1721 rxdp->read.hdr_addr = 0;
1722 rxdp->read.pkt_addr = dma_addr;
1723 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1724 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1725 rxm->data_len = rx_packet_len;
1726 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1728 /* If this is the first buffer of the received packet, set the
1729 * pointer to the first mbuf of the packet and initialize its
1730 * context. Otherwise, update the total length and the number
1731 * of segments of the current scattered packet, and update the
1732 * pointer to the last mbuf of the current packet.
1736 first_seg->nb_segs = 1;
1737 first_seg->pkt_len = rx_packet_len;
1739 first_seg->pkt_len =
1740 (uint16_t)(first_seg->pkt_len +
1742 first_seg->nb_segs++;
1743 last_seg->next = rxm;
1746 /* If this is not the last buffer of the received packet,
1747 * update the pointer to the last mbuf of the current scattered
1748 * packet and continue to parse the RX ring.
1750 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1755 /* This is the last buffer of the received packet. If the CRC
1756 * is not stripped by the hardware:
1757 * - Subtract the CRC length from the total packet length.
1758 * - If the last buffer only contains the whole CRC or a part
1759 * of it, free the mbuf associated to the last buffer. If part
1760 * of the CRC is also contained in the previous mbuf, subtract
1761 * the length of that CRC part from the data length of the
1765 if (unlikely(rxq->crc_len > 0)) {
1766 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1767 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1768 rte_pktmbuf_free_seg(rxm);
1769 first_seg->nb_segs--;
1770 last_seg->data_len =
1771 (uint16_t)(last_seg->data_len -
1772 (RTE_ETHER_CRC_LEN - rx_packet_len));
1773 last_seg->next = NULL;
1775 rxm->data_len = (uint16_t)(rx_packet_len -
1779 first_seg->port = rxq->port_id;
1780 first_seg->ol_flags = 0;
1781 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1782 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1783 first_seg->packet_type =
1784 ptype_tbl[(uint8_t)((qword1 &
1785 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1787 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1788 first_seg->hash.rss =
1789 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1791 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1792 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1794 first_seg->ol_flags |= pkt_flags;
1796 /* Prefetch data of first segment, if configured to do so. */
1797 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1798 first_seg->data_off));
1799 rx_pkts[nb_rx++] = first_seg;
1803 /* Record index of the next RX descriptor to probe. */
1804 rxq->rx_tail = rx_id;
1805 rxq->pkt_first_seg = first_seg;
1806 rxq->pkt_last_seg = last_seg;
1808 iavf_update_rx_tail(rxq, nb_hold, rx_id);
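/* Bulk-allocation Rx path: scan the ring IAVF_LOOK_AHEAD descriptors at a
 * time, stage completed mbufs in rx_stage[] and refill the ring in bursts of
 * rx_free_thresh buffers.
 */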
1813 #define IAVF_LOOK_AHEAD 8
1815 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1817 volatile union iavf_rx_flex_desc *rxdp;
1818 struct rte_mbuf **rxep;
1819 struct rte_mbuf *mb;
1822 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1823 int32_t i, j, nb_rx = 0;
1825 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1827 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1828 rxep = &rxq->sw_ring[rxq->rx_tail];
1830 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1832 /* Make sure there is at least 1 packet to receive */
1833 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1836 /* Scan LOOK_AHEAD descriptors at a time to determine which
1837 * descriptors reference packets that are ready to be received.
1839 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1840 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1841 /* Read desc statuses backwards to avoid race condition */
1842 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1843 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1847 /* Compute how many status bits were set */
1848 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1849 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1853 /* Translate descriptor info to mbuf parameters */
1854 for (j = 0; j < nb_dd; j++) {
1855 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1857 i * IAVF_LOOK_AHEAD + j);
1860 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1861 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1862 mb->data_len = pkt_len;
1863 mb->pkt_len = pkt_len;
1866 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1867 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1868 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1869 iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
1870 &rxq->stats.ipsec_crypto);
1871 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1872 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1873 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1875 mb->ol_flags |= pkt_flags;
1878 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1879 rxq->rx_stage[i + j] = rxep[j];
1881 if (nb_dd != IAVF_LOOK_AHEAD)
1885 /* Clear software ring entries */
1886 for (i = 0; i < nb_rx; i++)
1887 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1893 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1895 volatile union iavf_rx_desc *rxdp;
1896 struct rte_mbuf **rxep;
1897 struct rte_mbuf *mb;
1901 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1902 int32_t i, j, nb_rx = 0;
1904 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1906 rxdp = &rxq->rx_ring[rxq->rx_tail];
1907 rxep = &rxq->sw_ring[rxq->rx_tail];
1909 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1910 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1911 IAVF_RXD_QW1_STATUS_SHIFT;
1913 /* Make sure there is at least 1 packet to receive */
1914 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1917 /* Scan LOOK_AHEAD descriptors at a time to determine which
1918 * descriptors reference packets that are ready to be received.
1920 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1921 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1922 /* Read desc statuses backwards to avoid race condition */
1923 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1924 qword1 = rte_le_to_cpu_64(
1925 rxdp[j].wb.qword1.status_error_len);
1926 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1927 IAVF_RXD_QW1_STATUS_SHIFT;
1932 /* Compute how many status bits were set */
1933 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1934 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1938 /* Translate descriptor info to mbuf parameters */
1939 for (j = 0; j < nb_dd; j++) {
1940 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1941 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1944 qword1 = rte_le_to_cpu_64
1945 (rxdp[j].wb.qword1.status_error_len);
1946 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1947 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1948 mb->data_len = pkt_len;
1949 mb->pkt_len = pkt_len;
1951 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1952 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1954 ptype_tbl[(uint8_t)((qword1 &
1955 IAVF_RXD_QW1_PTYPE_MASK) >>
1956 IAVF_RXD_QW1_PTYPE_SHIFT)];
1958 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1959 mb->hash.rss = rte_le_to_cpu_32(
1960 rxdp[j].wb.qword0.hi_dword.rss);
1962 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1963 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1965 mb->ol_flags |= pkt_flags;
1968 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1969 rxq->rx_stage[i + j] = rxep[j];
1971 if (nb_dd != IAVF_LOOK_AHEAD)
1975 /* Clear software ring entries */
1976 for (i = 0; i < nb_rx; i++)
1977 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1982 static inline uint16_t
1983 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1984 struct rte_mbuf **rx_pkts,
1988 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1990 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1992 for (i = 0; i < nb_pkts; i++)
1993 rx_pkts[i] = stage[i];
1995 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1996 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
2002 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
2004 volatile union iavf_rx_desc *rxdp;
2005 struct rte_mbuf **rxep;
2006 struct rte_mbuf *mb;
2007 uint16_t alloc_idx, i;
2011 /* Allocate buffers in bulk */
2012 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
2013 (rxq->rx_free_thresh - 1));
2014 rxep = &rxq->sw_ring[alloc_idx];
2015 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
2016 rxq->rx_free_thresh);
2017 if (unlikely(diag != 0)) {
2018 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
2022 rxdp = &rxq->rx_ring[alloc_idx];
2023 for (i = 0; i < rxq->rx_free_thresh; i++) {
2024 if (likely(i < (rxq->rx_free_thresh - 1)))
2025 /* Prefetch next mbuf */
2026 rte_prefetch0(rxep[i + 1]);
2029 rte_mbuf_refcnt_set(mb, 1);
2031 mb->data_off = RTE_PKTMBUF_HEADROOM;
2033 mb->port = rxq->port_id;
2034 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
2035 rxdp[i].read.hdr_addr = 0;
2036 rxdp[i].read.pkt_addr = dma_addr;
2039 /* Update rx tail register */
2041 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
2043 rxq->rx_free_trigger =
2044 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
2045 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
2046 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2051 static inline uint16_t
2052 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2054 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
2060 if (rxq->rx_nb_avail)
2061 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
2063 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
2064 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
2066 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
2067 rxq->rx_next_avail = 0;
2068 rxq->rx_nb_avail = nb_rx;
2069 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
2071 if (rxq->rx_tail > rxq->rx_free_trigger) {
2072 if (iavf_rx_alloc_bufs(rxq) != 0) {
2075 /* TODO: count rx_mbuf_alloc_failed here */
2077 rxq->rx_nb_avail = 0;
2078 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
2079 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
2080 rxq->sw_ring[j] = rxq->rx_stage[i];
2086 if (rxq->rx_tail >= rxq->nb_rx_desc)
2089 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
2090 rxq->port_id, rxq->queue_id,
2091 rxq->rx_tail, nb_rx);
2093 if (rxq->rx_nb_avail)
2094 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
2100 iavf_recv_pkts_bulk_alloc(void *rx_queue,
2101 struct rte_mbuf **rx_pkts,
2104 uint16_t nb_rx = 0, n, count;
2106 if (unlikely(nb_pkts == 0))
2109 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2110 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2113 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2114 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2115 nb_rx = (uint16_t)(nb_rx + count);
2116 nb_pkts = (uint16_t)(nb_pkts - count);
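/* Reclaim transmitted descriptors: check the DD bit of the descriptor that
 * closes the oldest rs_thresh batch and, when it is done, return the whole
 * batch to the free count.
 */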
2125 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2127 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2128 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2129 uint16_t nb_tx_desc = txq->nb_tx_desc;
2130 uint16_t desc_to_clean_to;
2131 uint16_t nb_tx_to_clean;
2133 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2135 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2136 if (desc_to_clean_to >= nb_tx_desc)
2137 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2139 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2140 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2141 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2142 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2143 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2144 "(port=%d queue=%d)", desc_to_clean_to,
2145 txq->port_id, txq->queue_id);
2149 if (last_desc_cleaned > desc_to_clean_to)
2150 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2153 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2156 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2158 txq->last_desc_cleaned = desc_to_clean_to;
2159 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2164 /* Check if the context descriptor is needed for TX offloading */
2165 static inline uint16_t
2166 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2168 if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
2169 RTE_MBUF_F_TX_TUNNEL_MASK))
2171 if (flags & RTE_MBUF_F_TX_VLAN &&
2172 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2178 iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
2184 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
2185 cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2187 if (m->ol_flags & RTE_MBUF_F_TX_VLAN &&
2188 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2189 cmd |= IAVF_TX_CTX_DESC_IL2TAG2
2190 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2197 iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
2198 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2200 uint64_t ipsec_field =
2201 (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
2202 IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
2204 *field |= ipsec_field;
2209 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
2210 const struct rte_mbuf *m)
2212 uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
2213 uint64_t eip_len = 0;
2214 uint64_t eip_noinc = 0;
2215 /* Default - IP_ID is incremented in each segment of LSO */
2217 switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
2218 RTE_MBUF_F_TX_OUTER_IPV6 |
2219 RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
2220 case RTE_MBUF_F_TX_OUTER_IPV4:
2221 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
2222 eip_len = m->outer_l3_len >> 2;
2224 case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
2225 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
2226 eip_len = m->outer_l3_len >> 2;
2228 case RTE_MBUF_F_TX_OUTER_IPV6:
2229 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
2230 eip_len = m->outer_l3_len >> 2;
2234 *qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
2235 eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
2236 eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
2239 static inline uint16_t
2240 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
2241 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2243 uint64_t segmentation_field = 0;
2244 uint64_t total_length = 0;
2246 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
2247 total_length = ipsec_md->l4_payload_len;
2248 } else {
2249 total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
2251 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2252 total_length -= m->outer_l3_len;
2253 }
2255 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
2256 if (!m->l4_len || !m->tso_segsz)
2257 PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
2258 m->l4_len, m->tso_segsz);
2259 if (m->tso_segsz < 88)
2260 PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
2261 m->tso_segsz, 88);
2262 #endif
2263 segmentation_field =
2264 (((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
2265 IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
2266 (((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
2267 IAVF_TXD_CTX_QW1_MSS_MASK);
2269 *field |= segmentation_field;
2271 return total_length;
2272 }
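/* Host-order view of the two context descriptor quadwords; QW0/QW1 are
 * built here first and converted to little endian in one step when the
 * descriptor is written.
 */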
2275 struct iavf_tx_context_desc_qws {
2276 __le64 qw0;
2277 __le64 qw1;
2278 };
2281 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
2282 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
2283 uint16_t *tlen, uint8_t vlan_flag)
2285 volatile struct iavf_tx_context_desc_qws *desc_qws =
2286 (volatile struct iavf_tx_context_desc_qws *)desc;
2287 /* fill descriptor type field */
2288 desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
2290 /* fill command field */
2291 iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);
2293 /* fill segmentation field */
2294 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
2295 /* fill IPsec field */
2296 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2297 iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
2298 ipsec_md);
2300 *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
2301 m, ipsec_md);
2302 }
2304 /* fill tunnelling field */
2305 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2306 iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
2310 desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
2311 desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
2313 if (vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2314 desc->l2tag2 = m->vlan_tci;
2315 }
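/* Populate the IPsec TX descriptor from the crypto session metadata:
 * L4 payload length, ESN and ESP trailer length in QW0; SA index, next
 * protocol, IV length and NAT-T (UDP encapsulation) flag in QW1. Also
 * reports the per-packet IPsec overhead (ESP header, IV and optional UDP
 * header) needed when sizing TSO data buffers.
 */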
2319 iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
2320 const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
2322 desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
2323 IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
2324 ((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
2325 ((uint64_t)md->esp_trailer_len <<
2326 IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
2328 desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
2329 IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
2330 ((uint64_t)md->next_proto <<
2331 IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
2332 ((uint64_t)(md->len_iv & 0x3) <<
2333 IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
2334 ((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2335 1ULL : 0ULL) <<
2336 IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
2337 (uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
2339 /**
2340 * TODO: Pre-calculate this in the Session initialization
2341 *
2342 * Calculate IPsec length required in data descriptor func when TSO
2343 * offload is enabled
2344 */
2345 *ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
2346 (md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2347 sizeof(struct rte_udp_hdr) : 0);
2348 }
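/* Build the command/offset/l2tag1 portion of the data descriptor QW1:
 * CRC insertion, descriptor-based VLAN tag insertion, MAC/IP header
 * lengths and the selected L3/L4 checksum offloads.
 */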
2351 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
2352 struct rte_mbuf *m, uint8_t vlan_flag)
2354 uint64_t command = 0;
2355 uint64_t offset = 0;
2356 uint64_t l2tag1 = 0;
2358 *qw1 = IAVF_TX_DESC_DTYPE_DATA;
2360 command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
2362 /* Descriptor based VLAN insertion */
2363 if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
2364 m->ol_flags & RTE_MBUF_F_TX_VLAN) {
2365 command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
2366 l2tag1 |= m->vlan_tci;
2370 offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2372 /* Enable L3 checksum offloading inner */
2373 if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
2374 command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2375 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2376 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
2377 command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2378 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2379 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
2380 command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2381 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2382 }
2384 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2385 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2386 offset |= (m->l4_len >> 2) <<
2387 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2388 }
2390 /* Enable L4 checksum offloads */
2391 switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2392 case RTE_MBUF_F_TX_TCP_CKSUM:
2393 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2394 offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2395 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2396 break;
2397 case RTE_MBUF_F_TX_SCTP_CKSUM:
2398 command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2399 offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2400 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2401 break;
2402 case RTE_MBUF_F_TX_UDP_CKSUM:
2403 command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2404 offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2405 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2406 break;
2407 }
2409 *qw1 = rte_cpu_to_le_64((((uint64_t)command <<
2410 IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
2411 (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
2412 IAVF_TXD_DATA_QW1_OFFSET_MASK) |
2413 ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
2414 }
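/* Write one data descriptor: start from the prepared QW1 template, set the
 * buffer address, and fill in the buffer size. For tunnelled TSO/USO
 * packets the size is derived from the header lengths (plus IPsec overhead
 * when security offload is enabled) and the TSO payload length, otherwise
 * the mbuf data length is used.
 */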
2417 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
2418 struct rte_mbuf *m, uint64_t desc_template,
2419 uint16_t tlen, uint16_t ipseclen)
2421 uint32_t hdrlen = m->l2_len;
2422 uint32_t bufsz = 0;
2424 /* fill data descriptor qw1 from template */
2425 desc->cmd_type_offset_bsz = desc_template;
2427 /* set data buffer address */
2428 desc->buffer_addr = rte_mbuf_data_iova(m);
2430 /* calculate data buffer size less set header lengths */
2431 if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
2432 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2433 RTE_MBUF_F_TX_UDP_SEG))) {
2434 hdrlen += m->outer_l3_len;
2435 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2436 hdrlen += m->l3_len + m->l4_len;
2437 else
2438 hdrlen += m->l3_len;
2439 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2440 hdrlen += ipseclen;
2441 bufsz = hdrlen + tlen;
2442 } else {
2443 bufsz = m->data_len;
2444 }
2446 /* set data buffer size */
2447 desc->cmd_type_offset_bsz |=
2448 (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
2449 IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
2451 desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
2452 desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
2456 static struct iavf_ipsec_crypto_pkt_metadata *
2457 iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
2458 struct rte_mbuf *m)
2459 {
2460 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2461 return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
2462 struct iavf_ipsec_crypto_pkt_metadata *);
2464 return NULL;
2465 }
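/* TX burst function. For each packet this writes an optional context
 * descriptor and an optional IPsec descriptor, then one data descriptor per
 * mbuf segment, sets EOP (and RS every rs_thresh descriptors) on the last
 * data descriptor, and finally updates the queue tail register.
 */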
2469 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2471 struct iavf_tx_queue *txq = tx_queue;
2472 volatile struct iavf_tx_desc *txr = txq->tx_ring;
2473 struct iavf_tx_entry *txe_ring = txq->sw_ring;
2474 struct iavf_tx_entry *txe, *txn;
2475 struct rte_mbuf *mb, *mb_seg;
2476 uint16_t desc_idx, desc_idx_last;
2477 uint16_t idx;
2480 /* Check if the descriptor ring needs to be cleaned. */
2481 if (txq->nb_free < txq->free_thresh)
2482 iavf_xmit_cleanup(txq);
2484 desc_idx = txq->tx_tail;
2485 txe = &txe_ring[desc_idx];
2487 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
2488 iavf_dump_tx_entry_ring(txq);
2489 iavf_dump_tx_desc_ring(txq);
2490 #endif
2493 for (idx = 0; idx < nb_pkts; idx++) {
2494 volatile struct iavf_tx_desc *ddesc;
2495 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
2497 uint16_t nb_desc_ctx, nb_desc_ipsec;
2498 uint16_t nb_desc_data, nb_desc_required;
2499 uint16_t tlen = 0, ipseclen = 0;
2500 uint64_t ddesc_template = 0;
2501 uint64_t ddesc_cmd = 0;
2503 mb = tx_pkts[idx];
2505 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2508 * Get metadata for ipsec crypto from mbuf dynamic fields if
2509 * security offload is specified.
2511 ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
2513 nb_desc_data = mb->nb_segs;
2514 nb_desc_ctx =
2515 iavf_calc_context_desc(mb->ol_flags, txq->vlan_flag);
2516 nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
2519 * The number of descriptors that must be allocated for
2520 * a packet equals the number of segments of that
2521 * packet plus the context and ipsec descriptors if needed.
2522 */
2523 nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
2525 desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
2527 /* wrap descriptor ring */
2528 if (desc_idx_last >= txq->nb_tx_desc)
2529 desc_idx_last =
2530 (uint16_t)(desc_idx_last - txq->nb_tx_desc);
2532 PMD_TX_LOG(DEBUG,
2533 "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
2534 txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
2536 if (nb_desc_required > txq->nb_free) {
2537 if (iavf_xmit_cleanup(txq)) {
2538 if (idx == 0)
2539 return 0;
2540 goto end_of_tx;
2541 }
2542 if (unlikely(nb_desc_required > txq->rs_thresh)) {
2543 while (nb_desc_required > txq->nb_free) {
2544 if (iavf_xmit_cleanup(txq)) {
2553 iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
2554 txq->vlan_flag);
2556 /* Setup TX context descriptor if required */
2557 if (nb_desc_ctx) {
2558 volatile struct iavf_tx_context_desc *ctx_desc =
2559 (volatile struct iavf_tx_context_desc *)
2560 &txr[desc_idx];
2562 /* clear QW0 or the previous writeback value
2563 * may impact next write
2564 */
2565 *(volatile uint64_t *)ctx_desc = 0;
2567 txn = &txe_ring[txe->next_id];
2568 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2570 if (txe->mbuf) {
2571 rte_pktmbuf_free_seg(txe->mbuf);
2572 txe->mbuf = NULL;
2573 }
2575 iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
2576 txq->vlan_flag);
2577 IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
2579 txe->last_id = desc_idx_last;
2580 desc_idx = txe->next_id;
2581 txe = txn;
2582 }
2584 if (nb_desc_ipsec) {
2585 volatile struct iavf_tx_ipsec_desc *ipsec_desc =
2586 (volatile struct iavf_tx_ipsec_desc *)
2587 &txr[desc_idx];
2589 txn = &txe_ring[txe->next_id];
2590 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2592 if (txe->mbuf) {
2593 rte_pktmbuf_free_seg(txe->mbuf);
2594 txe->mbuf = NULL;
2595 }
2597 iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
2599 IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
2601 txe->last_id = desc_idx_last;
2602 desc_idx = txe->next_id;
2603 txe = txn;
2604 }
2606 mb_seg = mb;
2608 do {
2609 ddesc = (volatile struct iavf_tx_desc *)
2610 &txr[desc_idx];
2612 txn = &txe_ring[txe->next_id];
2613 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2615 if (txe->mbuf)
2616 rte_pktmbuf_free_seg(txe->mbuf);
2618 txe->mbuf = mb_seg;
2619 iavf_fill_data_desc(ddesc, mb_seg,
2620 ddesc_template, tlen, ipseclen);
2622 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
2624 txe->last_id = desc_idx_last;
2625 desc_idx = txe->next_id;
2626 txe = txn;
2627 mb_seg = mb_seg->next;
2628 } while (mb_seg);
2630 /* The last packet data descriptor needs End Of Packet (EOP) */
2631 ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
2633 txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
2634 txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
2636 if (txq->nb_used >= txq->rs_thresh) {
2637 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2638 "%4u (port=%d queue=%d)",
2639 desc_idx_last, txq->port_id, txq->queue_id);
2641 ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
2643 /* Update txq RS bit counters */
2644 txq->nb_used = 0;
2645 }
2647 ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
2648 IAVF_TXD_DATA_QW1_CMD_SHIFT);
2650 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
2651 }
2653 end_of_tx:
2654 rte_wmb();
2656 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2657 txq->port_id, txq->queue_id, desc_idx, idx);
2659 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
2660 txq->tx_tail = desc_idx;
2662 return idx;
2663 }
2665 /* Check if the packet with vlan user priority is transmitted in the
2666 * correct queue.
2667 */
2668 static int
2669 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2670 {
2671 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2672 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2673 uint16_t up;
2675 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2677 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2678 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
2679 up, txq->queue_id);
2680 return -1;
2686 /* TX prep functions */
2688 iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2689 uint16_t nb_pkts)
2690 {
2691 int i, ret;
2692 uint64_t ol_flags;
2693 struct rte_mbuf *m;
2694 struct iavf_tx_queue *txq = tx_queue;
2695 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2696 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2698 for (i = 0; i < nb_pkts; i++) {
2699 m = tx_pkts[i];
2700 ol_flags = m->ol_flags;
2702 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2703 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2704 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2705 rte_errno = EINVAL;
2706 return i;
2707 }
2708 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2709 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2710 /* MSS outside the range is considered malicious */
2711 rte_errno = EINVAL;
2712 return i;
2713 }
2715 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2716 rte_errno = ENOTSUP;
2717 return i;
2718 }
2720 #ifdef RTE_ETHDEV_DEBUG_TX
2721 ret = rte_validate_tx_offload(m);
2722 if (ret != 0) {
2723 rte_errno = -ret;
2724 return i;
2725 }
2726 #endif
2727 ret = rte_net_intel_cksum_prepare(m);
2733 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2734 ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
2735 ret = iavf_check_vlan_up2tc(txq, m);
2736 if (ret != 0) {
2737 rte_errno = -ret;
2738 return i;
2739 }
2746 /* choose rx function */
2748 iavf_set_rx_function(struct rte_eth_dev *dev)
2750 struct iavf_adapter *adapter =
2751 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2752 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2755 struct iavf_rx_queue *rxq;
2758 bool use_avx2 = false;
2759 bool use_avx512 = false;
2760 bool use_flex = false;
2762 check_ret = iavf_rx_vec_dev_check(dev);
2763 if (check_ret >= 0 &&
2764 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2765 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2766 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2767 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2768 use_avx2 = true;
2770 #ifdef CC_AVX512_SUPPORT
2771 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2772 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2773 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2774 use_avx512 = true;
2775 #endif
2777 if (vf->vf_res->vf_cap_flags &
2778 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2779 use_flex = true;
2781 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2782 rxq = dev->data->rx_queues[i];
2783 (void)iavf_rxq_vec_setup(rxq);
2784 }
2786 if (dev->data->scattered_rx) {
2787 if (!use_avx512) {
2788 PMD_DRV_LOG(DEBUG,
2789 "Using %sVector Scattered Rx (port %d).",
2790 use_avx2 ? "avx2 " : "",
2791 dev->data->port_id);
2792 } else {
2793 if (check_ret == IAVF_VECTOR_PATH)
2794 PMD_DRV_LOG(DEBUG,
2795 "Using AVX512 Vector Scattered Rx (port %d).",
2796 dev->data->port_id);
2797 else
2798 PMD_DRV_LOG(DEBUG,
2799 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2800 dev->data->port_id);
2801 }
2802 if (use_flex) {
2803 dev->rx_pkt_burst = use_avx2 ?
2804 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2805 iavf_recv_scattered_pkts_vec_flex_rxd;
2806 #ifdef CC_AVX512_SUPPORT
2808 if (check_ret == IAVF_VECTOR_PATH)
2810 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2813 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2817 dev->rx_pkt_burst = use_avx2 ?
2818 iavf_recv_scattered_pkts_vec_avx2 :
2819 iavf_recv_scattered_pkts_vec;
2820 #ifdef CC_AVX512_SUPPORT
2822 if (check_ret == IAVF_VECTOR_PATH)
2824 iavf_recv_scattered_pkts_vec_avx512;
2827 iavf_recv_scattered_pkts_vec_avx512_offload;
2833 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2834 use_avx2 ? "avx2 " : "",
2835 dev->data->port_id);
2837 if (check_ret == IAVF_VECTOR_PATH)
2839 "Using AVX512 Vector Rx (port %d).",
2840 dev->data->port_id);
2843 "Using AVX512 OFFLOAD Vector Rx (port %d).",
2844 dev->data->port_id);
2847 dev->rx_pkt_burst = use_avx2 ?
2848 iavf_recv_pkts_vec_avx2_flex_rxd :
2849 iavf_recv_pkts_vec_flex_rxd;
2850 #ifdef CC_AVX512_SUPPORT
2852 if (check_ret == IAVF_VECTOR_PATH)
2854 iavf_recv_pkts_vec_avx512_flex_rxd;
2857 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
2861 dev->rx_pkt_burst = use_avx2 ?
2862 iavf_recv_pkts_vec_avx2 :
2864 #ifdef CC_AVX512_SUPPORT
2866 if (check_ret == IAVF_VECTOR_PATH)
2868 iavf_recv_pkts_vec_avx512;
2871 iavf_recv_pkts_vec_avx512_offload;
2881 if (dev->data->scattered_rx) {
2882 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2883 dev->data->port_id);
2884 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2885 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2886 else
2887 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2888 } else if (adapter->rx_bulk_alloc_allowed) {
2889 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2890 dev->data->port_id);
2891 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2892 } else {
2893 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2894 dev->data->port_id);
2895 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2896 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2897 else
2898 dev->rx_pkt_burst = iavf_recv_pkts;
2899 }
2900 }
2902 /* choose tx function */
2904 iavf_set_tx_function(struct rte_eth_dev *dev)
2907 struct iavf_tx_queue *txq;
2908 int i;
2909 int check_ret;
2910 bool use_sse = false;
2911 bool use_avx2 = false;
2912 bool use_avx512 = false;
2914 check_ret = iavf_tx_vec_dev_check(dev);
2916 if (check_ret >= 0 &&
2917 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2918 /* SSE and AVX2 do not support the offload path yet. */
2919 if (check_ret == IAVF_VECTOR_PATH) {
2920 use_sse = true;
2921 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2922 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2923 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2924 use_avx2 = true;
2925 }
2926 #ifdef CC_AVX512_SUPPORT
2927 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2928 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2929 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2930 use_avx512 = true;
2931 #endif
2933 if (!use_sse && !use_avx2 && !use_avx512)
2934 goto normal;
2937 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2938 use_avx2 ? "avx2 " : "",
2939 dev->data->port_id);
2940 dev->tx_pkt_burst = use_avx2 ?
2941 iavf_xmit_pkts_vec_avx2 :
2942 iavf_xmit_pkts_vec;
2944 dev->tx_pkt_prepare = NULL;
2945 #ifdef CC_AVX512_SUPPORT
2946 if (use_avx512) {
2947 if (check_ret == IAVF_VECTOR_PATH) {
2948 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2949 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
2950 dev->data->port_id);
2951 } else {
2952 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
2953 dev->tx_pkt_prepare = iavf_prep_pkts;
2954 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
2955 dev->data->port_id);
2956 }
2957 }
2958 #endif
2960 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2961 txq = dev->data->tx_queues[i];
2962 if (!txq)
2963 continue;
2964 #ifdef CC_AVX512_SUPPORT
2965 if (use_avx512)
2966 iavf_txq_vec_setup_avx512(txq);
2967 else
2968 iavf_txq_vec_setup(txq);
2969 #else
2970 iavf_txq_vec_setup(txq);
2971 #endif
2972 }
2974 return;
2975 }
2977 normal:
2979 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2980 dev->data->port_id);
2981 dev->tx_pkt_burst = iavf_xmit_pkts;
2982 dev->tx_pkt_prepare = iavf_prep_pkts;
2983 }
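/* Free transmitted mbufs from the software ring, starting after tx_tail
 * and reclaiming hardware descriptors as needed, until free_cnt packets
 * have been released or nothing more can be cleaned.
 */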
2985 static int
2986 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2987 uint32_t free_cnt)
2988 {
2989 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2990 uint16_t i, tx_last, tx_id;
2991 uint16_t nb_tx_free_last;
2992 uint16_t nb_tx_to_clean;
2993 uint32_t pkt_cnt = 0;
2995 /* Start free mbuf from the next of tx_tail */
2996 tx_last = txq->tx_tail;
2997 tx_id = swr_ring[tx_last].next_id;
2999 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
3000 return 0;
3002 nb_tx_to_clean = txq->nb_free;
3003 nb_tx_free_last = txq->nb_free;
3004 if (!free_cnt || free_cnt > txq->nb_tx_desc)
3005 free_cnt = txq->nb_tx_desc;
3007 /* Loop through swr_ring to count the amount of
3008 * freeable mbufs and packets.
3009 */
3010 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3011 for (i = 0; i < nb_tx_to_clean &&
3012 pkt_cnt < free_cnt &&
3013 tx_id != tx_last; i++) {
3014 if (swr_ring[tx_id].mbuf != NULL) {
3015 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3016 swr_ring[tx_id].mbuf = NULL;
3018 /*
3019 * last segment in the packet,
3020 * increment packet count
3021 */
3022 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3023 }
3025 tx_id = swr_ring[tx_id].next_id;
3028 if (txq->rs_thresh > txq->nb_tx_desc -
3029 txq->nb_free || tx_id == tx_last)
3030 break;
3032 if (pkt_cnt < free_cnt) {
3033 if (iavf_xmit_cleanup(txq))
3034 break;
3036 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
3037 nb_tx_free_last = txq->nb_free;
3038 }
3039 }
3041 return (int)pkt_cnt;
3045 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
3047 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
3049 return iavf_tx_done_cleanup_full(q, free_cnt);
3053 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3054 struct rte_eth_rxq_info *qinfo)
3056 struct iavf_rx_queue *rxq;
3058 rxq = dev->data->rx_queues[queue_id];
3060 qinfo->mp = rxq->mp;
3061 qinfo->scattered_rx = dev->data->scattered_rx;
3062 qinfo->nb_desc = rxq->nb_rx_desc;
3064 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3065 qinfo->conf.rx_drop_en = true;
3066 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3070 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3071 struct rte_eth_txq_info *qinfo)
3073 struct iavf_tx_queue *txq;
3075 txq = dev->data->tx_queues[queue_id];
3077 qinfo->nb_desc = txq->nb_tx_desc;
3079 qinfo->conf.tx_free_thresh = txq->free_thresh;
3080 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
3081 qinfo->conf.offloads = txq->offloads;
3082 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3085 /* Get the number of used descriptors of a rx queue */
3087 iavf_dev_rxq_count(void *rx_queue)
3089 #define IAVF_RXQ_SCAN_INTERVAL 4
3090 volatile union iavf_rx_desc *rxdp;
3091 struct iavf_rx_queue *rxq;
3092 uint16_t desc = 0;
3094 rxq = rx_queue;
3095 rxdp = &rxq->rx_ring[rxq->rx_tail];
3097 while ((desc < rxq->nb_rx_desc) &&
3098 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
3099 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
3100 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
3101 /* Check the DD bit of a rx descriptor of each 4 in a group,
3102 * to avoid checking too frequently and degrading performance.
3103 */
3105 desc += IAVF_RXQ_SCAN_INTERVAL;
3106 rxdp += IAVF_RXQ_SCAN_INTERVAL;
3107 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3108 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3109 desc - rxq->nb_rx_desc]);
3110 }
3112 return desc;
3113 }
3116 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
3118 struct iavf_rx_queue *rxq = rx_queue;
3119 volatile uint64_t *status;
3120 uint64_t mask;
3121 uint32_t desc;
3123 if (unlikely(offset >= rxq->nb_rx_desc))
3124 return -EINVAL;
3126 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
3127 return RTE_ETH_RX_DESC_UNAVAIL;
3129 desc = rxq->rx_tail + offset;
3130 if (desc >= rxq->nb_rx_desc)
3131 desc -= rxq->nb_rx_desc;
3133 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
3134 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
3135 << IAVF_RXD_QW1_STATUS_SHIFT);
3136 if (*status & mask)
3137 return RTE_ETH_RX_DESC_DONE;
3139 return RTE_ETH_RX_DESC_AVAIL;
3140 }
3143 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
3145 struct iavf_tx_queue *txq = tx_queue;
3146 volatile uint64_t *status;
3147 uint64_t mask, expect;
3148 uint32_t desc;
3150 if (unlikely(offset >= txq->nb_tx_desc))
3151 return -EINVAL;
3153 desc = txq->tx_tail + offset;
3154 /* go to next desc that has the RS bit */
3155 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
3156 txq->rs_thresh;
3157 if (desc >= txq->nb_tx_desc) {
3158 desc -= txq->nb_tx_desc;
3159 if (desc >= txq->nb_tx_desc)
3160 desc -= txq->nb_tx_desc;
3163 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
3164 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
3165 expect = rte_cpu_to_le_64(
3166 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
3167 if ((*status & mask) == expect)
3168 return RTE_ETH_TX_DESC_DONE;
3170 return RTE_ETH_TX_DESC_FULL;
3171 }
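/* Map the 8-bit hardware packet type reported in the Rx descriptor to the
 * corresponding RTE_PTYPE_* flags; unknown or reserved indexes resolve to
 * RTE_PTYPE_UNKNOWN. This table seeds the per-adapter ptype table below.
 */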
3173 static inline uint32_t
3174 iavf_get_default_ptype(uint16_t ptype)
3176 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
3177 __rte_cache_aligned = {
3180 [1] = RTE_PTYPE_L2_ETHER,
3181 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3182 /* [3] - [5] reserved */
3183 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3184 /* [7] - [10] reserved */
3185 [11] = RTE_PTYPE_L2_ETHER_ARP,
3186 /* [12] - [21] reserved */
3188 /* Non tunneled IPv4 */
3189 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3190 RTE_PTYPE_L4_FRAG,
3191 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3192 RTE_PTYPE_L4_NONFRAG,
3193 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3194 RTE_PTYPE_L4_UDP,
3195 /* [25] reserved */
3196 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3197 RTE_PTYPE_L4_TCP,
3198 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3199 RTE_PTYPE_L4_SCTP,
3200 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3201 RTE_PTYPE_L4_ICMP,
3204 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3205 RTE_PTYPE_TUNNEL_IP |
3206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3207 RTE_PTYPE_INNER_L4_FRAG,
3208 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3209 RTE_PTYPE_TUNNEL_IP |
3210 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3211 RTE_PTYPE_INNER_L4_NONFRAG,
3212 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3213 RTE_PTYPE_TUNNEL_IP |
3214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3215 RTE_PTYPE_INNER_L4_UDP,
3217 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3218 RTE_PTYPE_TUNNEL_IP |
3219 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3220 RTE_PTYPE_INNER_L4_TCP,
3221 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3222 RTE_PTYPE_TUNNEL_IP |
3223 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3224 RTE_PTYPE_INNER_L4_SCTP,
3225 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3226 RTE_PTYPE_TUNNEL_IP |
3227 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3228 RTE_PTYPE_INNER_L4_ICMP,
3231 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3232 RTE_PTYPE_TUNNEL_IP |
3233 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3234 RTE_PTYPE_INNER_L4_FRAG,
3235 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3236 RTE_PTYPE_TUNNEL_IP |
3237 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3238 RTE_PTYPE_INNER_L4_NONFRAG,
3239 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3240 RTE_PTYPE_TUNNEL_IP |
3241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3242 RTE_PTYPE_INNER_L4_UDP,
3244 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3245 RTE_PTYPE_TUNNEL_IP |
3246 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3247 RTE_PTYPE_INNER_L4_TCP,
3248 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3249 RTE_PTYPE_TUNNEL_IP |
3250 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3251 RTE_PTYPE_INNER_L4_SCTP,
3252 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3253 RTE_PTYPE_TUNNEL_IP |
3254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3255 RTE_PTYPE_INNER_L4_ICMP,
3257 /* IPv4 --> GRE/Teredo/VXLAN */
3258 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3259 RTE_PTYPE_TUNNEL_GRENAT,
3261 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3262 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3263 RTE_PTYPE_TUNNEL_GRENAT |
3264 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3265 RTE_PTYPE_INNER_L4_FRAG,
3266 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3267 RTE_PTYPE_TUNNEL_GRENAT |
3268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3269 RTE_PTYPE_INNER_L4_NONFRAG,
3270 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3271 RTE_PTYPE_TUNNEL_GRENAT |
3272 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3273 RTE_PTYPE_INNER_L4_UDP,
3275 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3276 RTE_PTYPE_TUNNEL_GRENAT |
3277 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3278 RTE_PTYPE_INNER_L4_TCP,
3279 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3280 RTE_PTYPE_TUNNEL_GRENAT |
3281 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3282 RTE_PTYPE_INNER_L4_SCTP,
3283 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3284 RTE_PTYPE_TUNNEL_GRENAT |
3285 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3286 RTE_PTYPE_INNER_L4_ICMP,
3288 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3289 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3290 RTE_PTYPE_TUNNEL_GRENAT |
3291 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3292 RTE_PTYPE_INNER_L4_FRAG,
3293 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3294 RTE_PTYPE_TUNNEL_GRENAT |
3295 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3296 RTE_PTYPE_INNER_L4_NONFRAG,
3297 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3298 RTE_PTYPE_TUNNEL_GRENAT |
3299 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3300 RTE_PTYPE_INNER_L4_UDP,
3302 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3303 RTE_PTYPE_TUNNEL_GRENAT |
3304 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3305 RTE_PTYPE_INNER_L4_TCP,
3306 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3307 RTE_PTYPE_TUNNEL_GRENAT |
3308 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3309 RTE_PTYPE_INNER_L4_SCTP,
3310 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3311 RTE_PTYPE_TUNNEL_GRENAT |
3312 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3313 RTE_PTYPE_INNER_L4_ICMP,
3315 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3316 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3319 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3320 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3322 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3323 RTE_PTYPE_INNER_L4_FRAG,
3324 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3325 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3327 RTE_PTYPE_INNER_L4_NONFRAG,
3328 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3329 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3330 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3331 RTE_PTYPE_INNER_L4_UDP,
3333 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3334 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3335 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3336 RTE_PTYPE_INNER_L4_TCP,
3337 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3338 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3339 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3340 RTE_PTYPE_INNER_L4_SCTP,
3341 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3342 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3343 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3344 RTE_PTYPE_INNER_L4_ICMP,
3346 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3347 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3348 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3349 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3350 RTE_PTYPE_INNER_L4_FRAG,
3351 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3352 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3353 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3354 RTE_PTYPE_INNER_L4_NONFRAG,
3355 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3356 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3357 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3358 RTE_PTYPE_INNER_L4_UDP,
3360 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3362 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3363 RTE_PTYPE_INNER_L4_TCP,
3364 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3366 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3367 RTE_PTYPE_INNER_L4_SCTP,
3368 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3369 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3370 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3371 RTE_PTYPE_INNER_L4_ICMP,
3372 /* [73] - [87] reserved */
3374 /* Non tunneled IPv6 */
3375 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3376 RTE_PTYPE_L4_FRAG,
3377 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3378 RTE_PTYPE_L4_NONFRAG,
3379 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3380 RTE_PTYPE_L4_UDP,
3381 /* [91] reserved */
3382 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3383 RTE_PTYPE_L4_TCP,
3384 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3385 RTE_PTYPE_L4_SCTP,
3386 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3387 RTE_PTYPE_L4_ICMP,
3390 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3391 RTE_PTYPE_TUNNEL_IP |
3392 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3393 RTE_PTYPE_INNER_L4_FRAG,
3394 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3395 RTE_PTYPE_TUNNEL_IP |
3396 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3397 RTE_PTYPE_INNER_L4_NONFRAG,
3398 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3399 RTE_PTYPE_TUNNEL_IP |
3400 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3401 RTE_PTYPE_INNER_L4_UDP,
3403 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3404 RTE_PTYPE_TUNNEL_IP |
3405 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3406 RTE_PTYPE_INNER_L4_TCP,
3407 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3408 RTE_PTYPE_TUNNEL_IP |
3409 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3410 RTE_PTYPE_INNER_L4_SCTP,
3411 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3412 RTE_PTYPE_TUNNEL_IP |
3413 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3414 RTE_PTYPE_INNER_L4_ICMP,
3417 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3418 RTE_PTYPE_TUNNEL_IP |
3419 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3420 RTE_PTYPE_INNER_L4_FRAG,
3421 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3422 RTE_PTYPE_TUNNEL_IP |
3423 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3424 RTE_PTYPE_INNER_L4_NONFRAG,
3425 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3426 RTE_PTYPE_TUNNEL_IP |
3427 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3428 RTE_PTYPE_INNER_L4_UDP,
3429 /* [105] reserved */
3430 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3431 RTE_PTYPE_TUNNEL_IP |
3432 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3433 RTE_PTYPE_INNER_L4_TCP,
3434 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3435 RTE_PTYPE_TUNNEL_IP |
3436 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3437 RTE_PTYPE_INNER_L4_SCTP,
3438 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3439 RTE_PTYPE_TUNNEL_IP |
3440 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3441 RTE_PTYPE_INNER_L4_ICMP,
3443 /* IPv6 --> GRE/Teredo/VXLAN */
3444 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3445 RTE_PTYPE_TUNNEL_GRENAT,
3447 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3448 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3449 RTE_PTYPE_TUNNEL_GRENAT |
3450 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3451 RTE_PTYPE_INNER_L4_FRAG,
3452 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3453 RTE_PTYPE_TUNNEL_GRENAT |
3454 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3455 RTE_PTYPE_INNER_L4_NONFRAG,
3456 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3457 RTE_PTYPE_TUNNEL_GRENAT |
3458 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3459 RTE_PTYPE_INNER_L4_UDP,
3460 /* [113] reserved */
3461 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3462 RTE_PTYPE_TUNNEL_GRENAT |
3463 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3464 RTE_PTYPE_INNER_L4_TCP,
3465 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3466 RTE_PTYPE_TUNNEL_GRENAT |
3467 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3468 RTE_PTYPE_INNER_L4_SCTP,
3469 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3470 RTE_PTYPE_TUNNEL_GRENAT |
3471 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3472 RTE_PTYPE_INNER_L4_ICMP,
3474 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3475 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3476 RTE_PTYPE_TUNNEL_GRENAT |
3477 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3478 RTE_PTYPE_INNER_L4_FRAG,
3479 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3480 RTE_PTYPE_TUNNEL_GRENAT |
3481 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3482 RTE_PTYPE_INNER_L4_NONFRAG,
3483 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3484 RTE_PTYPE_TUNNEL_GRENAT |
3485 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3486 RTE_PTYPE_INNER_L4_UDP,
3487 /* [120] reserved */
3488 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3489 RTE_PTYPE_TUNNEL_GRENAT |
3490 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3491 RTE_PTYPE_INNER_L4_TCP,
3492 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3493 RTE_PTYPE_TUNNEL_GRENAT |
3494 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3495 RTE_PTYPE_INNER_L4_SCTP,
3496 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3497 RTE_PTYPE_TUNNEL_GRENAT |
3498 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_INNER_L4_ICMP,
3501 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3502 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3503 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3505 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3506 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3507 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3508 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3509 RTE_PTYPE_INNER_L4_FRAG,
3510 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3511 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3512 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3513 RTE_PTYPE_INNER_L4_NONFRAG,
3514 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3515 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3516 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3517 RTE_PTYPE_INNER_L4_UDP,
3518 /* [128] reserved */
3519 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3520 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3521 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3522 RTE_PTYPE_INNER_L4_TCP,
3523 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3524 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3525 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3526 RTE_PTYPE_INNER_L4_SCTP,
3527 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3528 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3529 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3530 RTE_PTYPE_INNER_L4_ICMP,
3532 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3533 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3534 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3535 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3536 RTE_PTYPE_INNER_L4_FRAG,
3537 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3539 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3540 RTE_PTYPE_INNER_L4_NONFRAG,
3541 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3542 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3543 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3544 RTE_PTYPE_INNER_L4_UDP,
3545 /* [135] reserved */
3546 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3547 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3548 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3549 RTE_PTYPE_INNER_L4_TCP,
3550 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3551 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3552 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3553 RTE_PTYPE_INNER_L4_SCTP,
3554 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3555 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3556 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_INNER_L4_ICMP,
3558 /* [139] - [299] reserved */
3561 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3562 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3564 /* PPPoE --> IPv4 */
3565 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3566 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3568 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3569 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3570 RTE_PTYPE_L4_NONFRAG,
3571 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3572 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3574 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3575 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3577 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3578 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3580 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3581 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3584 /* PPPoE --> IPv6 */
3585 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3586 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3588 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3589 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3590 RTE_PTYPE_L4_NONFRAG,
3591 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3592 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3594 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3595 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3597 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3598 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3600 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3601 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3603 /* [314] - [324] reserved */
3605 /* IPv4/IPv6 --> GTPC/GTPU */
3606 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3607 RTE_PTYPE_TUNNEL_GTPC,
3608 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_GTPC,
3610 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3611 RTE_PTYPE_TUNNEL_GTPC,
3612 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3613 RTE_PTYPE_TUNNEL_GTPC,
3614 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3615 RTE_PTYPE_TUNNEL_GTPU,
3616 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3617 RTE_PTYPE_TUNNEL_GTPU,
3619 /* IPv4 --> GTPU --> IPv4 */
3620 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3621 RTE_PTYPE_TUNNEL_GTPU |
3622 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3623 RTE_PTYPE_INNER_L4_FRAG,
3624 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3625 RTE_PTYPE_TUNNEL_GTPU |
3626 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3627 RTE_PTYPE_INNER_L4_NONFRAG,
3628 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3629 RTE_PTYPE_TUNNEL_GTPU |
3630 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3631 RTE_PTYPE_INNER_L4_UDP,
3632 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3633 RTE_PTYPE_TUNNEL_GTPU |
3634 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3635 RTE_PTYPE_INNER_L4_TCP,
3636 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3637 RTE_PTYPE_TUNNEL_GTPU |
3638 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3639 RTE_PTYPE_INNER_L4_ICMP,
3641 /* IPv6 --> GTPU --> IPv4 */
3642 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3643 RTE_PTYPE_TUNNEL_GTPU |
3644 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3645 RTE_PTYPE_INNER_L4_FRAG,
3646 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3647 RTE_PTYPE_TUNNEL_GTPU |
3648 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3649 RTE_PTYPE_INNER_L4_NONFRAG,
3650 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3651 RTE_PTYPE_TUNNEL_GTPU |
3652 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3653 RTE_PTYPE_INNER_L4_UDP,
3654 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3655 RTE_PTYPE_TUNNEL_GTPU |
3656 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3657 RTE_PTYPE_INNER_L4_TCP,
3658 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3659 RTE_PTYPE_TUNNEL_GTPU |
3660 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3661 RTE_PTYPE_INNER_L4_ICMP,
3663 /* IPv4 --> GTPU --> IPv6 */
3664 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3665 RTE_PTYPE_TUNNEL_GTPU |
3666 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3667 RTE_PTYPE_INNER_L4_FRAG,
3668 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3669 RTE_PTYPE_TUNNEL_GTPU |
3670 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3671 RTE_PTYPE_INNER_L4_NONFRAG,
3672 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3673 RTE_PTYPE_TUNNEL_GTPU |
3674 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3675 RTE_PTYPE_INNER_L4_UDP,
3676 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3677 RTE_PTYPE_TUNNEL_GTPU |
3678 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3679 RTE_PTYPE_INNER_L4_TCP,
3680 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3681 RTE_PTYPE_TUNNEL_GTPU |
3682 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3683 RTE_PTYPE_INNER_L4_ICMP,
3685 /* IPv6 --> GTPU --> IPv6 */
3686 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3687 RTE_PTYPE_TUNNEL_GTPU |
3688 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_INNER_L4_FRAG,
3690 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3691 RTE_PTYPE_TUNNEL_GTPU |
3692 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3693 RTE_PTYPE_INNER_L4_NONFRAG,
3694 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3695 RTE_PTYPE_TUNNEL_GTPU |
3696 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3697 RTE_PTYPE_INNER_L4_UDP,
3698 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3699 RTE_PTYPE_TUNNEL_GTPU |
3700 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3701 RTE_PTYPE_INNER_L4_TCP,
3702 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_TUNNEL_GTPU |
3704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_INNER_L4_ICMP,
3707 /* IPv4 --> UDP ECPRI */
3708 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3710 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3712 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3714 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3716 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3718 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3720 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3722 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3724 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3726 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3729 /* IPV6 --> UDP ECPRI */
3730 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3732 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3734 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3736 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3738 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3740 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3742 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3744 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3746 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3748 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3750 /* All others reserved */
3751 };
3753 return ptype_tbl[ptype];
3754 }
3757 iavf_set_default_ptype_table(struct rte_eth_dev *dev)
3759 struct iavf_adapter *ad =
3760 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3761 int i;
3763 for (i = 0; i < IAVF_MAX_PKT_TYPE; i++)
3764 ad->ptype_tbl[i] = iavf_get_default_ptype(i);
3765 }