1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "iavf_ipsec_crypto.h"
31 #include "rte_pmd_iavf.h"
33 /* Offset of mbuf dynamic field for protocol extraction's metadata */
34 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
36 /* Mask of mbuf dynamic flags for protocol extraction's type */
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
42 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
43 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
46 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
48 static uint8_t rxdid_map[] = {
49 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
50 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
51 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
52 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
53 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
54 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
55 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
57 IAVF_RXDID_COMMS_IPSEC_CRYPTO,
60 return flex_type < RTE_DIM(rxdid_map) ?
61 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
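/* Illustrative note (not part of the driver source): extraction types in
 * range map directly, and out-of-range types fall back to the plain OVS
 * descriptor format, e.g.
 *
 *     iavf_proto_xtr_type_to_rxdid(IAVF_PROTO_XTR_TCP)
 *         == IAVF_RXDID_COMMS_AUX_TCP;
 *     iavf_proto_xtr_type_to_rxdid(0xFF) == IAVF_RXDID_COMMS_OVS_1;
 */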
65 iavf_monitor_callback(const uint64_t value,
66 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
68 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
70 * we expect the DD bit to be set to 1 if this descriptor was already written to.
73 return (value & m) == m ? -1 : 0;
77 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
79 struct iavf_rx_queue *rxq = rx_queue;
80 volatile union iavf_rx_desc *rxdp;
84 rxdp = &rxq->rx_ring[desc];
85 /* watch for changes in status bit */
86 pmc->addr = &rxdp->wb.qword1.status_error_len;
88 /* comparison callback */
89 pmc->fn = iavf_monitor_callback;
91 /* registers are 64-bit */
92 pmc->size = sizeof(uint64_t);
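/* Usage sketch (application side, illustrative; port_id, queue_id and
 * timeout_cycles are assumed values, not defined here):
 *
 *     struct rte_power_monitor_cond pmc;
 *     rte_eth_get_monitor_addr(port_id, queue_id, &pmc);
 *     rte_power_monitor(&pmc, rte_rdtsc() + timeout_cycles);
 *
 * The ethdev call resolves to iavf_get_monitor_addr() for this PMD, and
 * iavf_monitor_callback() aborts the wait (returns -1) once the DD bit
 * has been set by hardware.
 */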
98 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
100 /* The following constraints must be satisfied:
101 * thresh < rxq->nb_rx_desc
103 if (thresh >= nb_desc) {
104 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
112 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
113 uint16_t tx_free_thresh)
115 /* TX descriptors will have their RS bit set after tx_rs_thresh
116 * descriptors have been used. The TX descriptor ring will be cleaned
117 * after tx_free_thresh descriptors are used or if the number of
118 * descriptors required to transmit a packet is greater than the
119 * number of free TX descriptors.
121 * The following constraints must be satisfied:
122 * - tx_rs_thresh must be less than the size of the ring minus 2.
123 * - tx_free_thresh must be less than the size of the ring minus 3.
124 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
125 * - tx_rs_thresh must be a divisor of the ring size.
127 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
128 * race condition, hence the maximum threshold constraints. When set
129 * to zero use default values.
131 if (tx_rs_thresh >= (nb_desc - 2)) {
132 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
133 "number of TX descriptors (%u) minus 2",
134 tx_rs_thresh, nb_desc);
137 if (tx_free_thresh >= (nb_desc - 3)) {
138 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
139 "number of TX descriptors (%u) minus 3.",
140 tx_free_thresh, nb_desc);
143 if (tx_rs_thresh > tx_free_thresh) {
144 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
145 "equal to tx_free_thresh (%u).",
146 tx_rs_thresh, tx_free_thresh);
149 if ((nb_desc % tx_rs_thresh) != 0) {
150 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
151 "number of TX descriptors (%u).",
152 tx_rs_thresh, nb_desc);
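/* Worked example (illustrative, assumed values): nb_desc = 1024,
 * tx_rs_thresh = 32, tx_free_thresh = 64. Then 32 < 1022, 64 < 1021,
 * 32 <= 64 and 1024 % 32 == 0, so all four constraints above hold.
 */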
160 check_rx_vec_allow(struct iavf_rx_queue *rxq)
162 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
163 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
164 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
168 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
173 check_tx_vec_allow(struct iavf_tx_queue *txq)
175 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
176 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
177 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
178 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
181 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
186 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
190 if (rxq->rx_free_thresh < IAVF_RX_MAX_BURST) {
191 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
192 "rxq->rx_free_thresh=%d, "
193 "IAVF_RX_MAX_BURST=%d",
194 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
196 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
197 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
198 "rxq->nb_rx_desc=%d, "
199 "rxq->rx_free_thresh=%d",
200 rxq->nb_rx_desc, rxq->rx_free_thresh);
207 reset_rx_queue(struct iavf_rx_queue *rxq)
215 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
217 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
218 ((volatile char *)rxq->rx_ring)[i] = 0;
220 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
222 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
223 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
226 rxq->rx_nb_avail = 0;
227 rxq->rx_next_avail = 0;
228 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
233 rte_pktmbuf_free(rxq->pkt_first_seg);
235 rxq->pkt_first_seg = NULL;
236 rxq->pkt_last_seg = NULL;
238 rxq->rxrearm_start = 0;
242 reset_tx_queue(struct iavf_tx_queue *txq)
244 struct iavf_tx_entry *txe;
249 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
254 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
255 for (i = 0; i < size; i++)
256 ((volatile char *)txq->tx_ring)[i] = 0;
258 prev = (uint16_t)(txq->nb_tx_desc - 1);
259 for (i = 0; i < txq->nb_tx_desc; i++) {
260 txq->tx_ring[i].cmd_type_offset_bsz =
261 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
264 txe[prev].next_id = i;
271 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
272 txq->nb_free = txq->nb_tx_desc - 1;
274 txq->next_dd = txq->rs_thresh - 1;
275 txq->next_rs = txq->rs_thresh - 1;
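/* State example (illustrative): after reset with nb_tx_desc = 512 and
 * rs_thresh = 32, last_desc_cleaned = 511, nb_free = 511 (one slot is
 * kept as a sentinel), and next_dd = next_rs = 31.
 */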
279 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
281 volatile union iavf_rx_desc *rxd;
282 struct rte_mbuf *mbuf = NULL;
286 for (i = 0; i < rxq->nb_rx_desc; i++) {
287 mbuf = rte_mbuf_raw_alloc(rxq->mp);
288 if (unlikely(!mbuf)) {
289 for (j = 0; j < i; j++) {
290 rte_pktmbuf_free_seg(rxq->sw_ring[j]);
291 rxq->sw_ring[j] = NULL;
293 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
297 rte_mbuf_refcnt_set(mbuf, 1);
299 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
301 mbuf->port = rxq->port_id;
304 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
306 rxd = &rxq->rx_ring[i];
307 rxd->read.pkt_addr = dma_addr;
308 rxd->read.hdr_addr = 0;
309 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
314 rxq->sw_ring[i] = mbuf;
321 release_rxq_mbufs(struct iavf_rx_queue *rxq)
328 for (i = 0; i < rxq->nb_rx_desc; i++) {
329 if (rxq->sw_ring[i]) {
330 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
331 rxq->sw_ring[i] = NULL;
336 if (rxq->rx_nb_avail == 0)
338 for (i = 0; i < rxq->rx_nb_avail; i++) {
339 struct rte_mbuf *mbuf;
341 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
342 rte_pktmbuf_free_seg(mbuf);
344 rxq->rx_nb_avail = 0;
348 release_txq_mbufs(struct iavf_tx_queue *txq)
352 if (!txq || !txq->sw_ring) {
353 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
357 for (i = 0; i < txq->nb_tx_desc; i++) {
358 if (txq->sw_ring[i].mbuf) {
359 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
360 txq->sw_ring[i].mbuf = NULL;
365 static const struct iavf_rxq_ops def_rxq_ops = {
366 .release_mbufs = release_rxq_mbufs,
369 static const struct iavf_txq_ops def_txq_ops = {
370 .release_mbufs = release_txq_mbufs,
374 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
376 volatile union iavf_rx_flex_desc *rxdp)
378 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
379 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
380 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
384 if (desc->flow_id != 0xFFFFFFFF) {
385 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
386 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
389 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
390 stat_err = rte_le_to_cpu_16(desc->status_error0);
391 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
392 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
393 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
399 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
401 volatile union iavf_rx_flex_desc *rxdp)
403 volatile struct iavf_32b_rx_flex_desc_comms *desc =
404 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
407 stat_err = rte_le_to_cpu_16(desc->status_error0);
408 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
409 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
410 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
413 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
414 if (desc->flow_id != 0xFFFFFFFF) {
415 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
416 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
419 if (rxq->xtr_ol_flag) {
420 uint32_t metadata = 0;
422 stat_err = rte_le_to_cpu_16(desc->status_error1);
424 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
425 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
427 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
429 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
432 mb->ol_flags |= rxq->xtr_ol_flag;
434 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
441 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
443 volatile union iavf_rx_flex_desc *rxdp)
445 volatile struct iavf_32b_rx_flex_desc_comms *desc =
446 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
449 stat_err = rte_le_to_cpu_16(desc->status_error0);
450 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
451 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
452 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
455 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
456 if (desc->flow_id != 0xFFFFFFFF) {
457 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
458 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
461 if (rxq->xtr_ol_flag) {
462 uint32_t metadata = 0;
464 if (desc->flex_ts.flex.aux0 != 0xFFFF)
465 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
466 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
467 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
470 mb->ol_flags |= rxq->xtr_ol_flag;
472 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
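/* Note (illustrative): the v1 handler above gates the extracted metadata
 * on the XTRMD4/XTRMD5 valid bits in status_error1, while this v2
 * handler instead treats an aux field of 0xFFFF as "no data"; both
 * publish the result through the same protocol extraction dynamic field.
 */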
479 iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[IAVF_RXDID_LAST + 1] = {
480 [IAVF_RXDID_LEGACY_0] = iavf_rxd_to_pkt_fields_by_comms_ovs,
481 [IAVF_RXDID_LEGACY_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
482 [IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
483 [IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
484 [IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
485 [IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
486 iavf_rxd_to_pkt_fields_by_comms_aux_v1,
487 [IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
488 [IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
489 iavf_rxd_to_pkt_fields_by_comms_aux_v2,
490 [IAVF_RXDID_COMMS_IPSEC_CRYPTO] =
491 iavf_rxd_to_pkt_fields_by_comms_aux_v2,
492 [IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
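/* Dispatch example (illustrative): a queue set up with
 * rxq->rxdid == IAVF_RXDID_COMMS_AUX_IPV4 resolves to
 *
 *     rxd_to_pkt_fields_ops[rxq->rxdid] ==
 *         iavf_rxd_to_pkt_fields_by_comms_aux_v1
 *
 * in the Rx burst functions below.
 */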
496 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
501 case IAVF_RXDID_COMMS_AUX_VLAN:
502 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
504 case IAVF_RXDID_COMMS_AUX_IPV4:
505 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
507 case IAVF_RXDID_COMMS_AUX_IPV6:
508 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
510 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
512 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
514 case IAVF_RXDID_COMMS_AUX_TCP:
515 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
517 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
519 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
521 case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
523 rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
525 case IAVF_RXDID_COMMS_OVS_1:
526 case IAVF_RXDID_LEGACY_0:
527 case IAVF_RXDID_LEGACY_1:
530 /* update this according to the RXDID for FLEX_DESC_NONE */
531 rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
535 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
536 rxq->xtr_ol_flag = 0;
540 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
541 uint16_t nb_desc, unsigned int socket_id,
542 const struct rte_eth_rxconf *rx_conf,
543 struct rte_mempool *mp)
545 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
546 struct iavf_adapter *ad =
547 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
548 struct iavf_info *vf =
549 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
550 struct iavf_vsi *vsi = &vf->vsi;
551 struct iavf_rx_queue *rxq;
552 const struct rte_memzone *mz;
556 uint16_t rx_free_thresh;
559 PMD_INIT_FUNC_TRACE();
561 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
563 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
564 nb_desc > IAVF_MAX_RING_DESC ||
565 nb_desc < IAVF_MIN_RING_DESC) {
566 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
571 /* Check free threshold */
572 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
573 IAVF_DEFAULT_RX_FREE_THRESH :
574 rx_conf->rx_free_thresh;
575 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
578 /* Free memory if needed */
579 if (dev->data->rx_queues[queue_idx]) {
580 iavf_dev_rx_queue_release(dev, queue_idx);
581 dev->data->rx_queues[queue_idx] = NULL;
584 /* Allocate the rx queue data structure */
585 rxq = rte_zmalloc_socket("iavf rxq",
586 sizeof(struct iavf_rx_queue),
590 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
591 "rx queue data structure");
595 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
596 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
598 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
599 rxq->proto_xtr = proto_xtr;
601 rxq->rxdid = IAVF_RXDID_LEGACY_1;
602 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
605 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
606 struct virtchnl_vlan_supported_caps *stripping_support =
607 &vf->vlan_v2_caps.offloads.stripping_support;
608 uint32_t stripping_cap;
610 if (stripping_support->outer)
611 stripping_cap = stripping_support->outer;
613 stripping_cap = stripping_support->inner;
615 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
616 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
617 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
618 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
620 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
623 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
626 rxq->nb_rx_desc = nb_desc;
627 rxq->rx_free_thresh = rx_free_thresh;
628 rxq->queue_id = queue_idx;
629 rxq->port_id = dev->data->port_id;
630 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
633 rxq->offloads = offloads;
635 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
636 rxq->crc_len = RTE_ETHER_CRC_LEN;
640 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
641 rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
643 /* Allocate the software ring. */
644 len = nb_desc + IAVF_RX_MAX_BURST;
646 rte_zmalloc_socket("iavf rx sw ring",
647 sizeof(struct rte_mbuf *) * len,
651 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
656 /* Allocate the maximum number of RX ring hardware descriptors with
657 * a little extra to support bulk allocation.
659 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
660 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
662 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
663 ring_size, IAVF_RING_BASE_ALIGN,
666 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
667 rte_free(rxq->sw_ring);
671 /* Zero all the descriptors in the ring. */
672 memset(mz->addr, 0, ring_size);
673 rxq->rx_ring_phys_addr = mz->iova;
674 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
679 dev->data->rx_queues[queue_idx] = rxq;
680 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
681 rxq->ops = &def_rxq_ops;
683 if (check_rx_bulk_allow(rxq)) {
684 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
685 "satisfied. Rx Burst Bulk Alloc function will be "
686 "used on port=%d, queue=%d.",
687 rxq->port_id, rxq->queue_id);
689 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
690 "not satisfied, Scattered Rx is requested "
691 "on port=%d, queue=%d.",
692 rxq->port_id, rxq->queue_id);
693 ad->rx_bulk_alloc_allowed = false;
696 if (!check_rx_vec_allow(rxq))
697 ad->rx_vec_allowed = false;
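/* Application-side sketch (illustrative, assumed values): this setup
 * path is reached through the ethdev API, e.g.
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *                                  &rx_conf, mbuf_pool);
 *
 * where nb_desc = 1024 satisfies the ring-size and alignment checks
 * performed above.
 */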
703 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
706 unsigned int socket_id,
707 const struct rte_eth_txconf *tx_conf)
709 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
710 struct iavf_adapter *adapter =
711 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
712 struct iavf_info *vf =
713 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
714 struct iavf_tx_queue *txq;
715 const struct rte_memzone *mz;
717 uint16_t tx_rs_thresh, tx_free_thresh;
720 PMD_INIT_FUNC_TRACE();
722 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
724 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
725 nb_desc > IAVF_MAX_RING_DESC ||
726 nb_desc < IAVF_MIN_RING_DESC) {
727 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
732 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
733 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
734 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
735 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
736 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
739 /* Free memory if needed. */
740 if (dev->data->tx_queues[queue_idx]) {
741 iavf_dev_tx_queue_release(dev, queue_idx);
742 dev->data->tx_queues[queue_idx] = NULL;
745 /* Allocate the TX queue data structure. */
746 txq = rte_zmalloc_socket("iavf txq",
747 sizeof(struct iavf_tx_queue),
751 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
752 "tx queue structure");
756 if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
757 struct virtchnl_vlan_supported_caps *insertion_support =
758 &adapter->vf.vlan_v2_caps.offloads.insertion_support;
759 uint32_t insertion_cap;
761 if (insertion_support->outer)
762 insertion_cap = insertion_support->outer;
764 insertion_cap = insertion_support->inner;
766 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
767 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
768 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
769 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
771 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
774 txq->nb_tx_desc = nb_desc;
775 txq->rs_thresh = tx_rs_thresh;
776 txq->free_thresh = tx_free_thresh;
777 txq->queue_id = queue_idx;
778 txq->port_id = dev->data->port_id;
779 txq->offloads = offloads;
780 txq->tx_deferred_start = tx_conf->tx_deferred_start;
782 if (iavf_ipsec_crypto_supported(adapter))
783 txq->ipsec_crypto_pkt_md_offset =
784 iavf_security_get_pkt_md_offset(adapter);
786 /* Allocate software ring */
788 rte_zmalloc_socket("iavf tx sw ring",
789 sizeof(struct iavf_tx_entry) * nb_desc,
793 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
798 /* Allocate TX hardware ring descriptors. */
799 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
800 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
801 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
802 ring_size, IAVF_RING_BASE_ALIGN,
805 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
806 rte_free(txq->sw_ring);
810 txq->tx_ring_phys_addr = mz->iova;
811 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
816 dev->data->tx_queues[queue_idx] = txq;
817 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
818 txq->ops = &def_txq_ops;
820 if (!check_tx_vec_allow(txq)) {
821 struct iavf_adapter *ad =
822 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
823 ad->tx_vec_allowed = false;
826 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
827 vf->tm_conf.committed) {
829 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
830 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
831 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
832 vf->qtc_map[tc].queue_count))
835 if (tc >= vf->qos_cap->num_elem) {
836 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
846 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
848 struct iavf_adapter *adapter =
849 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
850 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
851 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
852 struct iavf_rx_queue *rxq;
855 PMD_DRV_FUNC_TRACE();
857 if (rx_queue_id >= dev->data->nb_rx_queues)
860 rxq = dev->data->rx_queues[rx_queue_id];
862 err = alloc_rxq_mbufs(rxq);
864 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
870 /* Init the RX tail register. */
871 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
872 IAVF_WRITE_FLUSH(hw);
874 /* Ready to switch the queue on */
876 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
878 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
881 release_rxq_mbufs(rxq);
882 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
885 dev->data->rx_queue_state[rx_queue_id] =
886 RTE_ETH_QUEUE_STATE_STARTED;
893 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
895 struct iavf_adapter *adapter =
896 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
897 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
898 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
899 struct iavf_tx_queue *txq;
902 PMD_DRV_FUNC_TRACE();
904 if (tx_queue_id >= dev->data->nb_tx_queues)
907 txq = dev->data->tx_queues[tx_queue_id];
909 /* Init the TX tail register. */
910 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
911 IAVF_WRITE_FLUSH(hw);
913 /* Ready to switch the queue on */
915 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
917 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
920 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
923 dev->data->tx_queue_state[tx_queue_id] =
924 RTE_ETH_QUEUE_STATE_STARTED;
930 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
932 struct iavf_adapter *adapter =
933 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
934 struct iavf_rx_queue *rxq;
937 PMD_DRV_FUNC_TRACE();
939 if (rx_queue_id >= dev->data->nb_rx_queues)
942 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
944 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
949 rxq = dev->data->rx_queues[rx_queue_id];
950 rxq->ops->release_mbufs(rxq);
952 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
958 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
960 struct iavf_adapter *adapter =
961 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
962 struct iavf_tx_queue *txq;
965 PMD_DRV_FUNC_TRACE();
967 if (tx_queue_id >= dev->data->nb_tx_queues)
970 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
972 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
977 txq = dev->data->tx_queues[tx_queue_id];
978 txq->ops->release_mbufs(txq);
980 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
986 iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
988 struct iavf_rx_queue *q = dev->data->rx_queues[qid];
993 q->ops->release_mbufs(q);
994 rte_free(q->sw_ring);
995 rte_memzone_free(q->mz);
1000 iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1002 struct iavf_tx_queue *q = dev->data->tx_queues[qid];
1007 q->ops->release_mbufs(q);
1008 rte_free(q->sw_ring);
1009 rte_memzone_free(q->mz);
1014 iavf_stop_queues(struct rte_eth_dev *dev)
1016 struct iavf_adapter *adapter =
1017 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1018 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1019 struct iavf_rx_queue *rxq;
1020 struct iavf_tx_queue *txq;
1023 /* Stop all queues */
1024 if (!vf->lv_enabled) {
1025 ret = iavf_disable_queues(adapter);
1027 PMD_DRV_LOG(WARNING, "Fail to stop queues");
1029 ret = iavf_disable_queues_lv(adapter);
1031 PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
1035 PMD_DRV_LOG(WARNING, "Fail to stop queues");
1037 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1038 txq = dev->data->tx_queues[i];
1041 txq->ops->release_mbufs(txq);
1042 reset_tx_queue(txq);
1043 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1045 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1046 rxq = dev->data->rx_queues[i];
1049 rxq->ops->release_mbufs(rxq);
1050 reset_rx_queue(rxq);
1051 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1064 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1066 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1067 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1068 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1070 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1077 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1078 volatile union iavf_rx_flex_desc *rxdp)
1080 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1081 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1082 mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
1083 RTE_MBUF_F_RX_VLAN_STRIPPED;
1085 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1090 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1091 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1092 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1093 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
1094 RTE_MBUF_F_RX_QINQ |
1095 RTE_MBUF_F_RX_VLAN_STRIPPED |
1097 mb->vlan_tci_outer = mb->vlan_tci;
1098 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1099 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1100 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1101 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1103 mb->vlan_tci_outer = 0;
1109 iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
1110 volatile union iavf_rx_flex_desc *rxdp)
1112 volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
1113 (volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
1115 mb->dynfield1[0] = desc->ipsec_said &
1116 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
1120 iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
1121 volatile union iavf_rx_flex_desc *rxdp,
1122 struct iavf_ipsec_crypto_stats *stats)
1124 uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
1126 if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
1127 uint16_t ipsec_status;
1129 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
1131 ipsec_status = status1 &
1132 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
1135 if (unlikely(ipsec_status !=
1136 IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
1137 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
1139 switch (ipsec_status) {
1140 case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
1141 stats->ierrors.sad_miss++;
1143 case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
1144 stats->ierrors.not_processed++;
1146 case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
1147 stats->ierrors.icv_check++;
1149 case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
1150 stats->ierrors.ipsec_length++;
1152 case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
1153 stats->ierrors.misc++;
1157 stats->ierrors.count++;
1162 stats->ibytes += rte_le_to_cpu_16(rxdp->wb.pkt_len) & 0x3FFF;
1164 if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
1166 IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
1167 iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
1172 /* Translate the rx descriptor status and error fields to pkt flags */
1173 static inline uint64_t
1174 iavf_rxd_to_pkt_flags(uint64_t qword)
1177 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1179 #define IAVF_RX_ERR_BITS 0x3f
1181 /* Check if RSS_HASH */
1182 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1183 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1184 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
1186 /* Check if FDIR Match */
1187 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1188 RTE_MBUF_F_RX_FDIR : 0);
1190 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1191 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1195 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1196 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1198 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1200 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1201 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1203 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1205 /* TODO: Oversize error bit is not processed here */
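/* Example (illustrative): a descriptor whose FLTSTAT field equals
 * IAVF_RX_DESC_FLTSTAT_RSS_HASH and whose error bits are all clear
 * yields RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
 * RTE_MBUF_F_RX_L4_CKSUM_GOOD.
 */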
1210 static inline uint64_t
1211 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1214 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1217 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1218 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1219 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1221 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1223 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1224 flags |= RTE_MBUF_F_RX_FDIR_ID;
1228 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1229 flags |= RTE_MBUF_F_RX_FDIR_ID;
1234 #define IAVF_RX_FLEX_ERR0_BITS \
1235 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1236 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1237 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1238 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1239 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1240 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1242 /* Rx L3/L4 checksum */
1243 static inline uint64_t
1244 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1248 /* check if HW has decoded the packet and validated the checksum */
1249 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1252 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1253 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1257 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1258 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1260 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1262 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1263 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1265 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1267 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1268 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1273 /* If the number of free RX descriptors is greater than the RX free
1274 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1275 * register. Update the RDT with the value of the last processed RX
1276 * descriptor minus 1, to guarantee that the RDT register is never
1277 * equal to the RDH register, which creates a "full" ring situation
1278 * from the hardware point of view.
1281 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1283 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1285 if (nb_hold > rxq->rx_free_thresh) {
1287 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1288 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1289 rx_id = (uint16_t)((rx_id == 0) ?
1290 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1291 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1294 rxq->nb_rx_hold = nb_hold;
1297 /* implement recv_pkts */
1299 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1301 volatile union iavf_rx_desc *rx_ring;
1302 volatile union iavf_rx_desc *rxdp;
1303 struct iavf_rx_queue *rxq;
1304 union iavf_rx_desc rxd;
1305 struct rte_mbuf *rxe;
1306 struct rte_eth_dev *dev;
1307 struct rte_mbuf *rxm;
1308 struct rte_mbuf *nmb;
1312 uint16_t rx_packet_len;
1313 uint16_t rx_id, nb_hold;
1316 const uint32_t *ptype_tbl;
1321 rx_id = rxq->rx_tail;
1322 rx_ring = rxq->rx_ring;
1323 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1325 while (nb_rx < nb_pkts) {
1326 rxdp = &rx_ring[rx_id];
1327 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1328 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1329 IAVF_RXD_QW1_STATUS_SHIFT;
1331 /* Check the DD bit first */
1332 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1334 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1336 nmb = rte_mbuf_raw_alloc(rxq->mp);
1337 if (unlikely(!nmb)) {
1338 dev = &rte_eth_devices[rxq->port_id];
1339 dev->data->rx_mbuf_alloc_failed++;
1340 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1341 "queue_id=%u", rxq->port_id, rxq->queue_id);
1347 rxe = rxq->sw_ring[rx_id];
1348 rxq->sw_ring[rx_id] = nmb;
1350 if (unlikely(rx_id == rxq->nb_rx_desc))
1353 /* Prefetch next mbuf */
1354 rte_prefetch0(rxq->sw_ring[rx_id]);
1356 /* When next RX descriptor is on a cache line boundary,
1357 * prefetch the next 4 RX descriptors and next 8 pointers
1360 if ((rx_id & 0x3) == 0) {
1361 rte_prefetch0(&rx_ring[rx_id]);
1362 rte_prefetch0(rxq->sw_ring[rx_id]);
1366 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1367 rxdp->read.hdr_addr = 0;
1368 rxdp->read.pkt_addr = dma_addr;
1370 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1371 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1373 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1374 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1377 rxm->pkt_len = rx_packet_len;
1378 rxm->data_len = rx_packet_len;
1379 rxm->port = rxq->port_id;
1381 iavf_rxd_to_vlan_tci(rxm, &rxd);
1382 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1384 ptype_tbl[(uint8_t)((qword1 &
1385 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1387 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1389 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1391 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1392 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1394 rxm->ol_flags |= pkt_flags;
1396 rx_pkts[nb_rx++] = rxm;
1398 rxq->rx_tail = rx_id;
1400 iavf_update_rx_tail(rxq, nb_hold, rx_id);
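/* Receive sketch (application side, illustrative; port_id and queue_id
 * are assumed):
 *
 *     struct rte_mbuf *bufs[32];
 *     uint16_t n = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *
 * rte_eth_rx_burst() lands here when the queue uses the legacy
 * descriptor format and no vector or bulk-alloc path was selected.
 */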
1405 /* implement recv_pkts for flexible Rx descriptor */
1407 iavf_recv_pkts_flex_rxd(void *rx_queue,
1408 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1410 volatile union iavf_rx_desc *rx_ring;
1411 volatile union iavf_rx_flex_desc *rxdp;
1412 struct iavf_rx_queue *rxq;
1413 union iavf_rx_flex_desc rxd;
1414 struct rte_mbuf *rxe;
1415 struct rte_eth_dev *dev;
1416 struct rte_mbuf *rxm;
1417 struct rte_mbuf *nmb;
1419 uint16_t rx_stat_err0;
1420 uint16_t rx_packet_len;
1421 uint16_t rx_id, nb_hold;
1424 const uint32_t *ptype_tbl;
1429 rx_id = rxq->rx_tail;
1430 rx_ring = rxq->rx_ring;
1431 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1433 struct iavf_adapter *ad = rxq->vsi->adapter;
1436 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1437 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1438 if (sw_cur_time - ad->hw_time_update > 4) {
1439 if (iavf_get_phc_time(ad))
1440 PMD_DRV_LOG(ERR, "get physical time failed");
1441 ad->hw_time_update = sw_cur_time;
1445 while (nb_rx < nb_pkts) {
1446 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1447 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1449 /* Check the DD bit first */
1450 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1452 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1454 nmb = rte_mbuf_raw_alloc(rxq->mp);
1455 if (unlikely(!nmb)) {
1456 dev = &rte_eth_devices[rxq->port_id];
1457 dev->data->rx_mbuf_alloc_failed++;
1458 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1459 "queue_id=%u", rxq->port_id, rxq->queue_id);
1465 rxe = rxq->sw_ring[rx_id];
1466 rxq->sw_ring[rx_id] = nmb;
1468 if (unlikely(rx_id == rxq->nb_rx_desc))
1471 /* Prefetch next mbuf */
1472 rte_prefetch0(rxq->sw_ring[rx_id]);
1474 /* When next RX descriptor is on a cache line boundary,
1475 * prefetch the next 4 RX descriptors and next 8 pointers
1478 if ((rx_id & 0x3) == 0) {
1479 rte_prefetch0(&rx_ring[rx_id]);
1480 rte_prefetch0(rxq->sw_ring[rx_id]);
1484 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1485 rxdp->read.hdr_addr = 0;
1486 rxdp->read.pkt_addr = dma_addr;
1488 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1489 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1491 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1492 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1495 rxm->pkt_len = rx_packet_len;
1496 rxm->data_len = rx_packet_len;
1497 rxm->port = rxq->port_id;
1499 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1500 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1501 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1502 iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
1503 &rxq->stats.ipsec_crypto);
1504 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
1505 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1507 if (iavf_timestamp_dynflag > 0) {
1508 ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
1509 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1511 ad->phc_time = ts_ns;
1512 ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1514 *RTE_MBUF_DYNFIELD(rxm,
1515 iavf_timestamp_dynfield_offset,
1516 rte_mbuf_timestamp_t *) = ts_ns;
1517 rxm->ol_flags |= iavf_timestamp_dynflag;
1520 rxm->ol_flags |= pkt_flags;
1522 rx_pkts[nb_rx++] = rxm;
1524 rxq->rx_tail = rx_id;
1526 iavf_update_rx_tail(rxq, nb_hold, rx_id);
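/* Timestamp note (illustrative): the flex descriptor carries only the
 * low 32 bits of the PHC time, so iavf_tstamp_convert_32b_64b() splices
 * ts_high onto the cached ad->phc_time; the refresh above (at most every
 * ~4 ms) keeps the cache recent enough that the 32-bit value cannot
 * wrap unnoticed between updates.
 */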
1531 /* implement recv_scattered_pkts for flexible Rx descriptor */
1533 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1536 struct iavf_rx_queue *rxq = rx_queue;
1537 union iavf_rx_flex_desc rxd;
1538 struct rte_mbuf *rxe;
1539 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1540 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1541 struct rte_mbuf *nmb, *rxm;
1542 uint16_t rx_id = rxq->rx_tail;
1543 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1544 struct rte_eth_dev *dev;
1545 uint16_t rx_stat_err0;
1548 struct iavf_adapter *ad = rxq->vsi->adapter;
1551 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1552 volatile union iavf_rx_flex_desc *rxdp;
1553 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1555 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1556 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1557 if (sw_cur_time - ad->hw_time_update > 4) {
1558 if (iavf_get_phc_time(ad))
1559 PMD_DRV_LOG(ERR, "get physical time failed");
1560 ad->hw_time_update = sw_cur_time;
1564 while (nb_rx < nb_pkts) {
1565 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1566 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1568 /* Check the DD bit */
1569 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1571 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1573 nmb = rte_mbuf_raw_alloc(rxq->mp);
1574 if (unlikely(!nmb)) {
1575 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1576 "queue_id=%u", rxq->port_id, rxq->queue_id);
1577 dev = &rte_eth_devices[rxq->port_id];
1578 dev->data->rx_mbuf_alloc_failed++;
1584 rxe = rxq->sw_ring[rx_id];
1585 rxq->sw_ring[rx_id] = nmb;
1587 if (rx_id == rxq->nb_rx_desc)
1590 /* Prefetch next mbuf */
1591 rte_prefetch0(rxq->sw_ring[rx_id]);
1593 /* When next RX descriptor is on a cache line boundary,
1594 * prefetch the next 4 RX descriptors and next 8 pointers
1597 if ((rx_id & 0x3) == 0) {
1598 rte_prefetch0(&rx_ring[rx_id]);
1599 rte_prefetch0(rxq->sw_ring[rx_id]);
1604 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1606 /* Set data buffer address and data length of the mbuf */
1607 rxdp->read.hdr_addr = 0;
1608 rxdp->read.pkt_addr = dma_addr;
1609 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1610 IAVF_RX_FLX_DESC_PKT_LEN_M;
1611 rxm->data_len = rx_packet_len;
1612 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1614 /* If this is the first buffer of the received packet, set the
1615 * pointer to the first mbuf of the packet and initialize its
1616 * context. Otherwise, update the total length and the number
1617 * of segments of the current scattered packet, and update the
1618 * pointer to the last mbuf of the current packet.
1622 first_seg->nb_segs = 1;
1623 first_seg->pkt_len = rx_packet_len;
1625 first_seg->pkt_len =
1626 (uint16_t)(first_seg->pkt_len +
1628 first_seg->nb_segs++;
1629 last_seg->next = rxm;
1632 /* If this is not the last buffer of the received packet,
1633 * update the pointer to the last mbuf of the current scattered
1634 * packet and continue to parse the RX ring.
1636 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1641 /* This is the last buffer of the received packet. If the CRC
1642 * is not stripped by the hardware:
1643 * - Subtract the CRC length from the total packet length.
1644 * - If the last buffer only contains the whole CRC or a part
1645 * of it, free the mbuf associated to the last buffer. If part
1646 * of the CRC is also contained in the previous mbuf, subtract
1647 * the length of that CRC part from the data length of the
1651 if (unlikely(rxq->crc_len > 0)) {
1652 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1653 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1654 rte_pktmbuf_free_seg(rxm);
1655 first_seg->nb_segs--;
1656 last_seg->data_len =
1657 (uint16_t)(last_seg->data_len -
1658 (RTE_ETHER_CRC_LEN - rx_packet_len));
1659 last_seg->next = NULL;
1661 rxm->data_len = (uint16_t)(rx_packet_len -
1666 first_seg->port = rxq->port_id;
1667 first_seg->ol_flags = 0;
1668 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1669 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1670 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1671 iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
1672 &rxq->stats.ipsec_crypto);
1673 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
1674 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1676 if (iavf_timestamp_dynflag > 0) {
1677 ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
1678 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1680 ad->phc_time = ts_ns;
1681 ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1683 *RTE_MBUF_DYNFIELD(first_seg,
1684 iavf_timestamp_dynfield_offset,
1685 rte_mbuf_timestamp_t *) = ts_ns;
1686 first_seg->ol_flags |= iavf_timestamp_dynflag;
1689 first_seg->ol_flags |= pkt_flags;
1691 /* Prefetch data of first segment, if configured to do so. */
1692 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1693 first_seg->data_off));
1694 rx_pkts[nb_rx++] = first_seg;
1698 /* Record index of the next RX descriptor to probe. */
1699 rxq->rx_tail = rx_id;
1700 rxq->pkt_first_seg = first_seg;
1701 rxq->pkt_last_seg = last_seg;
1703 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1708 /* implement recv_scattered_pkts */
1710 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1713 struct iavf_rx_queue *rxq = rx_queue;
1714 union iavf_rx_desc rxd;
1715 struct rte_mbuf *rxe;
1716 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1717 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1718 struct rte_mbuf *nmb, *rxm;
1719 uint16_t rx_id = rxq->rx_tail;
1720 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1721 struct rte_eth_dev *dev;
1727 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1728 volatile union iavf_rx_desc *rxdp;
1729 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1731 while (nb_rx < nb_pkts) {
1732 rxdp = &rx_ring[rx_id];
1733 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1734 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1735 IAVF_RXD_QW1_STATUS_SHIFT;
1737 /* Check the DD bit */
1738 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1740 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1742 nmb = rte_mbuf_raw_alloc(rxq->mp);
1743 if (unlikely(!nmb)) {
1744 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1745 "queue_id=%u", rxq->port_id, rxq->queue_id);
1746 dev = &rte_eth_devices[rxq->port_id];
1747 dev->data->rx_mbuf_alloc_failed++;
1753 rxe = rxq->sw_ring[rx_id];
1754 rxq->sw_ring[rx_id] = nmb;
1756 if (rx_id == rxq->nb_rx_desc)
1759 /* Prefetch next mbuf */
1760 rte_prefetch0(rxq->sw_ring[rx_id]);
1762 /* When next RX descriptor is on a cache line boundary,
1763 * prefetch the next 4 RX descriptors and next 8 pointers
1766 if ((rx_id & 0x3) == 0) {
1767 rte_prefetch0(&rx_ring[rx_id]);
1768 rte_prefetch0(rxq->sw_ring[rx_id]);
1773 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1775 /* Set data buffer address and data length of the mbuf */
1776 rxdp->read.hdr_addr = 0;
1777 rxdp->read.pkt_addr = dma_addr;
1778 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1779 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1780 rxm->data_len = rx_packet_len;
1781 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1783 /* If this is the first buffer of the received packet, set the
1784 * pointer to the first mbuf of the packet and initialize its
1785 * context. Otherwise, update the total length and the number
1786 * of segments of the current scattered packet, and update the
1787 * pointer to the last mbuf of the current packet.
1791 first_seg->nb_segs = 1;
1792 first_seg->pkt_len = rx_packet_len;
1794 first_seg->pkt_len =
1795 (uint16_t)(first_seg->pkt_len +
1797 first_seg->nb_segs++;
1798 last_seg->next = rxm;
1801 /* If this is not the last buffer of the received packet,
1802 * update the pointer to the last mbuf of the current scattered
1803 * packet and continue to parse the RX ring.
1805 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1810 /* This is the last buffer of the received packet. If the CRC
1811 * is not stripped by the hardware:
1812 * - Subtract the CRC length from the total packet length.
1813 * - If the last buffer only contains the whole CRC or a part
1814 * of it, free the mbuf associated to the last buffer. If part
1815 * of the CRC is also contained in the previous mbuf, subtract
1816 * the length of that CRC part from the data length of the
1820 if (unlikely(rxq->crc_len > 0)) {
1821 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1822 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1823 rte_pktmbuf_free_seg(rxm);
1824 first_seg->nb_segs--;
1825 last_seg->data_len =
1826 (uint16_t)(last_seg->data_len -
1827 (RTE_ETHER_CRC_LEN - rx_packet_len));
1828 last_seg->next = NULL;
1830 rxm->data_len = (uint16_t)(rx_packet_len -
1834 first_seg->port = rxq->port_id;
1835 first_seg->ol_flags = 0;
1836 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1837 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1838 first_seg->packet_type =
1839 ptype_tbl[(uint8_t)((qword1 &
1840 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1842 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1843 first_seg->hash.rss =
1844 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1846 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1847 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1849 first_seg->ol_flags |= pkt_flags;
1851 /* Prefetch data of first segment, if configured to do so. */
1852 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1853 first_seg->data_off));
1854 rx_pkts[nb_rx++] = first_seg;
1858 /* Record index of the next RX descriptor to probe. */
1859 rxq->rx_tail = rx_id;
1860 rxq->pkt_first_seg = first_seg;
1861 rxq->pkt_last_seg = last_seg;
1863 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1868 #define IAVF_LOOK_AHEAD 8
1870 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
1871 struct rte_mbuf **rx_pkts,
1874 volatile union iavf_rx_flex_desc *rxdp;
1875 struct rte_mbuf **rxep;
1876 struct rte_mbuf *mb;
1879 int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
1880 int32_t i, j, nb_rx = 0;
1881 int32_t nb_staged = 0;
1883 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1884 struct iavf_adapter *ad = rxq->vsi->adapter;
1887 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1888 rxep = &rxq->sw_ring[rxq->rx_tail];
1890 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1892 /* Make sure there is at least 1 packet to receive */
1893 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1896 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1897 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1898 if (sw_cur_time - ad->hw_time_update > 4) {
1899 if (iavf_get_phc_time(ad))
1900 PMD_DRV_LOG(ERR, "get physical time failed");
1901 ad->hw_time_update = sw_cur_time;
1905 /* Scan LOOK_AHEAD descriptors at a time to determine which
1906 * descriptors reference packets that are ready to be received.
1908 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1909 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1910 /* Read desc statuses backwards to avoid race condition */
1911 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1912 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1914 /* This barrier is to order loads of different words in the descriptor */
1915 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
1917 /* Compute how many contiguous DD bits were set */
1918 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
1919 var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1921 /* For Arm platforms, count only contiguous descriptors
1922 * whose DD bit is set to 1. On Arm platforms, reads of
1923 * descriptors can be reordered. Since the CPU may
1924 * be reading the descriptors as the NIC updates them
1925 * in memory, it is possible that the DD bit for a
1926 * descriptor earlier in the queue is read as not set
1927 * while the DD bit for a descriptor later in the queue
1939 /* Translate descriptor info to mbuf parameters */
1940 for (j = 0; j < nb_dd; j++) {
1941 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1943 i * IAVF_LOOK_AHEAD + j);
1946 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1947 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1948 mb->data_len = pkt_len;
1949 mb->pkt_len = pkt_len;
1952 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1953 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1954 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1955 iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
1956 &rxq->stats.ipsec_crypto);
1957 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1958 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1959 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1961 if (iavf_timestamp_dynflag > 0) {
1962 ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
1963 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1965 ad->phc_time = ts_ns;
1966 ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1968 *RTE_MBUF_DYNFIELD(mb,
1969 iavf_timestamp_dynfield_offset,
1970 rte_mbuf_timestamp_t *) = ts_ns;
1971 mb->ol_flags |= iavf_timestamp_dynflag;
1974 mb->ol_flags |= pkt_flags;
1976 /* Put up to nb_pkts directly into buffers */
1977 if ((i + j) < nb_pkts) {
1978 rx_pkts[i + j] = rxep[j];
1981 /* Stage excess pkts received */
1982 rxq->rx_stage[nb_staged] = rxep[j];
1987 if (nb_dd != IAVF_LOOK_AHEAD)
1991 /* Update rxq->rx_nb_avail to reflect number of staged pkts */
1992 rxq->rx_nb_avail = nb_staged;
1994 /* Clear software ring entries */
1995 for (i = 0; i < (nb_rx + nb_staged); i++)
1996 rxq->sw_ring[rxq->rx_tail + i] = NULL;
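/* Scan example (illustrative): with IAVF_LOOK_AHEAD == 8, each pass
 * inspects 8 descriptors; if only 5 contiguous DD bits are found,
 * nb_dd == 5 and scanning stops there, leaving the remaining
 * descriptors for the next poll.
 */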
2002 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2004 volatile union iavf_rx_desc *rxdp;
2005 struct rte_mbuf **rxep;
2006 struct rte_mbuf *mb;
2010 int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
2011 int32_t i, j, nb_rx = 0;
2012 int32_t nb_staged = 0;
2014 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2016 rxdp = &rxq->rx_ring[rxq->rx_tail];
2017 rxep = &rxq->sw_ring[rxq->rx_tail];
2019 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
2020 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
2021 IAVF_RXD_QW1_STATUS_SHIFT;
2023 /* Make sure there is at least 1 packet to receive */
2024 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
2027 /* Scan LOOK_AHEAD descriptors at a time to determine which
2028 * descriptors reference packets that are ready to be received.
2030 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
2031 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
2032 /* Read desc statuses backwards to avoid race condition */
2033 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
2034 qword1 = rte_le_to_cpu_64(
2035 rxdp[j].wb.qword1.status_error_len);
2036 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
2037 IAVF_RXD_QW1_STATUS_SHIFT;
2040 /* This barrier is to order loads of different words in the descriptor */
2041 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2043 /* Compute how many contiguous DD bits were set */
2044 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
2045 var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
2047 /* For Arm platforms, count only contiguous descriptors
2048 * whose DD bit is set to 1. On Arm platforms, reads of
2049 * descriptors can be reordered. Since the CPU may
2050 * be reading the descriptors as the NIC updates them
2051 * in memory, it is possible that the DD bit for a
2052 * descriptor earlier in the queue is read as not set
2053 * while the DD bit for a descriptor later in the queue
2065 /* Translate descriptor info to mbuf parameters */
2066 for (j = 0; j < nb_dd; j++) {
2067 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
2068 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
2071 qword1 = rte_le_to_cpu_64
2072 (rxdp[j].wb.qword1.status_error_len);
2073 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
2074 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
2075 mb->data_len = pkt_len;
2076 mb->pkt_len = pkt_len;
2078 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
2079 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
2081 ptype_tbl[(uint8_t)((qword1 &
2082 IAVF_RXD_QW1_PTYPE_MASK) >>
2083 IAVF_RXD_QW1_PTYPE_SHIFT)];
2085 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
2086 mb->hash.rss = rte_le_to_cpu_32(
2087 rxdp[j].wb.qword0.hi_dword.rss);
2089 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
2090 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
2092 mb->ol_flags |= pkt_flags;
2094 /* Put up to nb_pkts directly into buffers */
2095 if ((i + j) < nb_pkts) {
2096 rx_pkts[i + j] = rxep[j];
2098 } else { /* Stage excess pkts received */
2099 rxq->rx_stage[nb_staged] = rxep[j];
2104 if (nb_dd != IAVF_LOOK_AHEAD)
2108 /* Update rxq->rx_nb_avail to reflect number of staged pkts */
2109 rxq->rx_nb_avail = nb_staged;
2111 /* Clear software ring entries */
2112 for (i = 0; i < (nb_rx + nb_staged); i++)
2113 rxq->sw_ring[rxq->rx_tail + i] = NULL;
2118 static inline uint16_t
2119 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
2120 struct rte_mbuf **rx_pkts,
2124 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
2126 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
2128 for (i = 0; i < nb_pkts; i++)
2129 rx_pkts[i] = stage[i];
2131 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
2132 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
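/* Staging example (illustrative): if a hardware-ring scan yields 40
 * ready packets but the caller asked for 32, the first 32 go straight
 * to rx_pkts and 8 are parked in rx_stage; the next call is then served
 * from the stage by this function without touching the descriptor ring.
 */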
2138 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
2140 volatile union iavf_rx_desc *rxdp;
2141 struct rte_mbuf **rxep;
2142 struct rte_mbuf *mb;
2143 uint16_t alloc_idx, i;
2147 /* Allocate buffers in bulk */
2148 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
2149 (rxq->rx_free_thresh - 1));
2150 rxep = &rxq->sw_ring[alloc_idx];
2151 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
2152 rxq->rx_free_thresh);
2153 if (unlikely(diag != 0)) {
2154 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
2158 rxdp = &rxq->rx_ring[alloc_idx];
2159 for (i = 0; i < rxq->rx_free_thresh; i++) {
2160 if (likely(i < (rxq->rx_free_thresh - 1)))
2161 /* Prefetch next mbuf */
2162 rte_prefetch0(rxep[i + 1]);
2165 rte_mbuf_refcnt_set(mb, 1);
2167 mb->data_off = RTE_PKTMBUF_HEADROOM;
2169 mb->port = rxq->port_id;
2170 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
2171 rxdp[i].read.hdr_addr = 0;
2172 rxdp[i].read.pkt_addr = dma_addr;
2175 /* Update rx tail register */
2177 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
2179 rxq->rx_free_trigger =
2180 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
2181 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
2182 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2187 static inline uint16_t
2188 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2190 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
2196 if (rxq->rx_nb_avail)
2197 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
2199 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
2200 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq, rx_pkts, nb_pkts);
2202 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq, rx_pkts, nb_pkts);
2204 rxq->rx_next_avail = 0;
2205 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx + rxq->rx_nb_avail);
2207 if (rxq->rx_tail > rxq->rx_free_trigger) {
2208 if (iavf_rx_alloc_bufs(rxq) != 0) {
2209 uint16_t i, j, nb_staged;
2211 /* TODO: count rx_mbuf_alloc_failed here */
2213 nb_staged = rxq->rx_nb_avail;
2214 rxq->rx_nb_avail = 0;
2216 rxq->rx_tail = (uint16_t)(rxq->rx_tail - (nb_rx + nb_staged));
2217 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) {
2218 rxq->sw_ring[j] = rx_pkts[i];
2219 rx_pkts[i] = NULL;
2220 }
2221 for (i = 0, j = rxq->rx_tail + nb_rx; i < nb_staged; i++, j++) {
2222 rxq->sw_ring[j] = rxq->rx_stage[i];
2223 }
2225 return 0;
2226 }
2227 }
2230 if (rxq->rx_tail >= rxq->nb_rx_desc)
2231 rxq->rx_tail = 0;
2233 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
2234 rxq->port_id, rxq->queue_id,
2235 rxq->rx_tail, nb_rx);
2237 return nb_rx;
2238 }
2240 static uint16_t
2241 iavf_recv_pkts_bulk_alloc(void *rx_queue,
2242 struct rte_mbuf **rx_pkts,
2243 uint16_t nb_pkts)
2244 {
2245 uint16_t nb_rx = 0, n, count;
2247 if (unlikely(nb_pkts == 0))
2248 return 0;
2250 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2251 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2253 while (nb_pkts) {
2254 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2255 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2256 nb_rx = (uint16_t)(nb_rx + count);
2257 nb_pkts = (uint16_t)(nb_pkts - count);
2258 if (count < n)
2259 break;
2260 }
2262 return nb_rx;
2263 }
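/* This wrapper only chops bursts larger than IAVF_RX_MAX_BURST into
 * rx_recv_pkts() calls and stops early when a chunk comes back short,
 * which means the ring held no more completed descriptors.
 */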
2265 static int
2266 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2267 {
2268 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2269 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2270 uint16_t nb_tx_desc = txq->nb_tx_desc;
2271 uint16_t desc_to_clean_to;
2272 uint16_t nb_tx_to_clean;
2274 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2276 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2277 if (desc_to_clean_to >= nb_tx_desc)
2278 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2280 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2281 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2282 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2283 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2284 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2285 "(port=%d queue=%d)", desc_to_clean_to,
2286 txq->port_id, txq->queue_id);
2287 return -1;
2288 }
2290 if (last_desc_cleaned > desc_to_clean_to)
2291 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2292 desc_to_clean_to);
2293 else
2294 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2295 last_desc_cleaned);
2297 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2299 txq->last_desc_cleaned = desc_to_clean_to;
2300 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2302 return 0;
2303 }
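/* Cleanup arithmetic, e.g. nb_tx_desc = 512, rs_thresh = 32 and
 * last_desc_cleaned = 500: desc_to_clean_to wraps to 532 - 512 = 20 and
 * nb_tx_to_clean = (512 - 500) + 20 = 32, so one RS batch is reclaimed
 * per call once its descriptor reports the DESC_DONE dtype.
 */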
2305 /* Check if the context descriptor is needed for TX offloading */
2306 static inline uint16_t
2307 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2308 {
2309 if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
2310 RTE_MBUF_F_TX_TUNNEL_MASK))
2311 return 1;
2312 if (flags & RTE_MBUF_F_TX_VLAN &&
2313 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2314 return 1;
2316 return 0;
2317 }
2318 static inline void
2319 iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
2320 uint8_t vlan_flag)
2321 {
2322 uint64_t cmd = 0;
2325 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
2326 cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2328 if (m->ol_flags & RTE_MBUF_F_TX_VLAN &&
2329 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2330 cmd |= IAVF_TX_CTX_DESC_IL2TAG2
2331 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2332 }
2334 *field |= cmd;
2335 }
2337 static inline void
2338 iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
2339 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2340 {
2341 uint64_t ipsec_field =
2342 (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
2343 IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
2345 *field |= ipsec_field;
2346 }
2349 static inline void
2350 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
2351 const struct rte_mbuf *m)
2352 {
2353 uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
2354 uint64_t eip_len = 0;
2355 uint64_t eip_noinc = 0;
2356 /* Default - IP_ID is incremented in each segment of LSO */
2358 switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
2359 RTE_MBUF_F_TX_OUTER_IPV6 |
2360 RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
2361 case RTE_MBUF_F_TX_OUTER_IPV4:
2362 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
2363 eip_len = m->outer_l3_len >> 2;
2364 break;
2365 case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
2366 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
2367 eip_len = m->outer_l3_len >> 2;
2368 break;
2369 case RTE_MBUF_F_TX_OUTER_IPV6:
2370 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
2371 eip_len = m->outer_l3_len >> 2;
2372 break;
2373 }
2375 *qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
2376 eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
2377 eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
2378 }
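/* EIPLEN is programmed in 4-byte words: a 20-byte outer IPv4 header gives
 * eip_len = 20 >> 2 = 5. eip_noinc stays 0, i.e. the outer IP ID is
 * incremented for every segment the hardware emits.
 */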
2380 static inline uint16_t
2381 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
2382 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2383 {
2384 uint64_t segmentation_field = 0;
2385 uint64_t total_length = 0;
2387 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
2388 total_length = ipsec_md->l4_payload_len;
2389 } else {
2390 total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
2392 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2393 total_length -= m->outer_l3_len;
2394 }
2396 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
2397 if (!m->l4_len || !m->tso_segsz)
2398 PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
2399 m->l4_len, m->tso_segsz);
2400 if (m->tso_segsz < 88)
2401 PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
2402 m->tso_segsz, 88);
2403 #endif
2404 segmentation_field =
2405 (((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
2406 IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
2407 (((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
2408 IAVF_TXD_CTX_QW1_MSS_MASK);
2410 *field |= segmentation_field;
2412 return total_length;
2413 }
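/* For TSO the value programmed is the L4 payload only; e.g. a packet with
 * 14 + 20 + 20 bytes of Ethernet/IP/TCP headers and a 1460-byte payload
 * programs TSO_LEN = 1460, while MSS is taken verbatim from m->tso_segsz.
 */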
2416 struct iavf_tx_context_desc_qws {
2417 __le64 qw0;
2418 __le64 qw1;
2419 };
2421 static inline void
2422 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
2423 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
2424 uint16_t *tlen, uint8_t vlan_flag)
2425 {
2426 volatile struct iavf_tx_context_desc_qws *desc_qws =
2427 (volatile struct iavf_tx_context_desc_qws *)desc;
2428 /* fill descriptor type field */
2429 desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
2431 /* fill command field */
2432 iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);
2434 /* fill segmentation field */
2435 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
2436 /* fill IPsec field */
2437 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2438 iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
2439 ipsec_md);
2441 *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
2442 m, ipsec_md);
2443 }
2445 /* fill tunnelling field */
2446 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2447 iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
2448 else
2449 desc_qws->qw0 = 0;
2451 desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
2452 desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
2454 if (vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2455 desc->l2tag2 = m->vlan_tci;
2456 }
2459 static inline void
2460 iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
2461 const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
2462 {
2463 desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
2464 IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
2465 ((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
2466 ((uint64_t)md->esp_trailer_len <<
2467 IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
2469 desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
2470 IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
2471 ((uint64_t)md->next_proto <<
2472 IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
2473 ((uint64_t)(md->len_iv & 0x3) <<
2474 IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
2475 ((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2476 1 : 0) <<
2477 IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
2478 (uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
2480 /**
2481 * TODO: Pre-calculate this in the Session initialization
2482 *
2483 * Calculate IPsec length required in data descriptor func when TSO
2484 * offload is enabled
2485 */
2486 *ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
2487 (md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2488 sizeof(struct rte_udp_hdr) : 0);
2489 }
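/* The extra length charged to the data descriptor is the ESP header plus
 * the IV (md->len_iv is scaled, hence the >> 2) plus a UDP header when
 * NAT-T encapsulation is in use; the TODO above notes it could be cached
 * in the session instead of recomputed per packet.
 */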
2491 static inline void
2492 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
2493 struct rte_mbuf *m, uint8_t vlan_flag)
2494 {
2495 uint64_t command = 0;
2496 uint64_t offset = 0;
2497 uint64_t l2tag1 = 0;
2499 *qw1 = IAVF_TX_DESC_DTYPE_DATA;
2501 command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
2503 /* Descriptor based VLAN insertion */
2504 if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
2505 m->ol_flags & RTE_MBUF_F_TX_VLAN) {
2506 command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
2507 l2tag1 |= m->vlan_tci;
2508 }
2510 /* Set MACLEN */
2511 offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2513 /* Enable L3 checksum offloading inner */
2514 if ((m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && (m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
2515 command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2516 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2517 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
2518 command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2519 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2520 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
2521 command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2522 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2523 }
2525 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2526 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2527 offset |= (m->l4_len >> 2) <<
2528 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2529 }
2531 /* Enable L4 checksum offloads */
2532 switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2533 case RTE_MBUF_F_TX_TCP_CKSUM:
2534 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2535 offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2536 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2537 break;
2538 case RTE_MBUF_F_TX_SCTP_CKSUM:
2539 command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2540 offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2541 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2542 break;
2543 case RTE_MBUF_F_TX_UDP_CKSUM:
2544 command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2545 offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2546 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2547 break;
2548 }
2550 *qw1 = rte_cpu_to_le_64((((uint64_t)command <<
2551 IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
2552 (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
2553 IAVF_TXD_DATA_QW1_OFFSET_MASK) |
2554 ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
2555 }
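/* The offset fields are in word units: MACLEN counts 2-byte words and
 * IPLEN/L4LEN count 4-byte words, so l2_len = 14 programs MACLEN = 7 and
 * l3_len = 20 programs IPLEN = 5 before everything is shifted and masked
 * into the little-endian QW1 template.
 */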
2557 static inline void
2558 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
2559 struct rte_mbuf *m, uint64_t desc_template,
2560 uint16_t tlen, uint16_t ipseclen)
2561 {
2562 uint32_t hdrlen = m->l2_len;
2563 uint32_t bufsz = 0;
2565 /* fill data descriptor qw1 from template */
2566 desc->cmd_type_offset_bsz = desc_template;
2568 /* set data buffer address */
2569 desc->buffer_addr = rte_mbuf_data_iova(m);
2571 /* calculate data buffer size less set header lengths */
2572 if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
2573 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2574 RTE_MBUF_F_TX_UDP_SEG))) {
2575 hdrlen += m->outer_l3_len;
2576 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2577 hdrlen += m->l3_len + m->l4_len;
2578 else
2579 hdrlen += m->l3_len;
2580 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2581 hdrlen += ipseclen;
2582 bufsz = hdrlen + tlen;
2583 } else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
2584 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2585 RTE_MBUF_F_TX_UDP_SEG))) {
2586 hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
2587 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2588 hdrlen += m->l4_len;
2589 bufsz = hdrlen + tlen;
2591 } else {
2592 bufsz = m->data_len;
2593 }
2595 /* set data buffer size */
2596 desc->cmd_type_offset_bsz |=
2597 (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
2598 IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
2600 desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
2601 desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
2602 }
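/* On the TSO/security paths the programmed size is the header bytes plus
 * the tlen produced by the context-descriptor setup rather than
 * m->data_len, matching what the hardware actually consumes when it
 * replicates headers across the generated segments.
 */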
2605 static struct iavf_ipsec_crypto_pkt_metadata *
2606 iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
2607 struct rte_mbuf *m)
2608 {
2609 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2610 return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
2611 struct iavf_ipsec_crypto_pkt_metadata *);
2613 return NULL;
2614 }
2616 /* TX function */
2617 uint16_t
2618 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2619 {
2620 struct iavf_tx_queue *txq = tx_queue;
2621 volatile struct iavf_tx_desc *txr = txq->tx_ring;
2622 struct iavf_tx_entry *txe_ring = txq->sw_ring;
2623 struct iavf_tx_entry *txe, *txn;
2624 struct rte_mbuf *mb, *mb_seg;
2625 uint16_t desc_idx, desc_idx_last;
2626 uint16_t idx;
2629 /* Check if the descriptor ring needs to be cleaned. */
2630 if (txq->nb_free < txq->free_thresh)
2631 iavf_xmit_cleanup(txq);
2633 desc_idx = txq->tx_tail;
2634 txe = &txe_ring[desc_idx];
2636 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
2637 iavf_dump_tx_entry_ring(txq);
2638 iavf_dump_tx_desc_ring(txq);
2639 #endif
2642 for (idx = 0; idx < nb_pkts; idx++) {
2643 volatile struct iavf_tx_desc *ddesc;
2644 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
2646 uint16_t nb_desc_ctx, nb_desc_ipsec;
2647 uint16_t nb_desc_data, nb_desc_required;
2648 uint16_t tlen = 0, ipseclen = 0;
2649 uint64_t ddesc_template = 0;
2650 uint64_t ddesc_cmd = 0;
2652 mb = tx_pkts[idx];
2654 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2656 /**
2657 * Get metadata for ipsec crypto from mbuf dynamic fields if
2658 * security offload is specified.
2659 */
2660 ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
2662 nb_desc_data = mb->nb_segs;
2663 nb_desc_ctx =
2664 iavf_calc_context_desc(mb->ol_flags, txq->vlan_flag);
2665 nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
2667 /**
2668 * The number of descriptors that must be allocated for a
2669 * packet equals the number of segments of that packet, plus
2670 * the context and ipsec descriptors if needed.
2671 */
2672 nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
2674 desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
2676 /* wrap descriptor ring */
2677 if (desc_idx_last >= txq->nb_tx_desc)
2678 desc_idx_last =
2679 (uint16_t)(desc_idx_last - txq->nb_tx_desc);
2682 "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
2683 txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
2685 if (nb_desc_required > txq->nb_free) {
2686 if (iavf_xmit_cleanup(txq)) {
2687 if (idx == 0)
2688 return 0;
2689 goto end_of_tx;
2690 }
2691 if (unlikely(nb_desc_required > txq->rs_thresh)) {
2692 while (nb_desc_required > txq->nb_free) {
2693 if (iavf_xmit_cleanup(txq)) {
2694 if (idx == 0)
2695 return 0;
2696 goto end_of_tx;
2697 }
2698 }
2699 }
2700 }
2702 iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
2703 txq->vlan_flag);
2705 /* Setup TX context descriptor if required */
2706 if (nb_desc_ctx) {
2707 volatile struct iavf_tx_context_desc *ctx_desc =
2708 (volatile struct iavf_tx_context_desc *)
2709 &txr[desc_idx];
2711 /* clear QW0 or the previous writeback value
2712 * may impact next write
2713 */
2714 *(volatile uint64_t *)ctx_desc = 0;
2716 txn = &txe_ring[txe->next_id];
2717 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2719 if (txe->mbuf) {
2720 rte_pktmbuf_free_seg(txe->mbuf);
2721 txe->mbuf = NULL;
2722 }
2724 iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
2725 txq->vlan_flag);
2726 IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
2728 txe->last_id = desc_idx_last;
2729 desc_idx = txe->next_id;
2730 txe = txn;
2731 }
2733 if (nb_desc_ipsec) {
2734 volatile struct iavf_tx_ipsec_desc *ipsec_desc =
2735 (volatile struct iavf_tx_ipsec_desc *)
2736 &txr[desc_idx];
2738 txn = &txe_ring[txe->next_id];
2739 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2741 if (txe->mbuf) {
2742 rte_pktmbuf_free_seg(txe->mbuf);
2743 txe->mbuf = NULL;
2744 }
2746 iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
2748 IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
2750 txe->last_id = desc_idx_last;
2751 desc_idx = txe->next_id;
2752 txe = txn;
2753 }
2755 mb_seg = mb;
2757 do {
2758 ddesc = (volatile struct iavf_tx_desc *)
2759 &txr[desc_idx];
2761 txn = &txe_ring[txe->next_id];
2762 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2764 if (txe->mbuf)
2765 rte_pktmbuf_free_seg(txe->mbuf);
2767 txe->mbuf = mb_seg;
2768 iavf_fill_data_desc(ddesc, mb_seg,
2769 ddesc_template, tlen, ipseclen);
2771 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
2773 txe->last_id = desc_idx_last;
2774 desc_idx = txe->next_id;
2775 txe = txn;
2776 mb_seg = mb_seg->next;
2777 } while (mb_seg);
2779 /* The last packet data descriptor needs End Of Packet (EOP) */
2780 ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
2782 txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
2783 txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
2785 if (txq->nb_used >= txq->rs_thresh) {
2786 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2787 "%4u (port=%d queue=%d)",
2788 desc_idx_last, txq->port_id, txq->queue_id);
2790 ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
2792 /* Update txq RS bit counters */
2793 txq->nb_used = 0;
2794 }
2796 ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
2797 IAVF_TXD_DATA_QW1_CMD_SHIFT);
2799 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
2800 }
2802 end_of_tx:
2803 rte_wmb();
2805 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2806 txq->port_id, txq->queue_id, desc_idx, idx);
2808 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
2809 txq->tx_tail = desc_idx;
2811 return idx;
2812 }
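/* Per-packet descriptor budget, e.g. a 3-segment TSO packet with inline
 * IPsec: 1 context + 1 IPsec + 3 data descriptors, so nb_desc_required
 * = 5. The RS bit is only requested once nb_used crosses rs_thresh,
 * batching completion write-backs.
 */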
2814 /* Check if the packet with vlan user priority is transmitted in the
2815 * correct queue.
2816 */
2817 static int
2818 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2819 {
2820 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2821 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2822 uint16_t up;
2824 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2826 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2827 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
2828 up, txq->queue_id);
2829 return -1;
2830 } else {
2831 return 0;
2832 }
2833 }
2835 /* TX prep functions */
2836 uint16_t
2837 iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2838 uint16_t nb_pkts)
2839 {
2840 int i, ret;
2841 uint64_t ol_flags;
2842 struct rte_mbuf *m;
2843 struct iavf_tx_queue *txq = tx_queue;
2844 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2845 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2847 for (i = 0; i < nb_pkts; i++) {
2848 m = tx_pkts[i];
2849 ol_flags = m->ol_flags;
2851 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2852 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2853 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2854 rte_errno = EINVAL;
2855 return i;
2856 }
2857 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2858 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2859 /* MSS outside the range is considered malicious */
2860 rte_errno = EINVAL;
2861 return i;
2862 }
2864 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2865 rte_errno = ENOTSUP;
2866 return i;
2867 }
2869 #ifdef RTE_ETHDEV_DEBUG_TX
2870 ret = rte_validate_tx_offload(m);
2871 if (ret != 0) {
2872 rte_errno = -ret;
2873 return i;
2874 }
2875 #endif
2876 ret = rte_net_intel_cksum_prepare(m);
2877 if (ret != 0) {
2878 rte_errno = -ret;
2879 return i;
2880 }
2882 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2883 ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
2884 ret = iavf_check_vlan_up2tc(txq, m);
2885 if (ret != 0) {
2886 rte_errno = -ret;
2887 return i;
2888 }
2889 }
2890 }
2892 return i;
2893 }
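/* This routine is wired up as dev->tx_pkt_prepare where needed, so
 * applications reach it through rte_eth_tx_prepare() before handing the
 * same array to rte_eth_tx_burst().
 */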
2895 /* Choose rx function */
2896 void
2897 iavf_set_rx_function(struct rte_eth_dev *dev)
2898 {
2899 struct iavf_adapter *adapter =
2900 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2901 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2902 int i;
2903 struct iavf_rx_queue *rxq;
2904 bool use_flex = true;
2906 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2907 rxq = dev->data->rx_queues[i];
2908 if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
2909 PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is legacy, "
2910 "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
2912 } else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
2913 PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is not supported, "
2914 "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
2921 bool use_avx2 = false;
2922 bool use_avx512 = false;
2924 check_ret = iavf_rx_vec_dev_check(dev);
2925 if (check_ret >= 0 &&
2926 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2927 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2928 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2929 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2930 use_avx2 = true;
2932 #ifdef CC_AVX512_SUPPORT
2933 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2934 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2935 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2936 use_avx512 = true;
2937 #endif
2939 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2940 rxq = dev->data->rx_queues[i];
2941 (void)iavf_rxq_vec_setup(rxq);
2942 }
2944 if (dev->data->scattered_rx) {
2945 if (!use_avx512) {
2946 PMD_DRV_LOG(DEBUG,
2947 "Using %sVector Scattered Rx (port %d).",
2948 use_avx2 ? "avx2 " : "",
2949 dev->data->port_id);
2950 } else {
2951 if (check_ret == IAVF_VECTOR_PATH)
2952 PMD_DRV_LOG(DEBUG,
2953 "Using AVX512 Vector Scattered Rx (port %d).",
2954 dev->data->port_id);
2955 else
2956 PMD_DRV_LOG(DEBUG,
2957 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2958 dev->data->port_id);
2959 }
2960 if (use_flex) {
2961 dev->rx_pkt_burst = use_avx2 ?
2962 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2963 iavf_recv_scattered_pkts_vec_flex_rxd;
2964 #ifdef CC_AVX512_SUPPORT
2965 if (use_avx512) {
2966 if (check_ret == IAVF_VECTOR_PATH)
2967 dev->rx_pkt_burst =
2968 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2969 else
2970 dev->rx_pkt_burst =
2971 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
2972 }
2973 #endif
2974 } else {
2975 dev->rx_pkt_burst = use_avx2 ?
2976 iavf_recv_scattered_pkts_vec_avx2 :
2977 iavf_recv_scattered_pkts_vec;
2978 #ifdef CC_AVX512_SUPPORT
2979 if (use_avx512) {
2980 if (check_ret == IAVF_VECTOR_PATH)
2981 dev->rx_pkt_burst =
2982 iavf_recv_scattered_pkts_vec_avx512;
2983 else
2984 dev->rx_pkt_burst =
2985 iavf_recv_scattered_pkts_vec_avx512_offload;
2986 }
2987 #endif
2988 }
2989 } else {
2990 if (!use_avx512) {
2991 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2992 use_avx2 ? "avx2 " : "",
2993 dev->data->port_id);
2994 } else {
2995 if (check_ret == IAVF_VECTOR_PATH)
2996 PMD_DRV_LOG(DEBUG,
2997 "Using AVX512 Vector Rx (port %d).",
2998 dev->data->port_id);
2999 else
3000 PMD_DRV_LOG(DEBUG,
3001 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3002 dev->data->port_id);
3003 }
3004 if (use_flex) {
3005 dev->rx_pkt_burst = use_avx2 ?
3006 iavf_recv_pkts_vec_avx2_flex_rxd :
3007 iavf_recv_pkts_vec_flex_rxd;
3008 #ifdef CC_AVX512_SUPPORT
3009 if (use_avx512) {
3010 if (check_ret == IAVF_VECTOR_PATH)
3011 dev->rx_pkt_burst =
3012 iavf_recv_pkts_vec_avx512_flex_rxd;
3013 else
3014 dev->rx_pkt_burst =
3015 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
3016 }
3017 #endif
3018 } else {
3019 dev->rx_pkt_burst = use_avx2 ?
3020 iavf_recv_pkts_vec_avx2 :
3021 iavf_recv_pkts_vec;
3022 #ifdef CC_AVX512_SUPPORT
3023 if (use_avx512) {
3024 if (check_ret == IAVF_VECTOR_PATH)
3025 dev->rx_pkt_burst =
3026 iavf_recv_pkts_vec_avx512;
3027 else
3028 dev->rx_pkt_burst =
3029 iavf_recv_pkts_vec_avx512_offload;
3030 }
3031 #endif
3032 }
3033 }
3035 return;
3036 }
3037 #endif /* RTE_ARCH_X86 */
3039 if (dev->data->scattered_rx) {
3040 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
3041 dev->data->port_id);
3042 if (use_flex)
3043 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
3044 else
3045 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
3046 } else if (adapter->rx_bulk_alloc_allowed) {
3047 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
3048 dev->data->port_id);
3049 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
3051 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
3052 dev->data->port_id);
3053 if (use_flex)
3054 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
3055 else
3056 dev->rx_pkt_burst = iavf_recv_pkts;
3057 }
3058 }
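/* Selection order on x86: a vector path (SSE/AVX2/AVX512, flex or legacy
 * descriptor flavour) is preferred whenever iavf_rx_vec_dev_check()
 * passes; otherwise the scalar scattered, bulk-allocation or basic
 * callback is installed, using the flex-RXD variant while use_flex held.
 */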
3060 /* Choose tx function */
3061 void
3062 iavf_set_tx_function(struct rte_eth_dev *dev)
3063 {
3064 #ifdef RTE_ARCH_X86
3065 struct iavf_tx_queue *txq;
3066 int i;
3067 int check_ret;
3068 bool use_sse = false;
3069 bool use_avx2 = false;
3070 bool use_avx512 = false;
3072 check_ret = iavf_tx_vec_dev_check(dev);
3074 if (check_ret >= 0 &&
3075 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3076 /* SSE and AVX2 not support offload path yet. */
3077 if (check_ret == IAVF_VECTOR_PATH) {
3078 use_sse = true;
3079 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3080 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3081 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3082 use_avx2 = true;
3083 }
3084 #ifdef CC_AVX512_SUPPORT
3085 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3086 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
3087 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
3088 use_avx512 = true;
3089 #endif
3091 if (!use_sse && !use_avx2 && !use_avx512)
3092 goto normal;
3095 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3096 use_avx2 ? "avx2 " : "",
3097 dev->data->port_id);
3098 dev->tx_pkt_burst = use_avx2 ?
3099 iavf_xmit_pkts_vec_avx2 :
3100 iavf_xmit_pkts_vec;
3102 dev->tx_pkt_prepare = NULL;
3103 #ifdef CC_AVX512_SUPPORT
3104 if (use_avx512) {
3105 if (check_ret == IAVF_VECTOR_PATH) {
3106 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
3107 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
3108 dev->data->port_id);
3109 } else {
3110 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
3111 dev->tx_pkt_prepare = iavf_prep_pkts;
3112 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
3113 dev->data->port_id);
3114 }
3115 }
3116 #endif
3118 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3119 txq = dev->data->tx_queues[i];
3120 if (!txq)
3121 continue;
3122 #ifdef CC_AVX512_SUPPORT
3123 if (use_avx512)
3124 iavf_txq_vec_setup_avx512(txq);
3125 else
3126 iavf_txq_vec_setup(txq);
3127 #else
3128 iavf_txq_vec_setup(txq);
3129 #endif
3130 }
3132 return;
3133 }
3135 normal:
3136 #endif /* RTE_ARCH_X86 */
3137 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
3138 dev->data->port_id);
3139 dev->tx_pkt_burst = iavf_xmit_pkts;
3140 dev->tx_pkt_prepare = iavf_prep_pkts;
3141 }
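/* Mirror of the Rx selection: vector Tx requires iavf_tx_vec_dev_check()
 * to pass. Note the SSE/AVX2 paths clear tx_pkt_prepare, while the AVX512
 * offload path and the basic path keep iavf_prep_pkts() installed.
 */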
3143 static int
3144 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
3145 uint32_t free_cnt)
3146 {
3147 struct iavf_tx_entry *swr_ring = txq->sw_ring;
3148 uint16_t i, tx_last, tx_id;
3149 uint16_t nb_tx_free_last;
3150 uint16_t nb_tx_to_clean;
3151 uint32_t pkt_cnt;
3153 /* Start free mbuf from the next of tx_tail */
3154 tx_last = txq->tx_tail;
3155 tx_id = swr_ring[tx_last].next_id;
3157 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
3158 return 0;
3160 nb_tx_to_clean = txq->nb_free;
3161 nb_tx_free_last = txq->nb_free;
3162 if (!free_cnt)
3163 free_cnt = txq->nb_tx_desc;
3165 /* Loop through swr_ring to count the amount of
3166 * freeable mbufs and packets.
3167 */
3168 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3169 for (i = 0; i < nb_tx_to_clean &&
3170 pkt_cnt < free_cnt &&
3171 tx_id != tx_last; i++) {
3172 if (swr_ring[tx_id].mbuf != NULL) {
3173 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3174 swr_ring[tx_id].mbuf = NULL;
3176 /*
3177 * last segment in the packet,
3178 * increment packet count
3179 */
3180 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3181 }
3183 tx_id = swr_ring[tx_id].next_id;
3184 }
3186 if (txq->rs_thresh > txq->nb_tx_desc -
3187 txq->nb_free || tx_id == tx_last)
3188 break;
3190 if (pkt_cnt < free_cnt) {
3191 if (iavf_xmit_cleanup(txq))
3192 break;
3194 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
3195 nb_tx_free_last = txq->nb_free;
3196 }
3197 }
3199 return (int)pkt_cnt;
3200 }
3202 int
3203 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
3204 {
3205 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
3207 return iavf_tx_done_cleanup_full(q, free_cnt);
3208 }
3210 void
3211 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3212 struct rte_eth_rxq_info *qinfo)
3213 {
3214 struct iavf_rx_queue *rxq;
3216 rxq = dev->data->rx_queues[queue_id];
3218 qinfo->mp = rxq->mp;
3219 qinfo->scattered_rx = dev->data->scattered_rx;
3220 qinfo->nb_desc = rxq->nb_rx_desc;
3222 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3223 qinfo->conf.rx_drop_en = true;
3224 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3225 qinfo->conf.offloads = rxq->offloads;
3226 }
3227 void
3228 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3229 struct rte_eth_txq_info *qinfo)
3230 {
3231 struct iavf_tx_queue *txq;
3233 txq = dev->data->tx_queues[queue_id];
3235 qinfo->nb_desc = txq->nb_tx_desc;
3237 qinfo->conf.tx_free_thresh = txq->free_thresh;
3238 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
3239 qinfo->conf.offloads = txq->offloads;
3240 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3241 }
3243 /* Get the number of used descriptors of a rx queue */
3244 uint32_t
3245 iavf_dev_rxq_count(void *rx_queue)
3246 {
3247 #define IAVF_RXQ_SCAN_INTERVAL 4
3248 volatile union iavf_rx_desc *rxdp;
3249 struct iavf_rx_queue *rxq;
3250 uint16_t desc = 0;
3252 rxq = rx_queue;
3253 rxdp = &rxq->rx_ring[rxq->rx_tail];
3255 while ((desc < rxq->nb_rx_desc) &&
3256 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
3257 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
3258 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
3259 /* Check the DD bit of every fourth rx descriptor to avoid
3260 * checking too frequently and degrading performance.
3261 */
3263 desc += IAVF_RXQ_SCAN_INTERVAL;
3264 rxdp += IAVF_RXQ_SCAN_INTERVAL;
3265 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3266 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3267 desc - rxq->nb_rx_desc]);
3268 }
3270 return desc;
3271 }
3273 int
3274 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
3275 {
3276 struct iavf_rx_queue *rxq = rx_queue;
3277 volatile uint64_t *status;
3278 uint64_t mask;
3279 uint32_t desc;
3281 if (unlikely(offset >= rxq->nb_rx_desc))
3282 return -EINVAL;
3284 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
3285 return RTE_ETH_RX_DESC_UNAVAIL;
3287 desc = rxq->rx_tail + offset;
3288 if (desc >= rxq->nb_rx_desc)
3289 desc -= rxq->nb_rx_desc;
3291 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
3292 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
3293 << IAVF_RXD_QW1_STATUS_SHIFT);
3294 if (*status & mask)
3295 return RTE_ETH_RX_DESC_DONE;
3297 return RTE_ETH_RX_DESC_AVAIL;
3298 }
3300 int
3301 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
3302 {
3303 struct iavf_tx_queue *txq = tx_queue;
3304 volatile uint64_t *status;
3305 uint64_t mask, expect;
3306 uint32_t desc;
3308 if (unlikely(offset >= txq->nb_tx_desc))
3309 return -EINVAL;
3311 desc = txq->tx_tail + offset;
3312 /* go to next desc that has the RS bit */
3313 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
3314 txq->rs_thresh;
3315 if (desc >= txq->nb_tx_desc) {
3316 desc -= txq->nb_tx_desc;
3317 if (desc >= txq->nb_tx_desc)
3318 desc -= txq->nb_tx_desc;
3319 }
3321 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
3322 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
3323 expect = rte_cpu_to_le_64(
3324 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
3325 if ((*status & mask) == expect)
3326 return RTE_ETH_TX_DESC_DONE;
3328 return RTE_ETH_TX_DESC_FULL;
3329 }
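/* Rounding sketch: with tx_tail = 10, offset = 5 and rs_thresh = 32 the
 * probed slot is rounded up from 15 to 32, the next descriptor carrying
 * an RS bit, since only those slots are ever written back as done.
 */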
3331 static inline uint32_t
3332 iavf_get_default_ptype(uint16_t ptype)
3333 {
3334 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
3335 __rte_cache_aligned = {
3336 /* L2 types */
3337 /* [0] reserved */
3338 [1] = RTE_PTYPE_L2_ETHER,
3339 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3340 /* [3] - [5] reserved */
3341 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3342 /* [7] - [10] reserved */
3343 [11] = RTE_PTYPE_L2_ETHER_ARP,
3344 /* [12] - [21] reserved */
3346 /* Non tunneled IPv4 */
3347 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3348 RTE_PTYPE_L4_FRAG,
3349 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3350 RTE_PTYPE_L4_NONFRAG,
3351 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3352 RTE_PTYPE_L4_UDP,
3353 /* [25] reserved */
3354 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3355 RTE_PTYPE_L4_TCP,
3356 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3357 RTE_PTYPE_L4_SCTP,
3358 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3359 RTE_PTYPE_L4_ICMP,
3361 /* IPv4 --> IPv4 */
3362 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3363 RTE_PTYPE_TUNNEL_IP |
3364 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_INNER_L4_FRAG,
3366 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3367 RTE_PTYPE_TUNNEL_IP |
3368 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3369 RTE_PTYPE_INNER_L4_NONFRAG,
3370 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3371 RTE_PTYPE_TUNNEL_IP |
3372 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3373 RTE_PTYPE_INNER_L4_UDP,
3374 /* [32] reserved */
3375 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3376 RTE_PTYPE_TUNNEL_IP |
3377 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3378 RTE_PTYPE_INNER_L4_TCP,
3379 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3380 RTE_PTYPE_TUNNEL_IP |
3381 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3382 RTE_PTYPE_INNER_L4_SCTP,
3383 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3384 RTE_PTYPE_TUNNEL_IP |
3385 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3386 RTE_PTYPE_INNER_L4_ICMP,
3388 /* IPv4 --> IPv6 */
3389 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3390 RTE_PTYPE_TUNNEL_IP |
3391 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3392 RTE_PTYPE_INNER_L4_FRAG,
3393 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3394 RTE_PTYPE_TUNNEL_IP |
3395 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3396 RTE_PTYPE_INNER_L4_NONFRAG,
3397 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3398 RTE_PTYPE_TUNNEL_IP |
3399 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3400 RTE_PTYPE_INNER_L4_UDP,
3402 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3403 RTE_PTYPE_TUNNEL_IP |
3404 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3405 RTE_PTYPE_INNER_L4_TCP,
3406 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3407 RTE_PTYPE_TUNNEL_IP |
3408 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3409 RTE_PTYPE_INNER_L4_SCTP,
3410 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3411 RTE_PTYPE_TUNNEL_IP |
3412 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3413 RTE_PTYPE_INNER_L4_ICMP,
3415 /* IPv4 --> GRE/Teredo/VXLAN */
3416 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3417 RTE_PTYPE_TUNNEL_GRENAT,
3419 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3420 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3421 RTE_PTYPE_TUNNEL_GRENAT |
3422 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3423 RTE_PTYPE_INNER_L4_FRAG,
3424 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3425 RTE_PTYPE_TUNNEL_GRENAT |
3426 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3427 RTE_PTYPE_INNER_L4_NONFRAG,
3428 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3429 RTE_PTYPE_TUNNEL_GRENAT |
3430 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3431 RTE_PTYPE_INNER_L4_UDP,
3432 /* [47] reserved */
3433 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3434 RTE_PTYPE_TUNNEL_GRENAT |
3435 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3436 RTE_PTYPE_INNER_L4_TCP,
3437 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3438 RTE_PTYPE_TUNNEL_GRENAT |
3439 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3440 RTE_PTYPE_INNER_L4_SCTP,
3441 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3442 RTE_PTYPE_TUNNEL_GRENAT |
3443 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3444 RTE_PTYPE_INNER_L4_ICMP,
3446 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3447 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3448 RTE_PTYPE_TUNNEL_GRENAT |
3449 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3450 RTE_PTYPE_INNER_L4_FRAG,
3451 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3452 RTE_PTYPE_TUNNEL_GRENAT |
3453 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3454 RTE_PTYPE_INNER_L4_NONFRAG,
3455 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3456 RTE_PTYPE_TUNNEL_GRENAT |
3457 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3458 RTE_PTYPE_INNER_L4_UDP,
3459 /* [54] reserved */
3460 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3461 RTE_PTYPE_TUNNEL_GRENAT |
3462 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3463 RTE_PTYPE_INNER_L4_TCP,
3464 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3465 RTE_PTYPE_TUNNEL_GRENAT |
3466 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3467 RTE_PTYPE_INNER_L4_SCTP,
3468 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3469 RTE_PTYPE_TUNNEL_GRENAT |
3470 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3471 RTE_PTYPE_INNER_L4_ICMP,
3473 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3474 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3475 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3477 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3478 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3479 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3480 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3481 RTE_PTYPE_INNER_L4_FRAG,
3482 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3483 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3484 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3485 RTE_PTYPE_INNER_L4_NONFRAG,
3486 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3487 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3488 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3489 RTE_PTYPE_INNER_L4_UDP,
3490 /* [62] reserved */
3491 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3492 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3493 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_INNER_L4_TCP,
3495 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3496 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3497 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3498 RTE_PTYPE_INNER_L4_SCTP,
3499 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3500 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3501 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3502 RTE_PTYPE_INNER_L4_ICMP,
3504 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3505 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3506 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3507 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3508 RTE_PTYPE_INNER_L4_FRAG,
3509 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3510 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3511 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3512 RTE_PTYPE_INNER_L4_NONFRAG,
3513 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3514 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3515 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3516 RTE_PTYPE_INNER_L4_UDP,
3517 /* [69] reserved */
3518 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3519 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3520 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3521 RTE_PTYPE_INNER_L4_TCP,
3522 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3523 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3524 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3525 RTE_PTYPE_INNER_L4_SCTP,
3526 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3527 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3528 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3529 RTE_PTYPE_INNER_L4_ICMP,
3530 /* [73] - [87] reserved */
3532 /* Non tunneled IPv6 */
3533 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3534 RTE_PTYPE_L4_FRAG,
3535 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3536 RTE_PTYPE_L4_NONFRAG,
3537 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_L4_UDP,
3539 /* [91] reserved */
3540 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3541 RTE_PTYPE_L4_TCP,
3542 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3543 RTE_PTYPE_L4_SCTP,
3544 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3545 RTE_PTYPE_L4_ICMP,
3547 /* IPv6 --> IPv4 */
3548 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3549 RTE_PTYPE_TUNNEL_IP |
3550 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3551 RTE_PTYPE_INNER_L4_FRAG,
3552 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3553 RTE_PTYPE_TUNNEL_IP |
3554 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3555 RTE_PTYPE_INNER_L4_NONFRAG,
3556 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_TUNNEL_IP |
3558 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3559 RTE_PTYPE_INNER_L4_UDP,
3560 /* [98] reserved */
3561 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3562 RTE_PTYPE_TUNNEL_IP |
3563 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3564 RTE_PTYPE_INNER_L4_TCP,
3565 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3566 RTE_PTYPE_TUNNEL_IP |
3567 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3568 RTE_PTYPE_INNER_L4_SCTP,
3569 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3570 RTE_PTYPE_TUNNEL_IP |
3571 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3572 RTE_PTYPE_INNER_L4_ICMP,
3574 /* IPv6 --> IPv6 */
3575 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3576 RTE_PTYPE_TUNNEL_IP |
3577 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3578 RTE_PTYPE_INNER_L4_FRAG,
3579 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3580 RTE_PTYPE_TUNNEL_IP |
3581 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3582 RTE_PTYPE_INNER_L4_NONFRAG,
3583 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3584 RTE_PTYPE_TUNNEL_IP |
3585 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3586 RTE_PTYPE_INNER_L4_UDP,
3587 /* [105] reserved */
3588 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3589 RTE_PTYPE_TUNNEL_IP |
3590 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3591 RTE_PTYPE_INNER_L4_TCP,
3592 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3593 RTE_PTYPE_TUNNEL_IP |
3594 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3595 RTE_PTYPE_INNER_L4_SCTP,
3596 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3597 RTE_PTYPE_TUNNEL_IP |
3598 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3599 RTE_PTYPE_INNER_L4_ICMP,
3601 /* IPv6 --> GRE/Teredo/VXLAN */
3602 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3603 RTE_PTYPE_TUNNEL_GRENAT,
3605 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3606 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3607 RTE_PTYPE_TUNNEL_GRENAT |
3608 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3609 RTE_PTYPE_INNER_L4_FRAG,
3610 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3611 RTE_PTYPE_TUNNEL_GRENAT |
3612 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3613 RTE_PTYPE_INNER_L4_NONFRAG,
3614 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3615 RTE_PTYPE_TUNNEL_GRENAT |
3616 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3617 RTE_PTYPE_INNER_L4_UDP,
3618 /* [113] reserved */
3619 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3620 RTE_PTYPE_TUNNEL_GRENAT |
3621 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3622 RTE_PTYPE_INNER_L4_TCP,
3623 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3624 RTE_PTYPE_TUNNEL_GRENAT |
3625 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3626 RTE_PTYPE_INNER_L4_SCTP,
3627 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3628 RTE_PTYPE_TUNNEL_GRENAT |
3629 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3630 RTE_PTYPE_INNER_L4_ICMP,
3632 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3633 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3634 RTE_PTYPE_TUNNEL_GRENAT |
3635 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_INNER_L4_FRAG,
3637 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_TUNNEL_GRENAT |
3639 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3640 RTE_PTYPE_INNER_L4_NONFRAG,
3641 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_TUNNEL_GRENAT |
3643 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3644 RTE_PTYPE_INNER_L4_UDP,
3645 /* [120] reserved */
3646 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3647 RTE_PTYPE_TUNNEL_GRENAT |
3648 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3649 RTE_PTYPE_INNER_L4_TCP,
3650 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3651 RTE_PTYPE_TUNNEL_GRENAT |
3652 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3653 RTE_PTYPE_INNER_L4_SCTP,
3654 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3655 RTE_PTYPE_TUNNEL_GRENAT |
3656 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3657 RTE_PTYPE_INNER_L4_ICMP,
3659 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3660 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3661 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3663 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3664 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3665 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3666 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3667 RTE_PTYPE_INNER_L4_FRAG,
3668 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3669 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3670 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_INNER_L4_NONFRAG,
3672 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3673 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3674 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3675 RTE_PTYPE_INNER_L4_UDP,
3676 /* [128] reserved */
3677 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3679 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3680 RTE_PTYPE_INNER_L4_TCP,
3681 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3683 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3684 RTE_PTYPE_INNER_L4_SCTP,
3685 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3686 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3687 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3688 RTE_PTYPE_INNER_L4_ICMP,
3690 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3691 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3693 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_INNER_L4_FRAG,
3695 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3697 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_INNER_L4_NONFRAG,
3699 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3701 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_INNER_L4_UDP,
3703 /* [135] reserved */
3704 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3706 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_INNER_L4_TCP,
3708 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3709 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3710 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_INNER_L4_SCTP,
3712 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3713 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3714 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3715 RTE_PTYPE_INNER_L4_ICMP,
3716 /* [139] - [299] reserved */
3718 /* PPPoE */
3719 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3720 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3722 /* PPPoE --> IPv4 */
3723 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3724 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3725 RTE_PTYPE_L4_FRAG,
3726 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3727 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3728 RTE_PTYPE_L4_NONFRAG,
3729 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3730 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3731 RTE_PTYPE_L4_UDP,
3732 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3733 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3734 RTE_PTYPE_L4_TCP,
3735 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3736 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3737 RTE_PTYPE_L4_SCTP,
3738 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3739 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3740 RTE_PTYPE_L4_ICMP,
3742 /* PPPoE --> IPv6 */
3743 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3744 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3745 RTE_PTYPE_L4_FRAG,
3746 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3747 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3748 RTE_PTYPE_L4_NONFRAG,
3749 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3750 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3751 RTE_PTYPE_L4_UDP,
3752 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3753 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3754 RTE_PTYPE_L4_TCP,
3755 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3756 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3757 RTE_PTYPE_L4_SCTP,
3758 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3759 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3760 RTE_PTYPE_L4_ICMP,
3761 /* [314] - [324] reserved */
3763 /* IPv4/IPv6 --> GTPC/GTPU */
3764 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3765 RTE_PTYPE_TUNNEL_GTPC,
3766 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3767 RTE_PTYPE_TUNNEL_GTPC,
3768 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3769 RTE_PTYPE_TUNNEL_GTPC,
3770 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3771 RTE_PTYPE_TUNNEL_GTPC,
3772 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3773 RTE_PTYPE_TUNNEL_GTPU,
3774 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3775 RTE_PTYPE_TUNNEL_GTPU,
3777 /* IPv4 --> GTPU --> IPv4 */
3778 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3779 RTE_PTYPE_TUNNEL_GTPU |
3780 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3781 RTE_PTYPE_INNER_L4_FRAG,
3782 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3783 RTE_PTYPE_TUNNEL_GTPU |
3784 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3785 RTE_PTYPE_INNER_L4_NONFRAG,
3786 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3787 RTE_PTYPE_TUNNEL_GTPU |
3788 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3789 RTE_PTYPE_INNER_L4_UDP,
3790 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3791 RTE_PTYPE_TUNNEL_GTPU |
3792 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3793 RTE_PTYPE_INNER_L4_TCP,
3794 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3795 RTE_PTYPE_TUNNEL_GTPU |
3796 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3797 RTE_PTYPE_INNER_L4_ICMP,
3799 /* IPv6 --> GTPU --> IPv4 */
3800 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3801 RTE_PTYPE_TUNNEL_GTPU |
3802 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3803 RTE_PTYPE_INNER_L4_FRAG,
3804 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3805 RTE_PTYPE_TUNNEL_GTPU |
3806 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3807 RTE_PTYPE_INNER_L4_NONFRAG,
3808 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3809 RTE_PTYPE_TUNNEL_GTPU |
3810 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3811 RTE_PTYPE_INNER_L4_UDP,
3812 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3813 RTE_PTYPE_TUNNEL_GTPU |
3814 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3815 RTE_PTYPE_INNER_L4_TCP,
3816 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3817 RTE_PTYPE_TUNNEL_GTPU |
3818 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3819 RTE_PTYPE_INNER_L4_ICMP,
3821 /* IPv4 --> GTPU --> IPv6 */
3822 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3823 RTE_PTYPE_TUNNEL_GTPU |
3824 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3825 RTE_PTYPE_INNER_L4_FRAG,
3826 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3827 RTE_PTYPE_TUNNEL_GTPU |
3828 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3829 RTE_PTYPE_INNER_L4_NONFRAG,
3830 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3831 RTE_PTYPE_TUNNEL_GTPU |
3832 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3833 RTE_PTYPE_INNER_L4_UDP,
3834 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3835 RTE_PTYPE_TUNNEL_GTPU |
3836 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3837 RTE_PTYPE_INNER_L4_TCP,
3838 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3839 RTE_PTYPE_TUNNEL_GTPU |
3840 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3841 RTE_PTYPE_INNER_L4_ICMP,
3843 /* IPv6 --> GTPU --> IPv6 */
3844 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3845 RTE_PTYPE_TUNNEL_GTPU |
3846 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3847 RTE_PTYPE_INNER_L4_FRAG,
3848 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3849 RTE_PTYPE_TUNNEL_GTPU |
3850 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3851 RTE_PTYPE_INNER_L4_NONFRAG,
3852 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3853 RTE_PTYPE_TUNNEL_GTPU |
3854 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3855 RTE_PTYPE_INNER_L4_UDP,
3856 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3857 RTE_PTYPE_TUNNEL_GTPU |
3858 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3859 RTE_PTYPE_INNER_L4_TCP,
3860 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3861 RTE_PTYPE_TUNNEL_GTPU |
3862 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3863 RTE_PTYPE_INNER_L4_ICMP,
3865 /* IPv4 --> UDP ECPRI */
3866 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3867 RTE_PTYPE_L4_UDP,
3868 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3869 RTE_PTYPE_L4_UDP,
3870 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3871 RTE_PTYPE_L4_UDP,
3872 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3873 RTE_PTYPE_L4_UDP,
3874 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3875 RTE_PTYPE_L4_UDP,
3876 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3877 RTE_PTYPE_L4_UDP,
3878 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3879 RTE_PTYPE_L4_UDP,
3880 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3881 RTE_PTYPE_L4_UDP,
3882 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3883 RTE_PTYPE_L4_UDP,
3884 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3885 RTE_PTYPE_L4_UDP,
3887 /* IPv6 --> UDP ECPRI */
3888 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3889 RTE_PTYPE_L4_UDP,
3890 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3891 RTE_PTYPE_L4_UDP,
3892 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3893 RTE_PTYPE_L4_UDP,
3894 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3895 RTE_PTYPE_L4_UDP,
3896 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3897 RTE_PTYPE_L4_UDP,
3898 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3899 RTE_PTYPE_L4_UDP,
3900 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3901 RTE_PTYPE_L4_UDP,
3902 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3903 RTE_PTYPE_L4_UDP,
3904 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3905 RTE_PTYPE_L4_UDP,
3906 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3907 RTE_PTYPE_L4_UDP,
3908 /* All others reserved */
3909 };
3911 return ptype_tbl[ptype];
3912 }
3914 void
3915 iavf_set_default_ptype_table(struct rte_eth_dev *dev)
3916 {
3917 struct iavf_adapter *ad =
3918 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3919 int i;
3921 for (i = 0; i < IAVF_MAX_PKT_TYPE; i++)
3922 ad->ptype_tbl[i] = iavf_get_default_ptype(i);
3923 }
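/* The translation runs once per adapter at init time; the hot Rx paths
 * then map the hardware ptype index to the RTE_PTYPE_* encoding with a
 * single indexed load from ad->ptype_tbl.
 */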