1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "iavf_ipsec_crypto.h"
31 #include "rte_pmd_iavf.h"
33 /* Offset of mbuf dynamic field for protocol extraction's metadata */
34 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
36 /* Mask of mbuf dynamic flags for protocol extraction's type */
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
42 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
43 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
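/*
 * Illustrative application-side usage (a sketch, not part of the driver):
 * once protocol extraction is enabled, an application checks the per-type
 * dynflag in ol_flags and then reads the extracted metadata through the
 * dynamic-field accessor exported by rte_pmd_iavf.h, e.g.
 *
 *   if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *       (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask))
 *       metadata = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 */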
46 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
48 static uint8_t rxdid_map[] = {
49 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
50 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
51 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
52 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
53 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
54 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
55 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
57 IAVF_RXDID_COMMS_IPSEC_CRYPTO,
60 return flex_type < RTE_DIM(rxdid_map) ?
61 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
65 iavf_monitor_callback(const uint64_t value,
66 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
68 const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
70 * we expect the DD bit to be set to 1 if this descriptor was already
73 return (value & m) == m ? -1 : 0;
77 iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
79 struct iavf_rx_queue *rxq = rx_queue;
80 volatile union iavf_rx_desc *rxdp;
84 rxdp = &rxq->rx_ring[desc];
85 /* watch for changes in status bit */
86 pmc->addr = &rxdp->wb.qword1.status_error_len;
88 /* comparison callback */
89 pmc->fn = iavf_monitor_callback;
91 /* registers are 64-bit */
92 pmc->size = sizeof(uint64_t);
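/*
 * Note on the monitor condition set up above: pmc->addr/fn/size are consumed
 * by the generic power-management path (e.g. rte_power_monitor());
 * iavf_monitor_callback() returns -1 as soon as the DD bit is observed set,
 * so the core does not keep sleeping once a descriptor has been written back.
 */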
98 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
100 /* The following constraints must be satisfied:
101 * thresh < rxq->nb_rx_desc
103 if (thresh >= nb_desc) {
104 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
112 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
113 uint16_t tx_free_thresh)
115 /* TX descriptors will have their RS bit set after tx_rs_thresh
116 * descriptors have been used. The TX descriptor ring will be cleaned
117 * after tx_free_thresh descriptors are used or if the number of
118 * descriptors required to transmit a packet is greater than the
119 * number of free TX descriptors.
121 * The following constraints must be satisfied:
122 * - tx_rs_thresh must be less than the size of the ring minus 2.
123 * - tx_free_thresh must be less than the size of the ring minus 3.
124 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
125 * - tx_rs_thresh must be a divisor of the ring size.
127 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
128 * race condition, hence the maximum threshold constraints. When set
129 * to zero use default values.
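 *
 * Worked example (illustrative values, not defaults taken from this file):
 * with nb_desc = 512, tx_rs_thresh = 32 and tx_free_thresh = 32 all four
 * constraints hold: 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.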
131 if (tx_rs_thresh >= (nb_desc - 2)) {
132 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
133 "number of TX descriptors (%u) minus 2",
134 tx_rs_thresh, nb_desc);
137 if (tx_free_thresh >= (nb_desc - 3)) {
138 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
139 "number of TX descriptors (%u) minus 3.",
140 tx_free_thresh, nb_desc);
143 if (tx_rs_thresh > tx_free_thresh) {
144 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
145 "equal to tx_free_thresh (%u).",
146 tx_rs_thresh, tx_free_thresh);
149 if ((nb_desc % tx_rs_thresh) != 0) {
150 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
151 "number of TX descriptors (%u).",
152 tx_rs_thresh, nb_desc);
160 check_rx_vec_allow(struct iavf_rx_queue *rxq)
162 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
163 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
164 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
168 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
173 check_tx_vec_allow(struct iavf_tx_queue *txq)
175 if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
176 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
177 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
178 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
181 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
186 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
190 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
191 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
192 "rxq->rx_free_thresh=%d, "
193 "IAVF_RX_MAX_BURST=%d",
194 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
196 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
197 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
198 "rxq->nb_rx_desc=%d, "
199 "rxq->rx_free_thresh=%d",
200 rxq->nb_rx_desc, rxq->rx_free_thresh);
207 reset_rx_queue(struct iavf_rx_queue *rxq)
215 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
217 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
218 ((volatile char *)rxq->rx_ring)[i] = 0;
220 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
222 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
223 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
226 rxq->rx_nb_avail = 0;
227 rxq->rx_next_avail = 0;
228 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
233 rte_pktmbuf_free(rxq->pkt_first_seg);
235 rxq->pkt_first_seg = NULL;
236 rxq->pkt_last_seg = NULL;
238 rxq->rxrearm_start = 0;
242 reset_tx_queue(struct iavf_tx_queue *txq)
244 struct iavf_tx_entry *txe;
249 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
254 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
255 for (i = 0; i < size; i++)
256 ((volatile char *)txq->tx_ring)[i] = 0;
258 prev = (uint16_t)(txq->nb_tx_desc - 1);
259 for (i = 0; i < txq->nb_tx_desc; i++) {
260 txq->tx_ring[i].cmd_type_offset_bsz =
261 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
264 txe[prev].next_id = i;
271 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
272 txq->nb_free = txq->nb_tx_desc - 1;
274 txq->next_dd = txq->rs_thresh - 1;
275 txq->next_rs = txq->rs_thresh - 1;
279 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
281 volatile union iavf_rx_desc *rxd;
282 struct rte_mbuf *mbuf = NULL;
286 for (i = 0; i < rxq->nb_rx_desc; i++) {
287 mbuf = rte_mbuf_raw_alloc(rxq->mp);
288 if (unlikely(!mbuf)) {
289 for (j = 0; j < i; j++) {
290 rte_pktmbuf_free_seg(rxq->sw_ring[j]);
291 rxq->sw_ring[j] = NULL;
293 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
297 rte_mbuf_refcnt_set(mbuf, 1);
299 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
301 mbuf->port = rxq->port_id;
304 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
306 rxd = &rxq->rx_ring[i];
307 rxd->read.pkt_addr = dma_addr;
308 rxd->read.hdr_addr = 0;
309 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
314 rxq->sw_ring[i] = mbuf;
321 release_rxq_mbufs(struct iavf_rx_queue *rxq)
328 for (i = 0; i < rxq->nb_rx_desc; i++) {
329 if (rxq->sw_ring[i]) {
330 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
331 rxq->sw_ring[i] = NULL;
336 if (rxq->rx_nb_avail == 0)
338 for (i = 0; i < rxq->rx_nb_avail; i++) {
339 struct rte_mbuf *mbuf;
341 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
342 rte_pktmbuf_free_seg(mbuf);
344 rxq->rx_nb_avail = 0;
348 release_txq_mbufs(struct iavf_tx_queue *txq)
352 if (!txq || !txq->sw_ring) {
353 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
357 for (i = 0; i < txq->nb_tx_desc; i++) {
358 if (txq->sw_ring[i].mbuf) {
359 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
360 txq->sw_ring[i].mbuf = NULL;
366 struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
367 [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
369 [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
374 struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
375 [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
377 [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
378 #ifdef CC_AVX512_SUPPORT
379 [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
386 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
388 volatile union iavf_rx_flex_desc *rxdp)
390 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
391 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
392 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
396 if (desc->flow_id != 0xFFFFFFFF) {
397 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
398 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
401 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
402 stat_err = rte_le_to_cpu_16(desc->status_error0);
403 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
404 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
405 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
411 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
413 volatile union iavf_rx_flex_desc *rxdp)
415 volatile struct iavf_32b_rx_flex_desc_comms *desc =
416 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
419 stat_err = rte_le_to_cpu_16(desc->status_error0);
420 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
421 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
422 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
425 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
426 if (desc->flow_id != 0xFFFFFFFF) {
427 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
428 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
431 if (rxq->xtr_ol_flag) {
432 uint32_t metadata = 0;
434 stat_err = rte_le_to_cpu_16(desc->status_error1);
436 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
437 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
439 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
441 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
444 mb->ol_flags |= rxq->xtr_ol_flag;
446 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
453 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
455 volatile union iavf_rx_flex_desc *rxdp)
457 volatile struct iavf_32b_rx_flex_desc_comms *desc =
458 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
461 stat_err = rte_le_to_cpu_16(desc->status_error0);
462 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
463 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
464 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
467 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
468 if (desc->flow_id != 0xFFFFFFFF) {
469 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
470 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
473 if (rxq->xtr_ol_flag) {
474 uint32_t metadata = 0;
476 if (desc->flex_ts.flex.aux0 != 0xFFFF)
477 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
478 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
479 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
482 mb->ol_flags |= rxq->xtr_ol_flag;
484 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
491 iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[IAVF_RXDID_LAST + 1] = {
492 [IAVF_RXDID_LEGACY_0] = iavf_rxd_to_pkt_fields_by_comms_ovs,
493 [IAVF_RXDID_LEGACY_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
494 [IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
495 [IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
496 [IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
497 [IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
498 iavf_rxd_to_pkt_fields_by_comms_aux_v1,
499 [IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
500 [IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
501 iavf_rxd_to_pkt_fields_by_comms_aux_v2,
502 [IAVF_RXDID_COMMS_IPSEC_CRYPTO] =
503 iavf_rxd_to_pkt_fields_by_comms_aux_v2,
504 [IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
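/*
 * Each Rx queue records the RXDID it was configured with (rxq->rxdid); the
 * Rx paths below index this table with that value, so the per-packet
 * descriptor-to-mbuf translation is a single indirect call instead of a
 * per-packet switch on the descriptor format.
 */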
508 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
513 case IAVF_RXDID_COMMS_AUX_VLAN:
514 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
516 case IAVF_RXDID_COMMS_AUX_IPV4:
517 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
519 case IAVF_RXDID_COMMS_AUX_IPV6:
520 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
522 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
524 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
526 case IAVF_RXDID_COMMS_AUX_TCP:
527 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
529 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
531 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
533 case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
535 rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
537 case IAVF_RXDID_COMMS_OVS_1:
538 case IAVF_RXDID_LEGACY_0:
539 case IAVF_RXDID_LEGACY_1:
542 /* update this according to the RXDID for FLEX_DESC_NONE */
543 rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
547 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
548 rxq->xtr_ol_flag = 0;
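/*
 * iavf_dev_rx_queue_setup() below implements the ethdev rx_queue_setup op.
 * A minimal application-side sketch of how it is reached (assuming the port
 * is already configured):
 *
 *   struct rte_eth_rxconf rx_conf = { .rx_free_thresh = 32 };
 *   rte_eth_rx_queue_setup(port_id, queue_idx, nb_desc, socket_id,
 *                          &rx_conf, mbuf_pool);
 */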
552 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
553 uint16_t nb_desc, unsigned int socket_id,
554 const struct rte_eth_rxconf *rx_conf,
555 struct rte_mempool *mp)
557 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
558 struct iavf_adapter *ad =
559 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
560 struct iavf_info *vf =
561 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
562 struct iavf_vsi *vsi = &vf->vsi;
563 struct iavf_rx_queue *rxq;
564 const struct rte_memzone *mz;
568 uint16_t rx_free_thresh;
571 PMD_INIT_FUNC_TRACE();
576 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
578 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
579 nb_desc > IAVF_MAX_RING_DESC ||
580 nb_desc < IAVF_MIN_RING_DESC) {
581 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
586 /* Check free threshold */
587 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
588 IAVF_DEFAULT_RX_FREE_THRESH :
589 rx_conf->rx_free_thresh;
590 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
593 /* Free memory if needed */
594 if (dev->data->rx_queues[queue_idx]) {
595 iavf_dev_rx_queue_release(dev, queue_idx);
596 dev->data->rx_queues[queue_idx] = NULL;
599 /* Allocate the rx queue data structure */
600 rxq = rte_zmalloc_socket("iavf rxq",
601 sizeof(struct iavf_rx_queue),
605 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
606 "rx queue data structure");
610 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
611 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
613 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
614 rxq->proto_xtr = proto_xtr;
616 rxq->rxdid = IAVF_RXDID_LEGACY_1;
617 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
620 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
621 struct virtchnl_vlan_supported_caps *stripping_support =
622 &vf->vlan_v2_caps.offloads.stripping_support;
623 uint32_t stripping_cap;
625 if (stripping_support->outer)
626 stripping_cap = stripping_support->outer;
628 stripping_cap = stripping_support->inner;
630 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
631 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
632 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
633 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
635 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
638 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
641 rxq->nb_rx_desc = nb_desc;
642 rxq->rx_free_thresh = rx_free_thresh;
643 rxq->queue_id = queue_idx;
644 rxq->port_id = dev->data->port_id;
645 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
648 rxq->offloads = offloads;
650 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
651 rxq->crc_len = RTE_ETHER_CRC_LEN;
655 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
656 rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
658 /* Allocate the software ring. */
659 len = nb_desc + IAVF_RX_MAX_BURST;
661 rte_zmalloc_socket("iavf rx sw ring",
662 sizeof(struct rte_mbuf *) * len,
666 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
671 /* Allocate the maximum number of RX ring hardware descriptors with
672 * a little extra to support bulk allocation.
674 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
675 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
677 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
678 ring_size, IAVF_RING_BASE_ALIGN,
681 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
682 rte_free(rxq->sw_ring);
686 /* Zero all the descriptors in the ring. */
687 memset(mz->addr, 0, ring_size);
688 rxq->rx_ring_phys_addr = mz->iova;
689 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
694 dev->data->rx_queues[queue_idx] = rxq;
695 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
696 rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
698 if (check_rx_bulk_allow(rxq) == true) {
699 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
700 "satisfied. Rx Burst Bulk Alloc function will be "
701 "used on port=%d, queue=%d.",
702 rxq->port_id, rxq->queue_id);
704 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
705 "not satisfied, Scattered Rx is requested "
706 "on port=%d, queue=%d.",
707 rxq->port_id, rxq->queue_id);
708 ad->rx_bulk_alloc_allowed = false;
711 if (check_rx_vec_allow(rxq) == false)
712 ad->rx_vec_allowed = false;
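/*
 * iavf_dev_tx_queue_setup() below implements the ethdev tx_queue_setup op,
 * mirroring the Rx setup above. A minimal application-side sketch:
 *
 *   struct rte_eth_txconf tx_conf = { .tx_rs_thresh = 32,
 *                                     .tx_free_thresh = 32 };
 *   rte_eth_tx_queue_setup(port_id, queue_idx, nb_desc, socket_id, &tx_conf);
 */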
718 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
721 unsigned int socket_id,
722 const struct rte_eth_txconf *tx_conf)
724 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
725 struct iavf_adapter *adapter =
726 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
727 struct iavf_info *vf =
728 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
729 struct iavf_tx_queue *txq;
730 const struct rte_memzone *mz;
732 uint16_t tx_rs_thresh, tx_free_thresh;
735 PMD_INIT_FUNC_TRACE();
740 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
742 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
743 nb_desc > IAVF_MAX_RING_DESC ||
744 nb_desc < IAVF_MIN_RING_DESC) {
745 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
750 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
751 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
752 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
753 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
754 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
757 /* Free memory if needed. */
758 if (dev->data->tx_queues[queue_idx]) {
759 iavf_dev_tx_queue_release(dev, queue_idx);
760 dev->data->tx_queues[queue_idx] = NULL;
763 /* Allocate the TX queue data structure. */
764 txq = rte_zmalloc_socket("iavf txq",
765 sizeof(struct iavf_tx_queue),
769 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
770 "tx queue structure");
774 if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
775 struct virtchnl_vlan_supported_caps *insertion_support =
776 &adapter->vf.vlan_v2_caps.offloads.insertion_support;
777 uint32_t insertion_cap;
779 if (insertion_support->outer)
780 insertion_cap = insertion_support->outer;
782 insertion_cap = insertion_support->inner;
784 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
785 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
786 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
787 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
789 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
792 txq->nb_tx_desc = nb_desc;
793 txq->rs_thresh = tx_rs_thresh;
794 txq->free_thresh = tx_free_thresh;
795 txq->queue_id = queue_idx;
796 txq->port_id = dev->data->port_id;
797 txq->offloads = offloads;
798 txq->tx_deferred_start = tx_conf->tx_deferred_start;
800 if (iavf_ipsec_crypto_supported(adapter))
801 txq->ipsec_crypto_pkt_md_offset =
802 iavf_security_get_pkt_md_offset(adapter);
804 /* Allocate software ring */
806 rte_zmalloc_socket("iavf tx sw ring",
807 sizeof(struct iavf_tx_entry) * nb_desc,
811 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
816 /* Allocate TX hardware ring descriptors. */
817 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
818 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
819 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
820 ring_size, IAVF_RING_BASE_ALIGN,
823 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
824 rte_free(txq->sw_ring);
828 txq->tx_ring_phys_addr = mz->iova;
829 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
834 dev->data->tx_queues[queue_idx] = txq;
835 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
836 txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
838 if (check_tx_vec_allow(txq) == false) {
839 struct iavf_adapter *ad =
840 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
841 ad->tx_vec_allowed = false;
844 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
845 vf->tm_conf.committed) {
847 for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
848 if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
849 txq->queue_id < (vf->qtc_map[tc].start_queue_id +
850 vf->qtc_map[tc].queue_count))
853 if (tc >= vf->qos_cap->num_elem) {
854 PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
864 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
866 struct iavf_adapter *adapter =
867 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
868 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
869 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
870 struct iavf_rx_queue *rxq;
873 PMD_DRV_FUNC_TRACE();
875 if (rx_queue_id >= dev->data->nb_rx_queues)
878 rxq = dev->data->rx_queues[rx_queue_id];
880 err = alloc_rxq_mbufs(rxq);
882 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
888 /* Init the RX tail register. */
889 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
890 IAVF_WRITE_FLUSH(hw);
892 /* Ready to switch the queue on */
894 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
896 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
899 release_rxq_mbufs(rxq);
900 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
903 dev->data->rx_queue_state[rx_queue_id] =
904 RTE_ETH_QUEUE_STATE_STARTED;
907 if (dev->data->dev_conf.rxmode.offloads &
908 RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
909 if (iavf_get_phc_time(rxq)) {
910 PMD_DRV_LOG(ERR, "Failed to get PHC time");
913 rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
920 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
922 struct iavf_adapter *adapter =
923 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
924 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
925 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
926 struct iavf_tx_queue *txq;
929 PMD_DRV_FUNC_TRACE();
931 if (tx_queue_id >= dev->data->nb_tx_queues)
934 txq = dev->data->tx_queues[tx_queue_id];
936 /* Init the TX tail register. */
937 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
938 IAVF_WRITE_FLUSH(hw);
940 /* Ready to switch the queue on */
942 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
944 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
947 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
950 dev->data->tx_queue_state[tx_queue_id] =
951 RTE_ETH_QUEUE_STATE_STARTED;
957 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
959 struct iavf_adapter *adapter =
960 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
961 struct iavf_rx_queue *rxq;
964 PMD_DRV_FUNC_TRACE();
966 if (rx_queue_id >= dev->data->nb_rx_queues)
969 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
971 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
976 rxq = dev->data->rx_queues[rx_queue_id];
977 iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
979 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
985 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
987 struct iavf_adapter *adapter =
988 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
989 struct iavf_tx_queue *txq;
992 PMD_DRV_FUNC_TRACE();
994 if (tx_queue_id >= dev->data->nb_tx_queues)
997 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
999 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1004 txq = dev->data->tx_queues[tx_queue_id];
1005 iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
1006 reset_tx_queue(txq);
1007 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1013 iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1015 struct iavf_rx_queue *q = dev->data->rx_queues[qid];
1020 iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
1021 rte_free(q->sw_ring);
1022 rte_memzone_free(q->mz);
1027 iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1029 struct iavf_tx_queue *q = dev->data->tx_queues[qid];
1034 iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
1035 rte_free(q->sw_ring);
1036 rte_memzone_free(q->mz);
1041 iavf_stop_queues(struct rte_eth_dev *dev)
1043 struct iavf_adapter *adapter =
1044 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1045 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1046 struct iavf_rx_queue *rxq;
1047 struct iavf_tx_queue *txq;
1050 /* Stop All queues */
1051 if (!vf->lv_enabled) {
1052 ret = iavf_disable_queues(adapter);
1054 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1056 ret = iavf_disable_queues_lv(adapter);
1058 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
1062 PMD_DRV_LOG(WARNING, "Failed to stop queues");
1064 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1065 txq = dev->data->tx_queues[i];
1068 iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
1069 reset_tx_queue(txq);
1070 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1072 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1073 rxq = dev->data->rx_queues[i];
1076 iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
1077 reset_rx_queue(rxq);
1078 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1082 #define IAVF_RX_FLEX_ERR0_BITS \
1083 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1084 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1085 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1086 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1087 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1088 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1091 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
1093 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1094 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1095 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1097 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1104 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
1105 volatile union iavf_rx_flex_desc *rxdp)
1107 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
1108 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1109 mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
1110 RTE_MBUF_F_RX_VLAN_STRIPPED;
1112 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1117 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1118 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1119 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1120 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
1121 RTE_MBUF_F_RX_QINQ |
1122 RTE_MBUF_F_RX_VLAN_STRIPPED |
1124 mb->vlan_tci_outer = mb->vlan_tci;
1125 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1126 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1127 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1128 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1130 mb->vlan_tci_outer = 0;
1136 iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
1137 volatile union iavf_rx_flex_desc *rxdp)
1139 volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
1140 (volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
1142 mb->dynfield1[0] = desc->ipsec_said &
1143 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
1147 iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
1148 volatile union iavf_rx_flex_desc *rxdp,
1149 struct iavf_ipsec_crypto_stats *stats)
1151 uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
1153 if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
1154 uint16_t ipsec_status;
1156 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
1158 ipsec_status = status1 &
1159 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
1162 if (unlikely(ipsec_status !=
1163 IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
1164 mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
1166 switch (ipsec_status) {
1167 case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
1168 stats->ierrors.sad_miss++;
1170 case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
1171 stats->ierrors.not_processed++;
1173 case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
1174 stats->ierrors.icv_check++;
1176 case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
1177 stats->ierrors.ipsec_length++;
1179 case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
1180 stats->ierrors.misc++;
1184 stats->ierrors.count++;
1189 stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
1191 if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
1193 IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
1194 iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
1199 /* Translate the rx descriptor status and error fields to pkt flags */
1200 static inline uint64_t
1201 iavf_rxd_to_pkt_flags(uint64_t qword)
1204 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1206 #define IAVF_RX_ERR_BITS 0x3f
1208 /* Check if RSS_HASH */
1209 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1210 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1211 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
1213 /* Check if FDIR Match */
1214 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1215 RTE_MBUF_F_RX_FDIR : 0);
1217 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1218 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1222 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1223 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1225 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1227 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1228 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1230 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1232 /* TODO: Oversize error bit is not processed here */
1237 static inline uint64_t
1238 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1241 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1244 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1245 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1246 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1248 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1250 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1251 flags |= RTE_MBUF_F_RX_FDIR_ID;
1255 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1256 flags |= RTE_MBUF_F_RX_FDIR_ID;
1261 #define IAVF_RX_FLEX_ERR0_BITS \
1262 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1263 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1264 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1265 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1266 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1267 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1269 /* Rx L3/L4 checksum */
1270 static inline uint64_t
1271 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1275 /* check if HW has decoded the packet and checksum */
1276 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1279 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1280 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1284 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1285 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1287 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1289 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1290 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1292 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1294 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1295 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1300 /* If the number of free RX descriptors is greater than the RX free
1301 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1302 * register. Update the RDT with the value of the last processed RX
1303 * descriptor minus 1, to guarantee that the RDT register is never
1304 * equal to the RDH register, which creates a "full" ring situation
1305 * from the hardware point of view.
1308 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1310 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1312 if (nb_hold > rxq->rx_free_thresh) {
1314 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1315 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1316 rx_id = (uint16_t)((rx_id == 0) ?
1317 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1318 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1321 rxq->nb_rx_hold = nb_hold;
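/*
 * Example of the wrap-around handling in iavf_update_rx_tail() above: with
 * nb_rx_desc = 512 and the next descriptor to process at index 0, the tail
 * register is written with 511, i.e. one entry behind, so RDT can never
 * catch up with RDH.
 */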
1324 /* implement recv_pkts */
1326 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1328 volatile union iavf_rx_desc *rx_ring;
1329 volatile union iavf_rx_desc *rxdp;
1330 struct iavf_rx_queue *rxq;
1331 union iavf_rx_desc rxd;
1332 struct rte_mbuf *rxe;
1333 struct rte_eth_dev *dev;
1334 struct rte_mbuf *rxm;
1335 struct rte_mbuf *nmb;
1339 uint16_t rx_packet_len;
1340 uint16_t rx_id, nb_hold;
1343 const uint32_t *ptype_tbl;
1348 rx_id = rxq->rx_tail;
1349 rx_ring = rxq->rx_ring;
1350 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1352 while (nb_rx < nb_pkts) {
1353 rxdp = &rx_ring[rx_id];
1354 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1355 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1356 IAVF_RXD_QW1_STATUS_SHIFT;
1358 /* Check the DD bit first */
1359 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1361 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1363 nmb = rte_mbuf_raw_alloc(rxq->mp);
1364 if (unlikely(!nmb)) {
1365 dev = &rte_eth_devices[rxq->port_id];
1366 dev->data->rx_mbuf_alloc_failed++;
1367 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1368 "queue_id=%u", rxq->port_id, rxq->queue_id);
1374 rxe = rxq->sw_ring[rx_id];
1375 rxq->sw_ring[rx_id] = nmb;
1377 if (unlikely(rx_id == rxq->nb_rx_desc))
1380 /* Prefetch next mbuf */
1381 rte_prefetch0(rxq->sw_ring[rx_id]);
1383 /* When next RX descriptor is on a cache line boundary,
1384 * prefetch the next 4 RX descriptors and next 8 pointers
1387 if ((rx_id & 0x3) == 0) {
1388 rte_prefetch0(&rx_ring[rx_id]);
1389 rte_prefetch0(rxq->sw_ring[rx_id]);
1393 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1394 rxdp->read.hdr_addr = 0;
1395 rxdp->read.pkt_addr = dma_addr;
1397 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1398 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1400 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1401 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1404 rxm->pkt_len = rx_packet_len;
1405 rxm->data_len = rx_packet_len;
1406 rxm->port = rxq->port_id;
1408 iavf_rxd_to_vlan_tci(rxm, &rxd);
1409 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1411 ptype_tbl[(uint8_t)((qword1 &
1412 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1414 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1416 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1418 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1419 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1421 rxm->ol_flags |= pkt_flags;
1423 rx_pkts[nb_rx++] = rxm;
1425 rxq->rx_tail = rx_id;
1427 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1432 /* implement recv_pkts for flexible Rx descriptor */
1434 iavf_recv_pkts_flex_rxd(void *rx_queue,
1435 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1437 volatile union iavf_rx_desc *rx_ring;
1438 volatile union iavf_rx_flex_desc *rxdp;
1439 struct iavf_rx_queue *rxq;
1440 union iavf_rx_flex_desc rxd;
1441 struct rte_mbuf *rxe;
1442 struct rte_eth_dev *dev;
1443 struct rte_mbuf *rxm;
1444 struct rte_mbuf *nmb;
1446 uint16_t rx_stat_err0;
1447 uint16_t rx_packet_len;
1448 uint16_t rx_id, nb_hold;
1451 const uint32_t *ptype_tbl;
1457 rx_id = rxq->rx_tail;
1458 rx_ring = rxq->rx_ring;
1459 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1461 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
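		/* sw_cur_time below is in milliseconds (timer cycles divided
		 * by cycles per millisecond); the cached PHC time is re-read
		 * if it is more than 4 ms old.
		 */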
1462 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1464 if (sw_cur_time - rxq->hw_time_update > 4) {
1465 if (iavf_get_phc_time(rxq))
1466 PMD_DRV_LOG(ERR, "Failed to get PHC time");
1467 rxq->hw_time_update = sw_cur_time;
1471 while (nb_rx < nb_pkts) {
1472 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1473 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1475 /* Check the DD bit first */
1476 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1478 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1480 nmb = rte_mbuf_raw_alloc(rxq->mp);
1481 if (unlikely(!nmb)) {
1482 dev = &rte_eth_devices[rxq->port_id];
1483 dev->data->rx_mbuf_alloc_failed++;
1484 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1485 "queue_id=%u", rxq->port_id, rxq->queue_id);
1491 rxe = rxq->sw_ring[rx_id];
1492 rxq->sw_ring[rx_id] = nmb;
1494 if (unlikely(rx_id == rxq->nb_rx_desc))
1497 /* Prefetch next mbuf */
1498 rte_prefetch0(rxq->sw_ring[rx_id]);
1500 /* When next RX descriptor is on a cache line boundary,
1501 * prefetch the next 4 RX descriptors and next 8 pointers
1504 if ((rx_id & 0x3) == 0) {
1505 rte_prefetch0(&rx_ring[rx_id]);
1506 rte_prefetch0(rxq->sw_ring[rx_id]);
1510 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1511 rxdp->read.hdr_addr = 0;
1512 rxdp->read.pkt_addr = dma_addr;
1514 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1515 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1517 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1518 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1521 rxm->pkt_len = rx_packet_len;
1522 rxm->data_len = rx_packet_len;
1523 rxm->port = rxq->port_id;
1525 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1526 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1527 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1528 iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
1529 &rxq->stats.ipsec_crypto);
1530 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
1531 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1533 if (iavf_timestamp_dynflag > 0) {
1534 ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
1535 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1537 rxq->phc_time = ts_ns;
1538 rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1540 *RTE_MBUF_DYNFIELD(rxm,
1541 iavf_timestamp_dynfield_offset,
1542 rte_mbuf_timestamp_t *) = ts_ns;
1543 rxm->ol_flags |= iavf_timestamp_dynflag;
1546 rxm->ol_flags |= pkt_flags;
1548 rx_pkts[nb_rx++] = rxm;
1550 rxq->rx_tail = rx_id;
1552 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1557 /* implement recv_scattered_pkts for flexible Rx descriptor */
1559 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1562 struct iavf_rx_queue *rxq = rx_queue;
1563 union iavf_rx_flex_desc rxd;
1564 struct rte_mbuf *rxe;
1565 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1566 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1567 struct rte_mbuf *nmb, *rxm;
1568 uint16_t rx_id = rxq->rx_tail;
1569 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1570 struct rte_eth_dev *dev;
1571 uint16_t rx_stat_err0;
1576 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1577 volatile union iavf_rx_flex_desc *rxdp;
1578 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1580 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1581 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1583 if (sw_cur_time - rxq->hw_time_update > 4) {
1584 if (iavf_get_phc_time(rxq))
1585 PMD_DRV_LOG(ERR, "Failed to get PHC time");
1586 rxq->hw_time_update = sw_cur_time;
1590 while (nb_rx < nb_pkts) {
1591 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1592 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1594 /* Check the DD bit */
1595 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1597 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1599 nmb = rte_mbuf_raw_alloc(rxq->mp);
1600 if (unlikely(!nmb)) {
1601 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1602 "queue_id=%u", rxq->port_id, rxq->queue_id);
1603 dev = &rte_eth_devices[rxq->port_id];
1604 dev->data->rx_mbuf_alloc_failed++;
1610 rxe = rxq->sw_ring[rx_id];
1611 rxq->sw_ring[rx_id] = nmb;
1613 if (rx_id == rxq->nb_rx_desc)
1616 /* Prefetch next mbuf */
1617 rte_prefetch0(rxq->sw_ring[rx_id]);
1619 /* When next RX descriptor is on a cache line boundary,
1620 * prefetch the next 4 RX descriptors and next 8 pointers
1623 if ((rx_id & 0x3) == 0) {
1624 rte_prefetch0(&rx_ring[rx_id]);
1625 rte_prefetch0(rxq->sw_ring[rx_id]);
1630 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1632 /* Set data buffer address and data length of the mbuf */
1633 rxdp->read.hdr_addr = 0;
1634 rxdp->read.pkt_addr = dma_addr;
1635 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1636 IAVF_RX_FLX_DESC_PKT_LEN_M;
1637 rxm->data_len = rx_packet_len;
1638 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1640 /* If this is the first buffer of the received packet, set the
1641 * pointer to the first mbuf of the packet and initialize its
1642 * context. Otherwise, update the total length and the number
1643 * of segments of the current scattered packet, and update the
1644 * pointer to the last mbuf of the current packet.
1648 first_seg->nb_segs = 1;
1649 first_seg->pkt_len = rx_packet_len;
1651 first_seg->pkt_len =
1652 (uint16_t)(first_seg->pkt_len +
1654 first_seg->nb_segs++;
1655 last_seg->next = rxm;
1658 /* If this is not the last buffer of the received packet,
1659 * update the pointer to the last mbuf of the current scattered
1660 * packet and continue to parse the RX ring.
1662 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1667 /* This is the last buffer of the received packet. If the CRC
1668 * is not stripped by the hardware:
1669 * - Subtract the CRC length from the total packet length.
1670 * - If the last buffer only contains the whole CRC or a part
1671 * of it, free the mbuf associated to the last buffer. If part
1672 * of the CRC is also contained in the previous mbuf, subtract
1673 * the length of that CRC part from the data length of the
1677 if (unlikely(rxq->crc_len > 0)) {
1678 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1679 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1680 rte_pktmbuf_free_seg(rxm);
1681 first_seg->nb_segs--;
1682 last_seg->data_len =
1683 (uint16_t)(last_seg->data_len -
1684 (RTE_ETHER_CRC_LEN - rx_packet_len));
1685 last_seg->next = NULL;
1687 rxm->data_len = (uint16_t)(rx_packet_len -
1692 first_seg->port = rxq->port_id;
1693 first_seg->ol_flags = 0;
1694 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1695 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1696 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1697 iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
1698 &rxq->stats.ipsec_crypto);
1699 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
1700 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1702 if (iavf_timestamp_dynflag > 0) {
1703 ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
1704 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1706 rxq->phc_time = ts_ns;
1707 rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1709 *RTE_MBUF_DYNFIELD(first_seg,
1710 iavf_timestamp_dynfield_offset,
1711 rte_mbuf_timestamp_t *) = ts_ns;
1712 first_seg->ol_flags |= iavf_timestamp_dynflag;
1715 first_seg->ol_flags |= pkt_flags;
1717 /* Prefetch data of first segment, if configured to do so. */
1718 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1719 first_seg->data_off));
1720 rx_pkts[nb_rx++] = first_seg;
1724 /* Record index of the next RX descriptor to probe. */
1725 rxq->rx_tail = rx_id;
1726 rxq->pkt_first_seg = first_seg;
1727 rxq->pkt_last_seg = last_seg;
1729 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1734 /* implement recv_scattered_pkts */
1736 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1739 struct iavf_rx_queue *rxq = rx_queue;
1740 union iavf_rx_desc rxd;
1741 struct rte_mbuf *rxe;
1742 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1743 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1744 struct rte_mbuf *nmb, *rxm;
1745 uint16_t rx_id = rxq->rx_tail;
1746 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1747 struct rte_eth_dev *dev;
1753 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1754 volatile union iavf_rx_desc *rxdp;
1755 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1757 while (nb_rx < nb_pkts) {
1758 rxdp = &rx_ring[rx_id];
1759 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1760 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1761 IAVF_RXD_QW1_STATUS_SHIFT;
1763 /* Check the DD bit */
1764 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1766 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1768 nmb = rte_mbuf_raw_alloc(rxq->mp);
1769 if (unlikely(!nmb)) {
1770 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1771 "queue_id=%u", rxq->port_id, rxq->queue_id);
1772 dev = &rte_eth_devices[rxq->port_id];
1773 dev->data->rx_mbuf_alloc_failed++;
1779 rxe = rxq->sw_ring[rx_id];
1780 rxq->sw_ring[rx_id] = nmb;
1782 if (rx_id == rxq->nb_rx_desc)
1785 /* Prefetch next mbuf */
1786 rte_prefetch0(rxq->sw_ring[rx_id]);
1788 /* When next RX descriptor is on a cache line boundary,
1789 * prefetch the next 4 RX descriptors and next 8 pointers
1792 if ((rx_id & 0x3) == 0) {
1793 rte_prefetch0(&rx_ring[rx_id]);
1794 rte_prefetch0(rxq->sw_ring[rx_id]);
1799 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1801 /* Set data buffer address and data length of the mbuf */
1802 rxdp->read.hdr_addr = 0;
1803 rxdp->read.pkt_addr = dma_addr;
1804 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1805 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1806 rxm->data_len = rx_packet_len;
1807 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1809 /* If this is the first buffer of the received packet, set the
1810 * pointer to the first mbuf of the packet and initialize its
1811 * context. Otherwise, update the total length and the number
1812 * of segments of the current scattered packet, and update the
1813 * pointer to the last mbuf of the current packet.
1817 first_seg->nb_segs = 1;
1818 first_seg->pkt_len = rx_packet_len;
1820 first_seg->pkt_len =
1821 (uint16_t)(first_seg->pkt_len +
1823 first_seg->nb_segs++;
1824 last_seg->next = rxm;
1827 /* If this is not the last buffer of the received packet,
1828 * update the pointer to the last mbuf of the current scattered
1829 * packet and continue to parse the RX ring.
1831 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1836 /* This is the last buffer of the received packet. If the CRC
1837 * is not stripped by the hardware:
1838 * - Subtract the CRC length from the total packet length.
1839 * - If the last buffer only contains the whole CRC or a part
1840 * of it, free the mbuf associated to the last buffer. If part
1841 * of the CRC is also contained in the previous mbuf, subtract
1842 * the length of that CRC part from the data length of the
1846 if (unlikely(rxq->crc_len > 0)) {
1847 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1848 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1849 rte_pktmbuf_free_seg(rxm);
1850 first_seg->nb_segs--;
1851 last_seg->data_len =
1852 (uint16_t)(last_seg->data_len -
1853 (RTE_ETHER_CRC_LEN - rx_packet_len));
1854 last_seg->next = NULL;
1856 rxm->data_len = (uint16_t)(rx_packet_len -
1860 first_seg->port = rxq->port_id;
1861 first_seg->ol_flags = 0;
1862 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1863 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1864 first_seg->packet_type =
1865 ptype_tbl[(uint8_t)((qword1 &
1866 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1868 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
1869 first_seg->hash.rss =
1870 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1872 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
1873 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1875 first_seg->ol_flags |= pkt_flags;
1877 /* Prefetch data of first segment, if configured to do so. */
1878 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1879 first_seg->data_off));
1880 rx_pkts[nb_rx++] = first_seg;
1884 /* Record index of the next RX descriptor to probe. */
1885 rxq->rx_tail = rx_id;
1886 rxq->pkt_first_seg = first_seg;
1887 rxq->pkt_last_seg = last_seg;
1889 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1894 #define IAVF_LOOK_AHEAD 8
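/*
 * The ring-scan helpers below walk the Rx ring in groups of IAVF_LOOK_AHEAD
 * descriptors (up to IAVF_RX_MAX_BURST per call), convert every descriptor
 * whose DD bit is set into an mbuf, hand up to nb_pkts of them directly to
 * the caller and stage any excess in rxq->rx_stage for a later call.
 */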
1896 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
1897 struct rte_mbuf **rx_pkts,
1900 volatile union iavf_rx_flex_desc *rxdp;
1901 struct rte_mbuf **rxep;
1902 struct rte_mbuf *mb;
1905 int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
1906 int32_t i, j, nb_rx = 0;
1907 int32_t nb_staged = 0;
1909 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1912 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1913 rxep = &rxq->sw_ring[rxq->rx_tail];
1915 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1917 /* Make sure there is at least 1 packet to receive */
1918 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1921 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1922 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1924 if (sw_cur_time - rxq->hw_time_update > 4) {
1925 if (iavf_get_phc_time(rxq))
1926 PMD_DRV_LOG(ERR, "Failed to get PHC time");
1927 rxq->hw_time_update = sw_cur_time;
1931 /* Scan LOOK_AHEAD descriptors at a time to determine which
1932 * descriptors reference packets that are ready to be received.
1934 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1935 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1936 /* Read desc statuses backwards to avoid race condition */
1937 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1938 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1940 /* This barrier is to order loads of different words in the descriptor */
1941 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
1943 /* Compute how many contiguous DD bits were set */
1944 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
1945 var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1947 /* For Arm platforms, count only contiguous descriptors
1948 * whose DD bit is set to 1. On Arm platforms, reads of
1949 * descriptors can be reordered. Since the CPU may
1950 * be reading the descriptors as the NIC updates them
1951 * in memory, it is possible that the DD bit for a
1952 * descriptor earlier in the queue is read as not set
1953 * while the DD bit for a descriptor later in the queue
1965 /* Translate descriptor info to mbuf parameters */
1966 for (j = 0; j < nb_dd; j++) {
1967 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1969 i * IAVF_LOOK_AHEAD + j);
1972 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1973 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1974 mb->data_len = pkt_len;
1975 mb->pkt_len = pkt_len;
1978 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1979 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1980 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1981 iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
1982 &rxq->stats.ipsec_crypto);
1983 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1984 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1985 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1987 if (iavf_timestamp_dynflag > 0) {
1988 ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
1989 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1991 rxq->phc_time = ts_ns;
1992 rxq->hw_time_update = rte_get_timer_cycles() /
1993 (rte_get_timer_hz() / 1000);
1995 *RTE_MBUF_DYNFIELD(mb,
1996 iavf_timestamp_dynfield_offset,
1997 rte_mbuf_timestamp_t *) = ts_ns;
1998 mb->ol_flags |= iavf_timestamp_dynflag;
2001 mb->ol_flags |= pkt_flags;
2003 /* Put up to nb_pkts directly into buffers */
2004 if ((i + j) < nb_pkts) {
2005 rx_pkts[i + j] = rxep[j];
2008 /* Stage excess pkts received */
2009 rxq->rx_stage[nb_staged] = rxep[j];
2014 if (nb_dd != IAVF_LOOK_AHEAD)
2018 /* Update rxq->rx_nb_avail to reflect number of staged pkts */
2019 rxq->rx_nb_avail = nb_staged;
2021 /* Clear software ring entries */
2022 for (i = 0; i < (nb_rx + nb_staged); i++)
2023 rxq->sw_ring[rxq->rx_tail + i] = NULL;
2029 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2031 volatile union iavf_rx_desc *rxdp;
2032 struct rte_mbuf **rxep;
2033 struct rte_mbuf *mb;
2037 int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
2038 int32_t i, j, nb_rx = 0;
2039 int32_t nb_staged = 0;
2041 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2043 rxdp = &rxq->rx_ring[rxq->rx_tail];
2044 rxep = &rxq->sw_ring[rxq->rx_tail];
2046 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
2047 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
2048 IAVF_RXD_QW1_STATUS_SHIFT;
2050 /* Make sure there is at least 1 packet to receive */
2051 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
2054 /* Scan LOOK_AHEAD descriptors at a time to determine which
2055 * descriptors reference packets that are ready to be received.
2057 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
2058 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
2059 /* Read desc statuses backwards to avoid race condition */
2060 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
2061 qword1 = rte_le_to_cpu_64(
2062 rxdp[j].wb.qword1.status_error_len);
2063 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
2064 IAVF_RXD_QW1_STATUS_SHIFT;
2067 /* This barrier is to order loads of different words in the descriptor */
2068 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2070 /* Compute how many contiguous DD bits were set */
2071 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
2072 var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
2074 /* For Arm platforms, count only contiguous descriptors
2075 * whose DD bit is set to 1. On Arm platforms, reads of
2076 * descriptors can be reordered. Since the CPU may
2077 * be reading the descriptors as the NIC updates them
2078 * in memory, it is possible that the DD bit for a
2079 * descriptor earlier in the queue is read as not set
2080 * while the DD bit for a descriptor later in the queue
2092 /* Translate descriptor info to mbuf parameters */
2093 for (j = 0; j < nb_dd; j++) {
2094 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
2095 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
2098 qword1 = rte_le_to_cpu_64
2099 (rxdp[j].wb.qword1.status_error_len);
2100 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
2101 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
2102 mb->data_len = pkt_len;
2103 mb->pkt_len = pkt_len;
2105 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
2106 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
2108 ptype_tbl[(uint8_t)((qword1 &
2109 IAVF_RXD_QW1_PTYPE_MASK) >>
2110 IAVF_RXD_QW1_PTYPE_SHIFT)];
2112 if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
2113 mb->hash.rss = rte_le_to_cpu_32(
2114 rxdp[j].wb.qword0.hi_dword.rss);
2116 if (pkt_flags & RTE_MBUF_F_RX_FDIR)
2117 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
2119 mb->ol_flags |= pkt_flags;
2121 /* Put up to nb_pkts directly into buffers */
2122 if ((i + j) < nb_pkts) {
2123 rx_pkts[i + j] = rxep[j];
2125 } else { /* Stage excess pkts received */
2126 rxq->rx_stage[nb_staged] = rxep[j];
2131 if (nb_dd != IAVF_LOOK_AHEAD)
2135 /* Update rxq->rx_nb_avail to reflect number of staged pkts */
2136 rxq->rx_nb_avail = nb_staged;
2138 /* Clear software ring entries */
2139 for (i = 0; i < (nb_rx + nb_staged); i++)
2140 rxq->sw_ring[rxq->rx_tail + i] = NULL;
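/*
 * iavf_rx_fill_from_stage() below hands out packets previously staged by the
 * scan helpers above: it copies up to nb_pkts mbuf pointers from
 * rxq->rx_stage into rx_pkts and advances the staging counters accordingly.
 */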
2145 static inline uint16_t
2146 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
2147 struct rte_mbuf **rx_pkts,
2151 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
2153 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
2155 for (i = 0; i < nb_pkts; i++)
2156 rx_pkts[i] = stage[i];
2158 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
2159 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
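/*
 * iavf_rx_alloc_bufs() below bulk-refills the Rx ring: it takes
 * rx_free_thresh mbufs from the mempool in one rte_mempool_get_bulk() call,
 * programs their DMA addresses starting at the current free trigger position
 * and then advances the tail register.
 */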
2165 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
2167 volatile union iavf_rx_desc *rxdp;
2168 struct rte_mbuf **rxep;
2169 struct rte_mbuf *mb;
2170 uint16_t alloc_idx, i;
2174 /* Allocate buffers in bulk */
2175 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
2176 (rxq->rx_free_thresh - 1));
2177 rxep = &rxq->sw_ring[alloc_idx];
2178 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
2179 rxq->rx_free_thresh);
2180 if (unlikely(diag != 0)) {
2181 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
2185 rxdp = &rxq->rx_ring[alloc_idx];
2186 for (i = 0; i < rxq->rx_free_thresh; i++) {
2187 if (likely(i < (rxq->rx_free_thresh - 1)))
2188 /* Prefetch next mbuf */
2189 rte_prefetch0(rxep[i + 1]);
2192 rte_mbuf_refcnt_set(mb, 1);
2194 mb->data_off = RTE_PKTMBUF_HEADROOM;
2196 mb->port = rxq->port_id;
2197 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
2198 rxdp[i].read.hdr_addr = 0;
2199 rxdp[i].read.pkt_addr = dma_addr;
2202 /* Update rx tail register */
2204 IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
2206 rxq->rx_free_trigger =
2207 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
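/* Wrap the free trigger back to the first threshold boundary once it passes the end of the ring */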
2208 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
2209 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2214 static inline uint16_t
2215 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2217 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
2223 if (rxq->rx_nb_avail)
2224 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
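/* Scan the hardware ring with the helper matching the descriptor format in use: flexible RXDIDs take the flex_rxd path, everything else the legacy path */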
2226 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
2227 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq, rx_pkts, nb_pkts);
2229 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq, rx_pkts, nb_pkts);
2231 rxq->rx_next_avail = 0;
2232 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx + rxq->rx_nb_avail);
2234 if (rxq->rx_tail > rxq->rx_free_trigger) {
2235 if (iavf_rx_alloc_bufs(rxq) != 0) {
2236 uint16_t i, j, nb_staged;
2238 /* TODO: count rx_mbuf_alloc_failed here */
2240 nb_staged = rxq->rx_nb_avail;
2241 rxq->rx_nb_avail = 0;
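/* Buffer allocation failed: rewind rx_tail and return the already-scanned mbufs to the software ring so none are leaked */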
2243 rxq->rx_tail = (uint16_t)(rxq->rx_tail - (nb_rx + nb_staged));
2244 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) {
2245 rxq->sw_ring[j] = rx_pkts[i];
2248 for (i = 0, j = rxq->rx_tail + nb_rx; i < nb_staged; i++, j++) {
2249 rxq->sw_ring[j] = rxq->rx_stage[i];
2257 if (rxq->rx_tail >= rxq->nb_rx_desc)
2260 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
2261 rxq->port_id, rxq->queue_id,
2262 rxq->rx_tail, nb_rx);
2268 iavf_recv_pkts_bulk_alloc(void *rx_queue,
2269 struct rte_mbuf **rx_pkts,
2272 uint16_t nb_rx = 0, n, count;
2274 if (unlikely(nb_pkts == 0))
2277 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
2278 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
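/* Larger requests are split into chunks of at most IAVF_RX_MAX_BURST packets per call */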
2281 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
2282 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2283 nb_rx = (uint16_t)(nb_rx + count);
2284 nb_pkts = (uint16_t)(nb_pkts - count);
2293 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
2295 struct iavf_tx_entry *sw_ring = txq->sw_ring;
2296 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2297 uint16_t nb_tx_desc = txq->nb_tx_desc;
2298 uint16_t desc_to_clean_to;
2299 uint16_t nb_tx_to_clean;
2301 volatile struct iavf_tx_desc *txd = txq->tx_ring;
2303 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
2304 if (desc_to_clean_to >= nb_tx_desc)
2305 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
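/* The cleanup target may fall in the middle of a multi-descriptor packet; move it to that packet's last descriptor so completion is checked on a packet boundary */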
2307 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2308 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
2309 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
2310 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
2311 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2312 "(port=%d queue=%d)", desc_to_clean_to,
2313 txq->port_id, txq->queue_id);
2317 if (last_desc_cleaned > desc_to_clean_to)
2318 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2321 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2324 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2326 txq->last_desc_cleaned = desc_to_clean_to;
2327 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2332 /* Check if the context descriptor is needed for TX offloading */
2333 static inline uint16_t
2334 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2336 if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
2337 RTE_MBUF_F_TX_TUNNEL_MASK))
2339 if (flags & RTE_MBUF_F_TX_VLAN &&
2340 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2346 iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
2352 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
2353 cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2355 if (m->ol_flags & RTE_MBUF_F_TX_VLAN &&
2356 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2357 cmd |= IAVF_TX_CTX_DESC_IL2TAG2
2358 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2365 iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
2366 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2368 uint64_t ipsec_field =
2369 (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
2370 IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
2372 *field |= ipsec_field;
2377 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
2378 const struct rte_mbuf *m)
2380 uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
2381 uint64_t eip_len = 0;
2382 uint64_t eip_noinc = 0;
2383 /* Default - IP_ID is incremented in each segment of LSO */
2385 switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
2386 RTE_MBUF_F_TX_OUTER_IPV6 |
2387 RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
2388 case RTE_MBUF_F_TX_OUTER_IPV4:
2389 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
2390 eip_len = m->outer_l3_len >> 2;
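/* EIPLEN is programmed in 4-byte words, hence the >> 2 conversion */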
2392 case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
2393 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
2394 eip_len = m->outer_l3_len >> 2;
2396 case RTE_MBUF_F_TX_OUTER_IPV6:
2397 eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
2398 eip_len = m->outer_l3_len >> 2;
2402 *qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
2403 eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
2404 eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
2407 static inline uint16_t
2408 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
2409 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
2411 uint64_t segmentation_field = 0;
2412 uint64_t total_length = 0;
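/* The TSO length field must cover only the L4 payload: take it from the IPsec metadata when security offload is in use, otherwise strip the L2/L3/L4 (and outer L3) header lengths from pkt_len */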
2414 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
2415 total_length = ipsec_md->l4_payload_len;
2417 total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
2419 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2420 total_length -= m->outer_l3_len;
2423 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
2424 if (!m->l4_len || !m->tso_segsz)
2425 PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
2426 m->l4_len, m->tso_segsz);
2427 if (m->tso_segsz < 88)
2428 PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
2431 segmentation_field =
2432 (((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
2433 IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
2434 (((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
2435 IAVF_TXD_CTX_QW1_MSS_MASK);
2437 *field |= segmentation_field;
2439 return total_length;
2443 struct iavf_tx_context_desc_qws {
2449 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
2450 struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
2451 uint16_t *tlen, uint8_t vlan_flag)
2453 volatile struct iavf_tx_context_desc_qws *desc_qws =
2454 (volatile struct iavf_tx_context_desc_qws *)desc;
2455 /* fill descriptor type field */
2456 desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
2458 /* fill command field */
2459 iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);
2461 /* fill segmentation field */
2462 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
2463 /* fill IPsec field */
2464 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2465 iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
2468 *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
2472 /* fill tunnelling field */
2473 if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2474 iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
2478 desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
2479 desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
2481 if (vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
2482 desc->l2tag2 = m->vlan_tci;
2487 iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
2488 const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
2490 desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
2491 IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
2492 ((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
2493 ((uint64_t)md->esp_trailer_len <<
2494 IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
2496 desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
2497 IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
2498 ((uint64_t)md->next_proto <<
2499 IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
2500 ((uint64_t)(md->len_iv & 0x3) <<
2501 IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
2502 ((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2504 IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
2505 (uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
2508 * TODO: Pre-calculate this in the Session initialization
2510 * Calculate IPsec length required in data descriptor func when TSO
2511 * offload is enabled
2513 *ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
2514 (md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
2515 sizeof(struct rte_udp_hdr) : 0);
2519 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
2520 struct rte_mbuf *m, uint8_t vlan_flag)
2522 uint64_t command = 0;
2523 uint64_t offset = 0;
2524 uint64_t l2tag1 = 0;
2526 *qw1 = IAVF_TX_DESC_DTYPE_DATA;
2528 command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
2530 /* Descriptor based VLAN insertion */
2531 if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
2532 m->ol_flags & RTE_MBUF_F_TX_VLAN) {
2533 command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
2534 l2tag1 |= m->vlan_tci;
2538 offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
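/* MACLEN is expressed in 2-byte words; the IPLEN and L4LEN fields below are in 4-byte words */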
2540 /* Enable inner L3 checksum offloading */
2541 if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
2542 command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2543 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2544 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
2545 command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2546 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2547 } else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
2548 command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2549 offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2552 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2553 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2554 offset |= (m->l4_len >> 2) <<
2555 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2558 /* Enable L4 checksum offloads */
2559 switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2560 case RTE_MBUF_F_TX_TCP_CKSUM:
2561 command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2562 offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2563 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2565 case RTE_MBUF_F_TX_SCTP_CKSUM:
2566 command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2567 offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2568 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2570 case RTE_MBUF_F_TX_UDP_CKSUM:
2571 command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2572 offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2573 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2577 *qw1 = rte_cpu_to_le_64((((uint64_t)command <<
2578 IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
2579 (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
2580 IAVF_TXD_DATA_QW1_OFFSET_MASK) |
2581 ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
2585 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
2586 struct rte_mbuf *m, uint64_t desc_template,
2587 uint16_t tlen, uint16_t ipseclen)
2589 uint32_t hdrlen = m->l2_len;
2592 /* fill data descriptor qw1 from template */
2593 desc->cmd_type_offset_bsz = desc_template;
2595 /* set data buffer address */
2596 desc->buffer_addr = rte_mbuf_data_iova(m);
2598 /* calculate the data buffer size, accounting for the configured header lengths */
2599 if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
2600 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2601 RTE_MBUF_F_TX_UDP_SEG))) {
2602 hdrlen += m->outer_l3_len;
2603 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2604 hdrlen += m->l3_len + m->l4_len;
2606 hdrlen += m->l3_len;
2607 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2609 bufsz = hdrlen + tlen;
2610 } else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
2611 (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
2612 RTE_MBUF_F_TX_UDP_SEG))) {
2613 hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
2614 if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
2615 hdrlen += m->l4_len;
2616 bufsz = hdrlen + tlen;
2619 bufsz = m->data_len;
2622 /* set data buffer size */
2623 desc->cmd_type_offset_bsz |=
2624 (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
2625 IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
2627 desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
2628 desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
2632 static struct iavf_ipsec_crypto_pkt_metadata *
2633 iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
2636 if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
2637 return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
2638 struct iavf_ipsec_crypto_pkt_metadata *);
2645 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2647 struct iavf_tx_queue *txq = tx_queue;
2648 volatile struct iavf_tx_desc *txr = txq->tx_ring;
2649 struct iavf_tx_entry *txe_ring = txq->sw_ring;
2650 struct iavf_tx_entry *txe, *txn;
2651 struct rte_mbuf *mb, *mb_seg;
2652 uint16_t desc_idx, desc_idx_last;
2656 /* Check if the descriptor ring needs to be cleaned. */
2657 if (txq->nb_free < txq->free_thresh)
2658 iavf_xmit_cleanup(txq);
2660 desc_idx = txq->tx_tail;
2661 txe = &txe_ring[desc_idx];
2663 for (idx = 0; idx < nb_pkts; idx++) {
2664 volatile struct iavf_tx_desc *ddesc;
2665 struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
2667 uint16_t nb_desc_ctx, nb_desc_ipsec;
2668 uint16_t nb_desc_data, nb_desc_required;
2669 uint16_t tlen = 0, ipseclen = 0;
2670 uint64_t ddesc_template = 0;
2671 uint64_t ddesc_cmd = 0;
2675 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2678 * Get metadata for ipsec crypto from mbuf dynamic fields if
2679 * security offload is specified.
2681 ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
2683 nb_desc_data = mb->nb_segs;
2685 iavf_calc_context_desc(mb->ol_flags, txq->vlan_flag);
2686 nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
2689 * The number of descriptors that must be allocated for
2690 * a packet equals the number of segments of that
2691 * packet plus the context and ipsec descriptors if needed.
2693 nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
2695 desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
2697 /* wrap descriptor ring */
2698 if (desc_idx_last >= txq->nb_tx_desc)
2700 (uint16_t)(desc_idx_last - txq->nb_tx_desc);
2703 "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
2704 txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
2706 if (nb_desc_required > txq->nb_free) {
2707 if (iavf_xmit_cleanup(txq)) {
2712 if (unlikely(nb_desc_required > txq->rs_thresh)) {
2713 while (nb_desc_required > txq->nb_free) {
2714 if (iavf_xmit_cleanup(txq)) {
2723 iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
2726 /* Setup TX context descriptor if required */
2728 volatile struct iavf_tx_context_desc *ctx_desc =
2729 (volatile struct iavf_tx_context_desc *)
2732 /* clear QW0 or the previous writeback value
2733 * may impact the next write
2735 *(volatile uint64_t *)ctx_desc = 0;
2737 txn = &txe_ring[txe->next_id];
2738 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2741 rte_pktmbuf_free_seg(txe->mbuf);
2745 iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
2747 IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
2749 txe->last_id = desc_idx_last;
2750 desc_idx = txe->next_id;
2754 if (nb_desc_ipsec) {
2755 volatile struct iavf_tx_ipsec_desc *ipsec_desc =
2756 (volatile struct iavf_tx_ipsec_desc *)
2759 txn = &txe_ring[txe->next_id];
2760 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2763 rte_pktmbuf_free_seg(txe->mbuf);
2767 iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
2769 IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
2771 txe->last_id = desc_idx_last;
2772 desc_idx = txe->next_id;
2779 ddesc = (volatile struct iavf_tx_desc *)
2782 txn = &txe_ring[txe->next_id];
2783 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2786 rte_pktmbuf_free_seg(txe->mbuf);
2789 iavf_fill_data_desc(ddesc, mb_seg,
2790 ddesc_template, tlen, ipseclen);
2792 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
2794 txe->last_id = desc_idx_last;
2795 desc_idx = txe->next_id;
2797 mb_seg = mb_seg->next;
2800 /* The last packet data descriptor needs End Of Packet (EOP) */
2801 ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
2803 txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
2804 txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
2806 if (txq->nb_used >= txq->rs_thresh) {
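/* Request a completion report (RS bit) on this packet's last descriptor once rs_thresh descriptors have been used */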
2807 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2808 "%4u (port=%d queue=%d)",
2809 desc_idx_last, txq->port_id, txq->queue_id);
2811 ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
2813 /* Update txq RS bit counters */
2817 ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
2818 IAVF_TXD_DATA_QW1_CMD_SHIFT);
2820 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
2826 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2827 txq->port_id, txq->queue_id, desc_idx, idx);
2829 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
2830 txq->tx_tail = desc_idx;
2835 /* Check if the packet with vlan user priority is transmitted in the
2839 iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
2841 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2842 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
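/* The PCP (user priority) field occupies the top three bits of the VLAN TCI */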
2845 up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
2847 if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
2848 PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot be transmitted in queue %u\n",
2856 /* TX prep functions */
2858 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2864 struct iavf_tx_queue *txq = tx_queue;
2865 struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
2866 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2867 struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2869 if (adapter->closed)
2872 for (i = 0; i < nb_pkts; i++) {
2874 ol_flags = m->ol_flags;
2876 /* Check that nb_segs does not exceed IAVF_TX_MAX_MTU_SEG for non-TSO packets. */
2877 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
2878 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2882 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2883 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2884 /* MSS values outside this range are considered malicious */
2889 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2890 rte_errno = ENOTSUP;
2894 #ifdef RTE_ETHDEV_DEBUG_TX
2895 ret = rte_validate_tx_offload(m);
2901 ret = rte_net_intel_cksum_prepare(m);
2907 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
2908 ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
2909 ret = iavf_check_vlan_up2tc(txq, m);
2920 /* choose rx function */
2922 iavf_set_rx_function(struct rte_eth_dev *dev)
2924 struct iavf_adapter *adapter =
2925 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2926 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2928 struct iavf_rx_queue *rxq;
2929 bool use_flex = true;
2931 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2932 rxq = dev->data->rx_queues[i];
2933 if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
2934 PMD_DRV_LOG(NOTICE, "requested RXDID[%d] in Queue[%d] is legacy, "
2935 "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
2937 } else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
2938 PMD_DRV_LOG(NOTICE, "requested RXDID[%d] in Queue[%d] is not supported, "
2939 "set rx_pkt_burst as legacy for all queues", rxq->rxdid, i);
2946 bool use_avx2 = false;
2947 bool use_avx512 = false;
2949 check_ret = iavf_rx_vec_dev_check(dev);
2950 if (check_ret >= 0 &&
2951 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2952 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2953 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2954 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2957 #ifdef CC_AVX512_SUPPORT
2958 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2959 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2960 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2964 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2965 rxq = dev->data->rx_queues[i];
2966 (void)iavf_rxq_vec_setup(rxq);
2969 if (dev->data->scattered_rx) {
2972 "Using %sVector Scattered Rx (port %d).",
2973 use_avx2 ? "avx2 " : "",
2974 dev->data->port_id);
2976 if (check_ret == IAVF_VECTOR_PATH)
2978 "Using AVX512 Vector Scattered Rx (port %d).",
2979 dev->data->port_id);
2982 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
2983 dev->data->port_id);
2986 dev->rx_pkt_burst = use_avx2 ?
2987 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2988 iavf_recv_scattered_pkts_vec_flex_rxd;
2989 #ifdef CC_AVX512_SUPPORT
2991 if (check_ret == IAVF_VECTOR_PATH)
2993 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2996 iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
3000 dev->rx_pkt_burst = use_avx2 ?
3001 iavf_recv_scattered_pkts_vec_avx2 :
3002 iavf_recv_scattered_pkts_vec;
3003 #ifdef CC_AVX512_SUPPORT
3005 if (check_ret == IAVF_VECTOR_PATH)
3007 iavf_recv_scattered_pkts_vec_avx512;
3010 iavf_recv_scattered_pkts_vec_avx512_offload;
3016 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
3017 use_avx2 ? "avx2 " : "",
3018 dev->data->port_id);
3020 if (check_ret == IAVF_VECTOR_PATH)
3022 "Using AVX512 Vector Rx (port %d).",
3023 dev->data->port_id);
3026 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3027 dev->data->port_id);
3030 dev->rx_pkt_burst = use_avx2 ?
3031 iavf_recv_pkts_vec_avx2_flex_rxd :
3032 iavf_recv_pkts_vec_flex_rxd;
3033 #ifdef CC_AVX512_SUPPORT
3035 if (check_ret == IAVF_VECTOR_PATH)
3037 iavf_recv_pkts_vec_avx512_flex_rxd;
3040 iavf_recv_pkts_vec_avx512_flex_rxd_offload;
3044 dev->rx_pkt_burst = use_avx2 ?
3045 iavf_recv_pkts_vec_avx2 :
3047 #ifdef CC_AVX512_SUPPORT
3049 if (check_ret == IAVF_VECTOR_PATH)
3051 iavf_recv_pkts_vec_avx512;
3054 iavf_recv_pkts_vec_avx512_offload;
3064 if (dev->data->scattered_rx) {
3065 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
3066 dev->data->port_id);
3068 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
3070 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
3071 } else if (adapter->rx_bulk_alloc_allowed) {
3072 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
3073 dev->data->port_id);
3074 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
3076 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
3077 dev->data->port_id);
3079 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
3081 dev->rx_pkt_burst = iavf_recv_pkts;
3085 /* choose tx function */
3087 iavf_set_tx_function(struct rte_eth_dev *dev)
3090 struct iavf_tx_queue *txq;
3093 bool use_sse = false;
3094 bool use_avx2 = false;
3095 bool use_avx512 = false;
3097 check_ret = iavf_tx_vec_dev_check(dev);
3099 if (check_ret >= 0 &&
3100 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3101 /* SSE and AVX2 do not support the offload path yet. */
3102 if (check_ret == IAVF_VECTOR_PATH) {
3104 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3105 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3106 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3109 #ifdef CC_AVX512_SUPPORT
3110 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3111 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
3112 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
3116 if (!use_sse && !use_avx2 && !use_avx512)
3120 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3121 use_avx2 ? "avx2 " : "",
3122 dev->data->port_id);
3123 dev->tx_pkt_burst = use_avx2 ?
3124 iavf_xmit_pkts_vec_avx2 :
3127 dev->tx_pkt_prepare = NULL;
3128 #ifdef CC_AVX512_SUPPORT
3130 if (check_ret == IAVF_VECTOR_PATH) {
3131 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
3132 PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
3133 dev->data->port_id);
3135 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
3136 dev->tx_pkt_prepare = iavf_prep_pkts;
3137 PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
3138 dev->data->port_id);
3143 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3144 txq = dev->data->tx_queues[i];
3147 #ifdef CC_AVX512_SUPPORT
3149 iavf_txq_vec_setup_avx512(txq);
3151 iavf_txq_vec_setup(txq);
3153 iavf_txq_vec_setup(txq);
3162 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
3163 dev->data->port_id);
3164 dev->tx_pkt_burst = iavf_xmit_pkts;
3165 dev->tx_pkt_prepare = iavf_prep_pkts;
3169 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
3172 struct iavf_tx_entry *swr_ring = txq->sw_ring;
3173 uint16_t i, tx_last, tx_id;
3174 uint16_t nb_tx_free_last;
3175 uint16_t nb_tx_to_clean;
3178 /* Start freeing mbufs from the descriptor following tx_tail */
3179 tx_last = txq->tx_tail;
3180 tx_id = swr_ring[tx_last].next_id;
3182 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
3185 nb_tx_to_clean = txq->nb_free;
3186 nb_tx_free_last = txq->nb_free;
3188 free_cnt = txq->nb_tx_desc;
3190 /* Loop through swr_ring to count the number of
3191 * freeable mbufs and packets.
3193 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3194 for (i = 0; i < nb_tx_to_clean &&
3195 pkt_cnt < free_cnt &&
3196 tx_id != tx_last; i++) {
3197 if (swr_ring[tx_id].mbuf != NULL) {
3198 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3199 swr_ring[tx_id].mbuf = NULL;
3202 * last segment in the packet,
3203 * increment packet count
3205 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3208 tx_id = swr_ring[tx_id].next_id;
3211 if (txq->rs_thresh > txq->nb_tx_desc -
3212 txq->nb_free || tx_id == tx_last)
3215 if (pkt_cnt < free_cnt) {
3216 if (iavf_xmit_cleanup(txq))
3219 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
3220 nb_tx_free_last = txq->nb_free;
3224 return (int)pkt_cnt;
3228 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
3230 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
3232 return iavf_tx_done_cleanup_full(q, free_cnt);
3236 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3237 struct rte_eth_rxq_info *qinfo)
3239 struct iavf_rx_queue *rxq;
3241 rxq = dev->data->rx_queues[queue_id];
3243 qinfo->mp = rxq->mp;
3244 qinfo->scattered_rx = dev->data->scattered_rx;
3245 qinfo->nb_desc = rxq->nb_rx_desc;
3247 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3248 qinfo->conf.rx_drop_en = true;
3249 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3253 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3254 struct rte_eth_txq_info *qinfo)
3256 struct iavf_tx_queue *txq;
3258 txq = dev->data->tx_queues[queue_id];
3260 qinfo->nb_desc = txq->nb_tx_desc;
3262 qinfo->conf.tx_free_thresh = txq->free_thresh;
3263 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
3264 qinfo->conf.offloads = txq->offloads;
3265 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3268 /* Get the number of used descriptors of an rx queue */
3270 iavf_dev_rxq_count(void *rx_queue)
3272 #define IAVF_RXQ_SCAN_INTERVAL 4
3273 volatile union iavf_rx_desc *rxdp;
3274 struct iavf_rx_queue *rxq;
3278 rxdp = &rxq->rx_ring[rxq->rx_tail];
3280 while ((desc < rxq->nb_rx_desc) &&
3281 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
3282 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
3283 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
3284 /* Check the DD bit of every 4th rx descriptor in the group,
3285 * to avoid checking too frequently and degrading performance
3288 desc += IAVF_RXQ_SCAN_INTERVAL;
3289 rxdp += IAVF_RXQ_SCAN_INTERVAL;
3290 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3291 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3292 desc - rxq->nb_rx_desc]);
3299 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
3301 struct iavf_rx_queue *rxq = rx_queue;
3302 volatile uint64_t *status;
3306 if (unlikely(offset >= rxq->nb_rx_desc))
3309 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
3310 return RTE_ETH_RX_DESC_UNAVAIL;
3312 desc = rxq->rx_tail + offset;
3313 if (desc >= rxq->nb_rx_desc)
3314 desc -= rxq->nb_rx_desc;
3316 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
3317 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
3318 << IAVF_RXD_QW1_STATUS_SHIFT);
3320 return RTE_ETH_RX_DESC_DONE;
3322 return RTE_ETH_RX_DESC_AVAIL;
3326 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
3328 struct iavf_tx_queue *txq = tx_queue;
3329 volatile uint64_t *status;
3330 uint64_t mask, expect;
3333 if (unlikely(offset >= txq->nb_tx_desc))
3336 desc = txq->tx_tail + offset;
3337 /* go to next desc that has the RS bit */
3338 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
3340 if (desc >= txq->nb_tx_desc) {
3341 desc -= txq->nb_tx_desc;
3342 if (desc >= txq->nb_tx_desc)
3343 desc -= txq->nb_tx_desc;
3346 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
3347 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
3348 expect = rte_cpu_to_le_64(
3349 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
3350 if ((*status & mask) == expect)
3351 return RTE_ETH_TX_DESC_DONE;
3353 return RTE_ETH_TX_DESC_FULL;
3356 static inline uint32_t
3357 iavf_get_default_ptype(uint16_t ptype)
3359 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
3360 __rte_cache_aligned = {
3363 [1] = RTE_PTYPE_L2_ETHER,
3364 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3365 /* [3] - [5] reserved */
3366 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3367 /* [7] - [10] reserved */
3368 [11] = RTE_PTYPE_L2_ETHER_ARP,
3369 /* [12] - [21] reserved */
3371 /* Non tunneled IPv4 */
3372 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3374 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3375 RTE_PTYPE_L4_NONFRAG,
3376 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3379 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3381 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3383 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3387 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3388 RTE_PTYPE_TUNNEL_IP |
3389 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3390 RTE_PTYPE_INNER_L4_FRAG,
3391 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3392 RTE_PTYPE_TUNNEL_IP |
3393 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3394 RTE_PTYPE_INNER_L4_NONFRAG,
3395 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3396 RTE_PTYPE_TUNNEL_IP |
3397 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3398 RTE_PTYPE_INNER_L4_UDP,
3400 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3401 RTE_PTYPE_TUNNEL_IP |
3402 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3403 RTE_PTYPE_INNER_L4_TCP,
3404 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3405 RTE_PTYPE_TUNNEL_IP |
3406 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3407 RTE_PTYPE_INNER_L4_SCTP,
3408 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3409 RTE_PTYPE_TUNNEL_IP |
3410 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3411 RTE_PTYPE_INNER_L4_ICMP,
3414 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3415 RTE_PTYPE_TUNNEL_IP |
3416 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3417 RTE_PTYPE_INNER_L4_FRAG,
3418 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3419 RTE_PTYPE_TUNNEL_IP |
3420 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3421 RTE_PTYPE_INNER_L4_NONFRAG,
3422 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3423 RTE_PTYPE_TUNNEL_IP |
3424 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3425 RTE_PTYPE_INNER_L4_UDP,
3427 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3428 RTE_PTYPE_TUNNEL_IP |
3429 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3430 RTE_PTYPE_INNER_L4_TCP,
3431 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3432 RTE_PTYPE_TUNNEL_IP |
3433 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3434 RTE_PTYPE_INNER_L4_SCTP,
3435 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3436 RTE_PTYPE_TUNNEL_IP |
3437 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3438 RTE_PTYPE_INNER_L4_ICMP,
3440 /* IPv4 --> GRE/Teredo/VXLAN */
3441 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3442 RTE_PTYPE_TUNNEL_GRENAT,
3444 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3445 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3446 RTE_PTYPE_TUNNEL_GRENAT |
3447 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3448 RTE_PTYPE_INNER_L4_FRAG,
3449 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3450 RTE_PTYPE_TUNNEL_GRENAT |
3451 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3452 RTE_PTYPE_INNER_L4_NONFRAG,
3453 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3454 RTE_PTYPE_TUNNEL_GRENAT |
3455 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3456 RTE_PTYPE_INNER_L4_UDP,
3458 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3459 RTE_PTYPE_TUNNEL_GRENAT |
3460 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3461 RTE_PTYPE_INNER_L4_TCP,
3462 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_TUNNEL_GRENAT |
3464 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3465 RTE_PTYPE_INNER_L4_SCTP,
3466 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_TUNNEL_GRENAT |
3468 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3469 RTE_PTYPE_INNER_L4_ICMP,
3471 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3472 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3473 RTE_PTYPE_TUNNEL_GRENAT |
3474 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3475 RTE_PTYPE_INNER_L4_FRAG,
3476 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3477 RTE_PTYPE_TUNNEL_GRENAT |
3478 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3479 RTE_PTYPE_INNER_L4_NONFRAG,
3480 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3481 RTE_PTYPE_TUNNEL_GRENAT |
3482 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3483 RTE_PTYPE_INNER_L4_UDP,
3485 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3486 RTE_PTYPE_TUNNEL_GRENAT |
3487 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3488 RTE_PTYPE_INNER_L4_TCP,
3489 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3490 RTE_PTYPE_TUNNEL_GRENAT |
3491 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3492 RTE_PTYPE_INNER_L4_SCTP,
3493 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_TUNNEL_GRENAT |
3495 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3496 RTE_PTYPE_INNER_L4_ICMP,
3498 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3499 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3500 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3502 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3503 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3504 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3505 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3506 RTE_PTYPE_INNER_L4_FRAG,
3507 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3508 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3509 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3510 RTE_PTYPE_INNER_L4_NONFRAG,
3511 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3512 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3513 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3514 RTE_PTYPE_INNER_L4_UDP,
3516 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3517 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3518 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3519 RTE_PTYPE_INNER_L4_TCP,
3520 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3522 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3523 RTE_PTYPE_INNER_L4_SCTP,
3524 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3526 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3527 RTE_PTYPE_INNER_L4_ICMP,
3529 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3530 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3531 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3532 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3533 RTE_PTYPE_INNER_L4_FRAG,
3534 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3535 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3536 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3537 RTE_PTYPE_INNER_L4_NONFRAG,
3538 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3539 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3540 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3541 RTE_PTYPE_INNER_L4_UDP,
3543 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3544 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3545 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3546 RTE_PTYPE_INNER_L4_TCP,
3547 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3548 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3549 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3550 RTE_PTYPE_INNER_L4_SCTP,
3551 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3552 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3553 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3554 RTE_PTYPE_INNER_L4_ICMP,
3555 /* [73] - [87] reserved */
3557 /* Non tunneled IPv6 */
3558 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3560 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3561 RTE_PTYPE_L4_NONFRAG,
3562 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3565 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3567 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3569 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3573 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3574 RTE_PTYPE_TUNNEL_IP |
3575 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3576 RTE_PTYPE_INNER_L4_FRAG,
3577 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3578 RTE_PTYPE_TUNNEL_IP |
3579 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3580 RTE_PTYPE_INNER_L4_NONFRAG,
3581 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3582 RTE_PTYPE_TUNNEL_IP |
3583 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3584 RTE_PTYPE_INNER_L4_UDP,
3586 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3587 RTE_PTYPE_TUNNEL_IP |
3588 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3589 RTE_PTYPE_INNER_L4_TCP,
3590 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3591 RTE_PTYPE_TUNNEL_IP |
3592 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3593 RTE_PTYPE_INNER_L4_SCTP,
3594 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3595 RTE_PTYPE_TUNNEL_IP |
3596 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3597 RTE_PTYPE_INNER_L4_ICMP,
3600 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3601 RTE_PTYPE_TUNNEL_IP |
3602 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3603 RTE_PTYPE_INNER_L4_FRAG,
3604 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3605 RTE_PTYPE_TUNNEL_IP |
3606 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3607 RTE_PTYPE_INNER_L4_NONFRAG,
3608 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_IP |
3610 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3611 RTE_PTYPE_INNER_L4_UDP,
3612 /* [105] reserved */
3613 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3614 RTE_PTYPE_TUNNEL_IP |
3615 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3616 RTE_PTYPE_INNER_L4_TCP,
3617 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3618 RTE_PTYPE_TUNNEL_IP |
3619 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3620 RTE_PTYPE_INNER_L4_SCTP,
3621 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3622 RTE_PTYPE_TUNNEL_IP |
3623 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3624 RTE_PTYPE_INNER_L4_ICMP,
3626 /* IPv6 --> GRE/Teredo/VXLAN */
3627 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3628 RTE_PTYPE_TUNNEL_GRENAT,
3630 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3631 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_GRENAT |
3633 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_FRAG,
3635 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_GRENAT |
3637 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_NONFRAG,
3639 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3640 RTE_PTYPE_TUNNEL_GRENAT |
3641 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3642 RTE_PTYPE_INNER_L4_UDP,
3643 /* [113] reserved */
3644 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3645 RTE_PTYPE_TUNNEL_GRENAT |
3646 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3647 RTE_PTYPE_INNER_L4_TCP,
3648 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3649 RTE_PTYPE_TUNNEL_GRENAT |
3650 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3651 RTE_PTYPE_INNER_L4_SCTP,
3652 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3653 RTE_PTYPE_TUNNEL_GRENAT |
3654 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3655 RTE_PTYPE_INNER_L4_ICMP,
3657 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3658 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3659 RTE_PTYPE_TUNNEL_GRENAT |
3660 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3661 RTE_PTYPE_INNER_L4_FRAG,
3662 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3663 RTE_PTYPE_TUNNEL_GRENAT |
3664 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3665 RTE_PTYPE_INNER_L4_NONFRAG,
3666 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3667 RTE_PTYPE_TUNNEL_GRENAT |
3668 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3669 RTE_PTYPE_INNER_L4_UDP,
3670 /* [120] reserved */
3671 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3672 RTE_PTYPE_TUNNEL_GRENAT |
3673 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3674 RTE_PTYPE_INNER_L4_TCP,
3675 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_GRENAT |
3677 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_SCTP,
3679 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_GRENAT |
3681 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_ICMP,
3684 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3685 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3686 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3688 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3689 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3691 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_FRAG,
3693 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3695 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_NONFRAG,
3697 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3699 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_UDP,
3701 /* [128] reserved */
3702 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3704 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3705 RTE_PTYPE_INNER_L4_TCP,
3706 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3708 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3709 RTE_PTYPE_INNER_L4_SCTP,
3710 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3712 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3713 RTE_PTYPE_INNER_L4_ICMP,
3715 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3716 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3717 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3718 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3719 RTE_PTYPE_INNER_L4_FRAG,
3720 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3721 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3722 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3723 RTE_PTYPE_INNER_L4_NONFRAG,
3724 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3725 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3726 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3727 RTE_PTYPE_INNER_L4_UDP,
3728 /* [135] reserved */
3729 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3730 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3731 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3732 RTE_PTYPE_INNER_L4_TCP,
3733 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3734 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3735 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3736 RTE_PTYPE_INNER_L4_SCTP,
3737 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3738 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3739 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3740 RTE_PTYPE_INNER_L4_ICMP,
3741 /* [139] - [299] reserved */
3744 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3745 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3747 /* PPPoE --> IPv4 */
3748 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3749 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3751 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3752 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3753 RTE_PTYPE_L4_NONFRAG,
3754 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3755 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3757 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3758 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3760 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3761 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3763 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3764 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3767 /* PPPoE --> IPv6 */
3768 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3769 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3771 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3772 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3773 RTE_PTYPE_L4_NONFRAG,
3774 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3775 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3777 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3778 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3780 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3781 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3783 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3784 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3786 /* [314] - [324] reserved */
3788 /* IPv4/IPv6 --> GTPC/GTPU */
3789 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3790 RTE_PTYPE_TUNNEL_GTPC,
3791 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3792 RTE_PTYPE_TUNNEL_GTPC,
3793 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3794 RTE_PTYPE_TUNNEL_GTPC,
3795 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3796 RTE_PTYPE_TUNNEL_GTPC,
3797 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3798 RTE_PTYPE_TUNNEL_GTPU,
3799 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3800 RTE_PTYPE_TUNNEL_GTPU,
3802 /* IPv4 --> GTPU --> IPv4 */
3803 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3804 RTE_PTYPE_TUNNEL_GTPU |
3805 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3806 RTE_PTYPE_INNER_L4_FRAG,
3807 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3808 RTE_PTYPE_TUNNEL_GTPU |
3809 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3810 RTE_PTYPE_INNER_L4_NONFRAG,
3811 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3812 RTE_PTYPE_TUNNEL_GTPU |
3813 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3814 RTE_PTYPE_INNER_L4_UDP,
3815 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3816 RTE_PTYPE_TUNNEL_GTPU |
3817 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3818 RTE_PTYPE_INNER_L4_TCP,
3819 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3820 RTE_PTYPE_TUNNEL_GTPU |
3821 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3822 RTE_PTYPE_INNER_L4_ICMP,
3824 /* IPv6 --> GTPU --> IPv4 */
3825 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3826 RTE_PTYPE_TUNNEL_GTPU |
3827 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3828 RTE_PTYPE_INNER_L4_FRAG,
3829 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3830 RTE_PTYPE_TUNNEL_GTPU |
3831 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3832 RTE_PTYPE_INNER_L4_NONFRAG,
3833 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3834 RTE_PTYPE_TUNNEL_GTPU |
3835 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3836 RTE_PTYPE_INNER_L4_UDP,
3837 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3838 RTE_PTYPE_TUNNEL_GTPU |
3839 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3840 RTE_PTYPE_INNER_L4_TCP,
3841 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3842 RTE_PTYPE_TUNNEL_GTPU |
3843 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3844 RTE_PTYPE_INNER_L4_ICMP,
3846 /* IPv4 --> GTPU --> IPv6 */
3847 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3848 RTE_PTYPE_TUNNEL_GTPU |
3849 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3850 RTE_PTYPE_INNER_L4_FRAG,
3851 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3852 RTE_PTYPE_TUNNEL_GTPU |
3853 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3854 RTE_PTYPE_INNER_L4_NONFRAG,
3855 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3856 RTE_PTYPE_TUNNEL_GTPU |
3857 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3858 RTE_PTYPE_INNER_L4_UDP,
3859 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3860 RTE_PTYPE_TUNNEL_GTPU |
3861 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_INNER_L4_TCP,
3863 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3864 RTE_PTYPE_TUNNEL_GTPU |
3865 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3866 RTE_PTYPE_INNER_L4_ICMP,
3868 /* IPv6 --> GTPU --> IPv6 */
3869 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3870 RTE_PTYPE_TUNNEL_GTPU |
3871 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3872 RTE_PTYPE_INNER_L4_FRAG,
3873 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3874 RTE_PTYPE_TUNNEL_GTPU |
3875 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3876 RTE_PTYPE_INNER_L4_NONFRAG,
3877 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3878 RTE_PTYPE_TUNNEL_GTPU |
3879 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3880 RTE_PTYPE_INNER_L4_UDP,
3881 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3882 RTE_PTYPE_TUNNEL_GTPU |
3883 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3884 RTE_PTYPE_INNER_L4_TCP,
3885 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3886 RTE_PTYPE_TUNNEL_GTPU |
3887 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3888 RTE_PTYPE_INNER_L4_ICMP,
3890 /* IPv4 --> UDP ECPRI */
3891 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3893 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3895 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3897 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3899 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3901 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3903 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3905 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3907 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3909 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3912 /* IPV6 --> UDP ECPRI */
3913 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3915 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3917 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3919 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3921 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3923 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3925 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3927 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3929 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3931 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3933 /* All others reserved */
3936 return ptype_tbl[ptype];
3940 iavf_set_default_ptype_table(struct rte_eth_dev *dev)
3942 struct iavf_adapter *ad =
3943 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3946 for (i = 0; i < IAVF_MAX_PKT_TYPE; i++)
3947 ad->ptype_tbl[i] = iavf_get_default_ptype(i);