1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
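/* Example (illustrative only): with the table above,
 * iavf_proto_xtr_type_to_rxdid(IAVF_PROTO_XTR_IPV4) returns
 * IAVF_RXDID_COMMS_AUX_IPV4, while any out-of-range value falls back
 * to IAVF_RXDID_COMMS_OVS_1, i.e. no protocol extraction.
 */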
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
63 /* The following constraints must be satisfied:
64 * thresh < rxq->nb_rx_desc
66 if (thresh >= nb_desc) {
67 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76 uint16_t tx_free_thresh)
78 /* TX descriptors will have their RS bit set after tx_rs_thresh
79 * descriptors have been used. The TX descriptor ring will be cleaned
80 * after tx_free_thresh descriptors are used or if the number of
81 * descriptors required to transmit a packet is greater than the
82 * number of free TX descriptors.
84 * The following constraints must be satisfied:
85 * - tx_rs_thresh must be less than the size of the ring minus 2.
86 * - tx_free_thresh must be less than the size of the ring minus 3.
87 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
88 * - tx_rs_thresh must be a divisor of the ring size.
90 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91 * race condition, hence the maximum threshold constraints. When set
92 * to zero, the default values are used.
94 if (tx_rs_thresh >= (nb_desc - 2)) {
95 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96 "number of TX descriptors (%u) minus 2",
97 tx_rs_thresh, nb_desc);
100 if (tx_free_thresh >= (nb_desc - 3)) {
101 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102 "number of TX descriptors (%u) minus 3.",
103 tx_free_thresh, nb_desc);
106 if (tx_rs_thresh > tx_free_thresh) {
107 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108 "equal to tx_free_thresh (%u).",
109 tx_rs_thresh, tx_free_thresh);
112 if ((nb_desc % tx_rs_thresh) != 0) {
113 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114 "number of TX descriptors (%u).",
115 tx_rs_thresh, nb_desc);
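/* Worked example (illustrative only, assuming the driver defaults of 32
 * for both thresholds): with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 32 satisfy every check above:
 * 32 < 512 - 2, 32 < 512 - 3, 32 <= 32 and 512 % 32 == 0.
 * A value such as tx_rs_thresh = 48 would be rejected because
 * 512 % 48 != 0.
 */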
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
125 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
131 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
138 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
144 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
153 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155 "rxq->rx_free_thresh=%d, "
156 "IAVF_RX_MAX_BURST=%d",
157 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
159 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161 "rxq->nb_rx_desc=%d, "
162 "rxq->rx_free_thresh=%d",
163 rxq->nb_rx_desc, rxq->rx_free_thresh);
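/* Worked example (illustrative only, assuming IAVF_RX_MAX_BURST is 32):
 * nb_rx_desc = 512 with rx_free_thresh = 32 meets both preconditions
 * checked above, since 32 >= 32 and 512 % 32 == 0, so the bulk
 * allocation receive path may be selected for this queue.
 */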
170 reset_rx_queue(struct iavf_rx_queue *rxq)
178 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
180 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181 ((volatile char *)rxq->rx_ring)[i] = 0;
183 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
185 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
189 rxq->rx_nb_avail = 0;
190 rxq->rx_next_avail = 0;
191 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
195 rxq->pkt_first_seg = NULL;
196 rxq->pkt_last_seg = NULL;
201 reset_tx_queue(struct iavf_tx_queue *txq)
203 struct iavf_tx_entry *txe;
208 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
213 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
214 for (i = 0; i < size; i++)
215 ((volatile char *)txq->tx_ring)[i] = 0;
217 prev = (uint16_t)(txq->nb_tx_desc - 1);
218 for (i = 0; i < txq->nb_tx_desc; i++) {
219 txq->tx_ring[i].cmd_type_offset_bsz =
220 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
223 txe[prev].next_id = i;
230 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
231 txq->nb_free = txq->nb_tx_desc - 1;
233 txq->next_dd = txq->rs_thresh - 1;
234 txq->next_rs = txq->rs_thresh - 1;
238 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
240 volatile union iavf_rx_desc *rxd;
241 struct rte_mbuf *mbuf = NULL;
245 for (i = 0; i < rxq->nb_rx_desc; i++) {
246 mbuf = rte_mbuf_raw_alloc(rxq->mp);
247 if (unlikely(!mbuf)) {
248 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
252 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->port = rxq->port_id;
259 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
261 rxd = &rxq->rx_ring[i];
262 rxd->read.pkt_addr = dma_addr;
263 rxd->read.hdr_addr = 0;
264 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
269 rxq->sw_ring[i] = mbuf;
276 release_rxq_mbufs(struct iavf_rx_queue *rxq)
283 for (i = 0; i < rxq->nb_rx_desc; i++) {
284 if (rxq->sw_ring[i]) {
285 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
286 rxq->sw_ring[i] = NULL;
291 if (rxq->rx_nb_avail == 0)
293 for (i = 0; i < rxq->rx_nb_avail; i++) {
294 struct rte_mbuf *mbuf;
296 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
297 rte_pktmbuf_free_seg(mbuf);
299 rxq->rx_nb_avail = 0;
303 release_txq_mbufs(struct iavf_tx_queue *txq)
307 if (!txq || !txq->sw_ring) {
308 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
312 for (i = 0; i < txq->nb_tx_desc; i++) {
313 if (txq->sw_ring[i].mbuf) {
314 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
315 txq->sw_ring[i].mbuf = NULL;
320 static const struct iavf_rxq_ops def_rxq_ops = {
321 .release_mbufs = release_rxq_mbufs,
324 static const struct iavf_txq_ops def_txq_ops = {
325 .release_mbufs = release_txq_mbufs,
329 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
331 volatile union iavf_rx_flex_desc *rxdp)
333 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
334 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
335 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
339 if (desc->flow_id != 0xFFFFFFFF) {
340 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
341 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
344 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
345 stat_err = rte_le_to_cpu_16(desc->status_error0);
346 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
347 mb->ol_flags |= PKT_RX_RSS_HASH;
348 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
354 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
356 volatile union iavf_rx_flex_desc *rxdp)
358 volatile struct iavf_32b_rx_flex_desc_comms *desc =
359 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
362 stat_err = rte_le_to_cpu_16(desc->status_error0);
363 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
364 mb->ol_flags |= PKT_RX_RSS_HASH;
365 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
368 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
369 if (desc->flow_id != 0xFFFFFFFF) {
370 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
371 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
374 if (rxq->xtr_ol_flag) {
375 uint32_t metadata = 0;
377 stat_err = rte_le_to_cpu_16(desc->status_error1);
379 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
380 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
382 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
384 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
387 mb->ol_flags |= rxq->xtr_ol_flag;
389 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
396 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
398 volatile union iavf_rx_flex_desc *rxdp)
400 volatile struct iavf_32b_rx_flex_desc_comms *desc =
401 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
404 stat_err = rte_le_to_cpu_16(desc->status_error0);
405 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
406 mb->ol_flags |= PKT_RX_RSS_HASH;
407 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
410 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
411 if (desc->flow_id != 0xFFFFFFFF) {
412 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
413 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
416 if (rxq->xtr_ol_flag) {
417 uint32_t metadata = 0;
419 if (desc->flex_ts.flex.aux0 != 0xFFFF)
420 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
421 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
422 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
425 mb->ol_flags |= rxq->xtr_ol_flag;
427 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
434 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
437 case IAVF_RXDID_COMMS_AUX_VLAN:
438 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
439 rxq->rxd_to_pkt_fields =
440 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
442 case IAVF_RXDID_COMMS_AUX_IPV4:
443 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
444 rxq->rxd_to_pkt_fields =
445 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
447 case IAVF_RXDID_COMMS_AUX_IPV6:
448 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
449 rxq->rxd_to_pkt_fields =
450 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
452 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
454 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
455 rxq->rxd_to_pkt_fields =
456 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
458 case IAVF_RXDID_COMMS_AUX_TCP:
459 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
460 rxq->rxd_to_pkt_fields =
461 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
463 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
465 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
466 rxq->rxd_to_pkt_fields =
467 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
469 case IAVF_RXDID_COMMS_OVS_1:
470 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
473 /* update this according to the RXDID for FLEX_DESC_NONE */
474 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
478 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
479 rxq->xtr_ol_flag = 0;
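/* Application-side usage sketch (not part of the driver; assumes the
 * port was started with the iavf "proto_xtr" devargs so the dynamic
 * mbuf field and flags registered above are available):
 *
 *	struct rte_mbuf *mb = rx_pkts[i];
 *	if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
 *	    (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)) {
 *		uint32_t md = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 *		the extracted IPv4 header words can then be parsed from md
 *	}
 */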
483 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
484 uint16_t nb_desc, unsigned int socket_id,
485 const struct rte_eth_rxconf *rx_conf,
486 struct rte_mempool *mp)
488 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
489 struct iavf_adapter *ad =
490 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
491 struct iavf_info *vf =
492 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
493 struct iavf_vsi *vsi = &vf->vsi;
494 struct iavf_rx_queue *rxq;
495 const struct rte_memzone *mz;
499 uint16_t rx_free_thresh;
501 PMD_INIT_FUNC_TRACE();
503 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
504 nb_desc > IAVF_MAX_RING_DESC ||
505 nb_desc < IAVF_MIN_RING_DESC) {
506 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
511 /* Check free threshold */
512 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
513 IAVF_DEFAULT_RX_FREE_THRESH :
514 rx_conf->rx_free_thresh;
515 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
518 /* Free memory if needed */
519 if (dev->data->rx_queues[queue_idx]) {
520 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
521 dev->data->rx_queues[queue_idx] = NULL;
524 /* Allocate the rx queue data structure */
525 rxq = rte_zmalloc_socket("iavf rxq",
526 sizeof(struct iavf_rx_queue),
530 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
531 "rx queue data structure");
535 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
536 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
538 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
539 rxq->proto_xtr = proto_xtr;
541 rxq->rxdid = IAVF_RXDID_LEGACY_1;
542 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
545 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
548 rxq->nb_rx_desc = nb_desc;
549 rxq->rx_free_thresh = rx_free_thresh;
550 rxq->queue_id = queue_idx;
551 rxq->port_id = dev->data->port_id;
552 rxq->crc_len = 0; /* crc stripping by default */
553 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
557 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
558 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
560 /* Allocate the software ring. */
561 len = nb_desc + IAVF_RX_MAX_BURST;
563 rte_zmalloc_socket("iavf rx sw ring",
564 sizeof(struct rte_mbuf *) * len,
568 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
573 /* Allocate the maximum number of RX ring hardware descriptors with
574 * a little extra to support bulk allocation.
576 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
577 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
579 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
580 ring_size, IAVF_RING_BASE_ALIGN,
583 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
584 rte_free(rxq->sw_ring);
588 /* Zero all the descriptors in the ring. */
589 memset(mz->addr, 0, ring_size);
590 rxq->rx_ring_phys_addr = mz->iova;
591 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
596 dev->data->rx_queues[queue_idx] = rxq;
597 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
598 rxq->ops = &def_rxq_ops;
600 if (check_rx_bulk_allow(rxq) == true) {
601 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
602 "satisfied. Rx Burst Bulk Alloc function will be "
603 "used on port=%d, queue=%d.",
604 rxq->port_id, rxq->queue_id);
606 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
607 "not satisfied, Scattered Rx is requested "
608 "on port=%d, queue=%d.",
609 rxq->port_id, rxq->queue_id);
610 ad->rx_bulk_alloc_allowed = false;
613 if (check_rx_vec_allow(rxq) == false)
614 ad->rx_vec_allowed = false;
620 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
623 unsigned int socket_id,
624 const struct rte_eth_txconf *tx_conf)
626 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
627 struct iavf_tx_queue *txq;
628 const struct rte_memzone *mz;
630 uint16_t tx_rs_thresh, tx_free_thresh;
633 PMD_INIT_FUNC_TRACE();
635 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
637 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
638 nb_desc > IAVF_MAX_RING_DESC ||
639 nb_desc < IAVF_MIN_RING_DESC) {
640 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
645 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
646 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
647 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
648 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
649 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
651 /* Free memory if needed. */
652 if (dev->data->tx_queues[queue_idx]) {
653 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
654 dev->data->tx_queues[queue_idx] = NULL;
657 /* Allocate the TX queue data structure. */
658 txq = rte_zmalloc_socket("iavf txq",
659 sizeof(struct iavf_tx_queue),
663 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
664 "tx queue structure");
668 txq->nb_tx_desc = nb_desc;
669 txq->rs_thresh = tx_rs_thresh;
670 txq->free_thresh = tx_free_thresh;
671 txq->queue_id = queue_idx;
672 txq->port_id = dev->data->port_id;
673 txq->offloads = offloads;
674 txq->tx_deferred_start = tx_conf->tx_deferred_start;
676 /* Allocate software ring */
678 rte_zmalloc_socket("iavf tx sw ring",
679 sizeof(struct iavf_tx_entry) * nb_desc,
683 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
688 /* Allocate TX hardware ring descriptors. */
689 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
690 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
691 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
692 ring_size, IAVF_RING_BASE_ALIGN,
695 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
696 rte_free(txq->sw_ring);
700 txq->tx_ring_phys_addr = mz->iova;
701 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
706 dev->data->tx_queues[queue_idx] = txq;
707 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
708 txq->ops = &def_txq_ops;
710 if (check_tx_vec_allow(txq) == false) {
711 struct iavf_adapter *ad =
712 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
713 ad->tx_vec_allowed = false;
720 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
722 struct iavf_adapter *adapter =
723 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
724 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
725 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
726 struct iavf_rx_queue *rxq;
729 PMD_DRV_FUNC_TRACE();
731 if (rx_queue_id >= dev->data->nb_rx_queues)
734 rxq = dev->data->rx_queues[rx_queue_id];
736 err = alloc_rxq_mbufs(rxq);
738 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
744 /* Init the RX tail register. */
745 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
746 IAVF_WRITE_FLUSH(hw);
748 /* Ready to switch the queue on */
750 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
752 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
755 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
758 dev->data->rx_queue_state[rx_queue_id] =
759 RTE_ETH_QUEUE_STATE_STARTED;
765 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
767 struct iavf_adapter *adapter =
768 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
769 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
770 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
771 struct iavf_tx_queue *txq;
774 PMD_DRV_FUNC_TRACE();
776 if (tx_queue_id >= dev->data->nb_tx_queues)
779 txq = dev->data->tx_queues[tx_queue_id];
781 /* Init the TX tail register. */
782 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
783 IAVF_WRITE_FLUSH(hw);
785 /* Ready to switch the queue on */
787 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
789 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
792 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
795 dev->data->tx_queue_state[tx_queue_id] =
796 RTE_ETH_QUEUE_STATE_STARTED;
802 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
804 struct iavf_adapter *adapter =
805 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
806 struct iavf_rx_queue *rxq;
809 PMD_DRV_FUNC_TRACE();
811 if (rx_queue_id >= dev->data->nb_rx_queues)
814 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
816 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
821 rxq = dev->data->rx_queues[rx_queue_id];
822 rxq->ops->release_mbufs(rxq);
824 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
830 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
832 struct iavf_adapter *adapter =
833 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
834 struct iavf_tx_queue *txq;
837 PMD_DRV_FUNC_TRACE();
839 if (tx_queue_id >= dev->data->nb_tx_queues)
842 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
844 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
849 txq = dev->data->tx_queues[tx_queue_id];
850 txq->ops->release_mbufs(txq);
852 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
858 iavf_dev_rx_queue_release(void *rxq)
860 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
865 q->ops->release_mbufs(q);
866 rte_free(q->sw_ring);
867 rte_memzone_free(q->mz);
872 iavf_dev_tx_queue_release(void *txq)
874 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
879 q->ops->release_mbufs(q);
880 rte_free(q->sw_ring);
881 rte_memzone_free(q->mz);
886 iavf_stop_queues(struct rte_eth_dev *dev)
888 struct iavf_adapter *adapter =
889 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
890 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
891 struct iavf_rx_queue *rxq;
892 struct iavf_tx_queue *txq;
895 /* Stop all queues */
896 if (!vf->lv_enabled) {
897 ret = iavf_disable_queues(adapter);
899 PMD_DRV_LOG(WARNING, "Failed to stop queues");
901 ret = iavf_disable_queues_lv(adapter);
903 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
907 PMD_DRV_LOG(WARNING, "Failed to stop queues");
909 for (i = 0; i < dev->data->nb_tx_queues; i++) {
910 txq = dev->data->tx_queues[i];
913 txq->ops->release_mbufs(txq);
915 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
917 for (i = 0; i < dev->data->nb_rx_queues; i++) {
918 rxq = dev->data->rx_queues[i];
921 rxq->ops->release_mbufs(rxq);
923 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
927 #define IAVF_RX_FLEX_ERR0_BITS \
928 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
929 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
930 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
931 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
932 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
933 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
936 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
938 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
939 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
940 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
942 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
949 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
950 volatile union iavf_rx_flex_desc *rxdp)
952 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
953 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
954 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
956 rte_le_to_cpu_16(rxdp->wb.l2tag1);
961 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
962 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
963 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
964 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
965 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
966 mb->vlan_tci_outer = mb->vlan_tci;
967 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
968 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
969 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
970 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
972 mb->vlan_tci_outer = 0;
977 /* Translate the rx descriptor status and error fields to pkt flags */
978 static inline uint64_t
979 iavf_rxd_to_pkt_flags(uint64_t qword)
982 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
984 #define IAVF_RX_ERR_BITS 0x3f
986 /* Check if RSS_HASH */
987 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
988 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
989 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
991 /* Check if FDIR Match */
992 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
995 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
996 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1000 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1001 flags |= PKT_RX_IP_CKSUM_BAD;
1003 flags |= PKT_RX_IP_CKSUM_GOOD;
1005 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1006 flags |= PKT_RX_L4_CKSUM_BAD;
1008 flags |= PKT_RX_L4_CKSUM_GOOD;
1010 /* TODO: Oversize error bit is not processed here */
1015 static inline uint64_t
1016 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1019 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1022 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1023 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1024 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1026 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1028 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1029 flags |= PKT_RX_FDIR_ID;
1033 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1034 flags |= PKT_RX_FDIR_ID;
1039 #define IAVF_RX_FLEX_ERR0_BITS \
1040 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1041 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1042 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1043 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1044 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1045 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1047 /* Rx L3/L4 checksum */
1048 static inline uint64_t
1049 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1053 /* check if HW has decoded the packet and checksum */
1054 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1057 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1058 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1062 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1063 flags |= PKT_RX_IP_CKSUM_BAD;
1065 flags |= PKT_RX_IP_CKSUM_GOOD;
1067 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1068 flags |= PKT_RX_L4_CKSUM_BAD;
1070 flags |= PKT_RX_L4_CKSUM_GOOD;
1072 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1073 flags |= PKT_RX_EIP_CKSUM_BAD;
1078 /* If the number of free RX descriptors is greater than the RX free
1079 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1080 * register. Update the RDT with the value of the last processed RX
1081 * descriptor minus 1, to guarantee that the RDT register is never
1082 * equal to the RDH register, which creates a "full" ring situation
1083 * from the hardware point of view.
1086 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1088 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1090 if (nb_hold > rxq->rx_free_thresh) {
1092 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1093 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1094 rx_id = (uint16_t)((rx_id == 0) ?
1095 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1096 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1099 rxq->nb_rx_hold = nb_hold;
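/* Worked example (illustrative only): with nb_rx_desc = 512 and
 * rx_free_thresh = 32, the tail register is only written once nb_hold
 * exceeds 32. If the last processed descriptor was index 0, the value
 * written is 511 (rx_id - 1 with wrap-around), which guarantees RDT
 * never equals RDH.
 */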
1102 /* implement recv_pkts */
1104 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1106 volatile union iavf_rx_desc *rx_ring;
1107 volatile union iavf_rx_desc *rxdp;
1108 struct iavf_rx_queue *rxq;
1109 union iavf_rx_desc rxd;
1110 struct rte_mbuf *rxe;
1111 struct rte_eth_dev *dev;
1112 struct rte_mbuf *rxm;
1113 struct rte_mbuf *nmb;
1117 uint16_t rx_packet_len;
1118 uint16_t rx_id, nb_hold;
1121 const uint32_t *ptype_tbl;
1126 rx_id = rxq->rx_tail;
1127 rx_ring = rxq->rx_ring;
1128 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1130 while (nb_rx < nb_pkts) {
1131 rxdp = &rx_ring[rx_id];
1132 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1133 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1134 IAVF_RXD_QW1_STATUS_SHIFT;
1136 /* Check the DD bit first */
1137 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1139 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1141 nmb = rte_mbuf_raw_alloc(rxq->mp);
1142 if (unlikely(!nmb)) {
1143 dev = &rte_eth_devices[rxq->port_id];
1144 dev->data->rx_mbuf_alloc_failed++;
1145 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1146 "queue_id=%u", rxq->port_id, rxq->queue_id);
1152 rxe = rxq->sw_ring[rx_id];
1154 if (unlikely(rx_id == rxq->nb_rx_desc))
1157 /* Prefetch next mbuf */
1158 rte_prefetch0(rxq->sw_ring[rx_id]);
1160 /* When next RX descriptor is on a cache line boundary,
1161 * prefetch the next 4 RX descriptors and next 8 pointers
1164 if ((rx_id & 0x3) == 0) {
1165 rte_prefetch0(&rx_ring[rx_id]);
1166 rte_prefetch0(rxq->sw_ring[rx_id]);
1170 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1171 rxdp->read.hdr_addr = 0;
1172 rxdp->read.pkt_addr = dma_addr;
1174 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1175 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1177 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1178 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1181 rxm->pkt_len = rx_packet_len;
1182 rxm->data_len = rx_packet_len;
1183 rxm->port = rxq->port_id;
1185 iavf_rxd_to_vlan_tci(rxm, &rxd);
1186 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1188 ptype_tbl[(uint8_t)((qword1 &
1189 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1191 if (pkt_flags & PKT_RX_RSS_HASH)
1193 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1195 if (pkt_flags & PKT_RX_FDIR)
1196 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1198 rxm->ol_flags |= pkt_flags;
1200 rx_pkts[nb_rx++] = rxm;
1202 rxq->rx_tail = rx_id;
1204 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1209 /* implement recv_pkts for flexible Rx descriptor */
1211 iavf_recv_pkts_flex_rxd(void *rx_queue,
1212 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1214 volatile union iavf_rx_desc *rx_ring;
1215 volatile union iavf_rx_flex_desc *rxdp;
1216 struct iavf_rx_queue *rxq;
1217 union iavf_rx_flex_desc rxd;
1218 struct rte_mbuf *rxe;
1219 struct rte_eth_dev *dev;
1220 struct rte_mbuf *rxm;
1221 struct rte_mbuf *nmb;
1223 uint16_t rx_stat_err0;
1224 uint16_t rx_packet_len;
1225 uint16_t rx_id, nb_hold;
1228 const uint32_t *ptype_tbl;
1233 rx_id = rxq->rx_tail;
1234 rx_ring = rxq->rx_ring;
1235 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1237 while (nb_rx < nb_pkts) {
1238 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1239 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1241 /* Check the DD bit first */
1242 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1244 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1246 nmb = rte_mbuf_raw_alloc(rxq->mp);
1247 if (unlikely(!nmb)) {
1248 dev = &rte_eth_devices[rxq->port_id];
1249 dev->data->rx_mbuf_alloc_failed++;
1250 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1251 "queue_id=%u", rxq->port_id, rxq->queue_id);
1257 rxe = rxq->sw_ring[rx_id];
1259 if (unlikely(rx_id == rxq->nb_rx_desc))
1262 /* Prefetch next mbuf */
1263 rte_prefetch0(rxq->sw_ring[rx_id]);
1265 /* When next RX descriptor is on a cache line boundary,
1266 * prefetch the next 4 RX descriptors and next 8 pointers
1269 if ((rx_id & 0x3) == 0) {
1270 rte_prefetch0(&rx_ring[rx_id]);
1271 rte_prefetch0(rxq->sw_ring[rx_id]);
1275 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1276 rxdp->read.hdr_addr = 0;
1277 rxdp->read.pkt_addr = dma_addr;
1279 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1280 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1282 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1283 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1286 rxm->pkt_len = rx_packet_len;
1287 rxm->data_len = rx_packet_len;
1288 rxm->port = rxq->port_id;
1290 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1291 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1292 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1293 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1294 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1295 rxm->ol_flags |= pkt_flags;
1297 rx_pkts[nb_rx++] = rxm;
1299 rxq->rx_tail = rx_id;
1301 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1306 /* implement recv_scattered_pkts for flexible Rx descriptor */
1308 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1311 struct iavf_rx_queue *rxq = rx_queue;
1312 union iavf_rx_flex_desc rxd;
1313 struct rte_mbuf *rxe;
1314 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1315 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1316 struct rte_mbuf *nmb, *rxm;
1317 uint16_t rx_id = rxq->rx_tail;
1318 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1319 struct rte_eth_dev *dev;
1320 uint16_t rx_stat_err0;
1324 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1325 volatile union iavf_rx_flex_desc *rxdp;
1326 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1328 while (nb_rx < nb_pkts) {
1329 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1330 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1332 /* Check the DD bit */
1333 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1335 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1337 nmb = rte_mbuf_raw_alloc(rxq->mp);
1338 if (unlikely(!nmb)) {
1339 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1340 "queue_id=%u", rxq->port_id, rxq->queue_id);
1341 dev = &rte_eth_devices[rxq->port_id];
1342 dev->data->rx_mbuf_alloc_failed++;
1348 rxe = rxq->sw_ring[rx_id];
1350 if (rx_id == rxq->nb_rx_desc)
1353 /* Prefetch next mbuf */
1354 rte_prefetch0(rxq->sw_ring[rx_id]);
1356 /* When next RX descriptor is on a cache line boundary,
1357 * prefetch the next 4 RX descriptors and next 8 pointers
1360 if ((rx_id & 0x3) == 0) {
1361 rte_prefetch0(&rx_ring[rx_id]);
1362 rte_prefetch0(rxq->sw_ring[rx_id]);
1367 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1369 /* Set data buffer address and data length of the mbuf */
1370 rxdp->read.hdr_addr = 0;
1371 rxdp->read.pkt_addr = dma_addr;
1372 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1373 IAVF_RX_FLX_DESC_PKT_LEN_M;
1374 rxm->data_len = rx_packet_len;
1375 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1377 /* If this is the first buffer of the received packet, set the
1378 * pointer to the first mbuf of the packet and initialize its
1379 * context. Otherwise, update the total length and the number
1380 * of segments of the current scattered packet, and update the
1381 * pointer to the last mbuf of the current packet.
1385 first_seg->nb_segs = 1;
1386 first_seg->pkt_len = rx_packet_len;
1388 first_seg->pkt_len =
1389 (uint16_t)(first_seg->pkt_len +
1391 first_seg->nb_segs++;
1392 last_seg->next = rxm;
1395 /* If this is not the last buffer of the received packet,
1396 * update the pointer to the last mbuf of the current scattered
1397 * packet and continue to parse the RX ring.
1399 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1404 /* This is the last buffer of the received packet. If the CRC
1405 * is not stripped by the hardware:
1406 * - Subtract the CRC length from the total packet length.
1407 * - If the last buffer only contains the whole CRC or a part
1408 * of it, free the mbuf associated to the last buffer. If part
1409 * of the CRC is also contained in the previous mbuf, subtract
1410 * the length of that CRC part from the data length of the
1414 if (unlikely(rxq->crc_len > 0)) {
1415 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1416 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1417 rte_pktmbuf_free_seg(rxm);
1418 first_seg->nb_segs--;
1419 last_seg->data_len =
1420 (uint16_t)(last_seg->data_len -
1421 (RTE_ETHER_CRC_LEN - rx_packet_len));
1422 last_seg->next = NULL;
1424 rxm->data_len = (uint16_t)(rx_packet_len -
1429 first_seg->port = rxq->port_id;
1430 first_seg->ol_flags = 0;
1431 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1432 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1433 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1434 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1435 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1437 first_seg->ol_flags |= pkt_flags;
1439 /* Prefetch data of first segment, if configured to do so. */
1440 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1441 first_seg->data_off));
1442 rx_pkts[nb_rx++] = first_seg;
1446 /* Record index of the next RX descriptor to probe. */
1447 rxq->rx_tail = rx_id;
1448 rxq->pkt_first_seg = first_seg;
1449 rxq->pkt_last_seg = last_seg;
1451 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1456 /* implement recv_scattered_pkts */
1458 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1461 struct iavf_rx_queue *rxq = rx_queue;
1462 union iavf_rx_desc rxd;
1463 struct rte_mbuf *rxe;
1464 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1465 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1466 struct rte_mbuf *nmb, *rxm;
1467 uint16_t rx_id = rxq->rx_tail;
1468 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1469 struct rte_eth_dev *dev;
1475 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1476 volatile union iavf_rx_desc *rxdp;
1477 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1479 while (nb_rx < nb_pkts) {
1480 rxdp = &rx_ring[rx_id];
1481 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1482 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1483 IAVF_RXD_QW1_STATUS_SHIFT;
1485 /* Check the DD bit */
1486 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1488 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1490 nmb = rte_mbuf_raw_alloc(rxq->mp);
1491 if (unlikely(!nmb)) {
1492 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1493 "queue_id=%u", rxq->port_id, rxq->queue_id);
1494 dev = &rte_eth_devices[rxq->port_id];
1495 dev->data->rx_mbuf_alloc_failed++;
1501 rxe = rxq->sw_ring[rx_id];
1503 if (rx_id == rxq->nb_rx_desc)
1506 /* Prefetch next mbuf */
1507 rte_prefetch0(rxq->sw_ring[rx_id]);
1509 /* When next RX descriptor is on a cache line boundary,
1510 * prefetch the next 4 RX descriptors and next 8 pointers
1513 if ((rx_id & 0x3) == 0) {
1514 rte_prefetch0(&rx_ring[rx_id]);
1515 rte_prefetch0(rxq->sw_ring[rx_id]);
1520 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1522 /* Set data buffer address and data length of the mbuf */
1523 rxdp->read.hdr_addr = 0;
1524 rxdp->read.pkt_addr = dma_addr;
1525 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1526 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1527 rxm->data_len = rx_packet_len;
1528 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1530 /* If this is the first buffer of the received packet, set the
1531 * pointer to the first mbuf of the packet and initialize its
1532 * context. Otherwise, update the total length and the number
1533 * of segments of the current scattered packet, and update the
1534 * pointer to the last mbuf of the current packet.
1538 first_seg->nb_segs = 1;
1539 first_seg->pkt_len = rx_packet_len;
1541 first_seg->pkt_len =
1542 (uint16_t)(first_seg->pkt_len +
1544 first_seg->nb_segs++;
1545 last_seg->next = rxm;
1548 /* If this is not the last buffer of the received packet,
1549 * update the pointer to the last mbuf of the current scattered
1550 * packet and continue to parse the RX ring.
1552 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1557 /* This is the last buffer of the received packet. If the CRC
1558 * is not stripped by the hardware:
1559 * - Subtract the CRC length from the total packet length.
1560 * - If the last buffer only contains the whole CRC or a part
1561 * of it, free the mbuf associated to the last buffer. If part
1562 * of the CRC is also contained in the previous mbuf, subtract
1563 * the length of that CRC part from the data length of the
1567 if (unlikely(rxq->crc_len > 0)) {
1568 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1569 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1570 rte_pktmbuf_free_seg(rxm);
1571 first_seg->nb_segs--;
1572 last_seg->data_len =
1573 (uint16_t)(last_seg->data_len -
1574 (RTE_ETHER_CRC_LEN - rx_packet_len));
1575 last_seg->next = NULL;
1577 rxm->data_len = (uint16_t)(rx_packet_len -
1581 first_seg->port = rxq->port_id;
1582 first_seg->ol_flags = 0;
1583 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1584 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1585 first_seg->packet_type =
1586 ptype_tbl[(uint8_t)((qword1 &
1587 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1589 if (pkt_flags & PKT_RX_RSS_HASH)
1590 first_seg->hash.rss =
1591 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1593 if (pkt_flags & PKT_RX_FDIR)
1594 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1596 first_seg->ol_flags |= pkt_flags;
1598 /* Prefetch data of first segment, if configured to do so. */
1599 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1600 first_seg->data_off));
1601 rx_pkts[nb_rx++] = first_seg;
1605 /* Record index of the next RX descriptor to probe. */
1606 rxq->rx_tail = rx_id;
1607 rxq->pkt_first_seg = first_seg;
1608 rxq->pkt_last_seg = last_seg;
1610 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1615 #define IAVF_LOOK_AHEAD 8
1617 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1619 volatile union iavf_rx_flex_desc *rxdp;
1620 struct rte_mbuf **rxep;
1621 struct rte_mbuf *mb;
1624 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1625 int32_t i, j, nb_rx = 0;
1627 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1629 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1630 rxep = &rxq->sw_ring[rxq->rx_tail];
1632 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1634 /* Make sure there is at least 1 packet to receive */
1635 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1638 /* Scan LOOK_AHEAD descriptors at a time to determine which
1639 * descriptors reference packets that are ready to be received.
1641 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1642 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1643 /* Read desc statuses backwards to avoid race condition */
1644 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1645 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1649 /* Compute how many status bits were set */
1650 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1651 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1655 /* Translate descriptor info to mbuf parameters */
1656 for (j = 0; j < nb_dd; j++) {
1657 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1659 i * IAVF_LOOK_AHEAD + j);
1662 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1663 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1664 mb->data_len = pkt_len;
1665 mb->pkt_len = pkt_len;
1668 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1669 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1670 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1671 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1672 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1673 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1675 mb->ol_flags |= pkt_flags;
1678 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1679 rxq->rx_stage[i + j] = rxep[j];
1681 if (nb_dd != IAVF_LOOK_AHEAD)
1685 /* Clear software ring entries */
1686 for (i = 0; i < nb_rx; i++)
1687 rxq->sw_ring[rxq->rx_tail + i] = NULL;
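/* Illustrative note: the DD bit is bit 0 of status_error0, so
 * s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S) is 0 or 1 per descriptor
 * and nb_dd counts the completed descriptors in each group of
 * IAVF_LOOK_AHEAD (8). If, say, only 5 descriptors of a group are done,
 * nb_dd = 5 and the scan stops after translating those 5.
 */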
1693 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1695 volatile union iavf_rx_desc *rxdp;
1696 struct rte_mbuf **rxep;
1697 struct rte_mbuf *mb;
1701 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1702 int32_t i, j, nb_rx = 0;
1704 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1706 rxdp = &rxq->rx_ring[rxq->rx_tail];
1707 rxep = &rxq->sw_ring[rxq->rx_tail];
1709 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1710 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1711 IAVF_RXD_QW1_STATUS_SHIFT;
1713 /* Make sure there is at least 1 packet to receive */
1714 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1717 /* Scan LOOK_AHEAD descriptors at a time to determine which
1718 * descriptors reference packets that are ready to be received.
1720 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1721 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1722 /* Read desc statuses backwards to avoid race condition */
1723 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1724 qword1 = rte_le_to_cpu_64(
1725 rxdp[j].wb.qword1.status_error_len);
1726 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1727 IAVF_RXD_QW1_STATUS_SHIFT;
1732 /* Compute how many status bits were set */
1733 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1734 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1738 /* Translate descriptor info to mbuf parameters */
1739 for (j = 0; j < nb_dd; j++) {
1740 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1741 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1744 qword1 = rte_le_to_cpu_64
1745 (rxdp[j].wb.qword1.status_error_len);
1746 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1747 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1748 mb->data_len = pkt_len;
1749 mb->pkt_len = pkt_len;
1751 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1752 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1754 ptype_tbl[(uint8_t)((qword1 &
1755 IAVF_RXD_QW1_PTYPE_MASK) >>
1756 IAVF_RXD_QW1_PTYPE_SHIFT)];
1758 if (pkt_flags & PKT_RX_RSS_HASH)
1759 mb->hash.rss = rte_le_to_cpu_32(
1760 rxdp[j].wb.qword0.hi_dword.rss);
1762 if (pkt_flags & PKT_RX_FDIR)
1763 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1765 mb->ol_flags |= pkt_flags;
1768 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1769 rxq->rx_stage[i + j] = rxep[j];
1771 if (nb_dd != IAVF_LOOK_AHEAD)
1775 /* Clear software ring entries */
1776 for (i = 0; i < nb_rx; i++)
1777 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1782 static inline uint16_t
1783 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1784 struct rte_mbuf **rx_pkts,
1788 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1790 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1792 for (i = 0; i < nb_pkts; i++)
1793 rx_pkts[i] = stage[i];
1795 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1796 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1802 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1804 volatile union iavf_rx_desc *rxdp;
1805 struct rte_mbuf **rxep;
1806 struct rte_mbuf *mb;
1807 uint16_t alloc_idx, i;
1811 /* Allocate buffers in bulk */
1812 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1813 (rxq->rx_free_thresh - 1));
1814 rxep = &rxq->sw_ring[alloc_idx];
1815 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1816 rxq->rx_free_thresh);
1817 if (unlikely(diag != 0)) {
1818 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1822 rxdp = &rxq->rx_ring[alloc_idx];
1823 for (i = 0; i < rxq->rx_free_thresh; i++) {
1824 if (likely(i < (rxq->rx_free_thresh - 1)))
1825 /* Prefetch next mbuf */
1826 rte_prefetch0(rxep[i + 1]);
1829 rte_mbuf_refcnt_set(mb, 1);
1831 mb->data_off = RTE_PKTMBUF_HEADROOM;
1833 mb->port = rxq->port_id;
1834 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1835 rxdp[i].read.hdr_addr = 0;
1836 rxdp[i].read.pkt_addr = dma_addr;
1839 /* Update rx tail register */
1841 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1843 rxq->rx_free_trigger =
1844 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1845 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1846 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
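/* Worked example (illustrative only): right after queue reset,
 * rx_free_trigger = rx_free_thresh - 1 (31 for a threshold of 32), so
 * alloc_idx = 31 - 31 = 0 and the first bulk refill covers descriptors
 * 0..31. The trigger then advances to 63, 95, ... and wraps back to 31
 * after passing the end of the ring.
 */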
1851 static inline uint16_t
1852 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1854 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1860 if (rxq->rx_nb_avail)
1861 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1863 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1864 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1866 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1867 rxq->rx_next_avail = 0;
1868 rxq->rx_nb_avail = nb_rx;
1869 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1871 if (rxq->rx_tail > rxq->rx_free_trigger) {
1872 if (iavf_rx_alloc_bufs(rxq) != 0) {
1875 /* TODO: count rx_mbuf_alloc_failed here */
1877 rxq->rx_nb_avail = 0;
1878 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1879 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1880 rxq->sw_ring[j] = rxq->rx_stage[i];
1886 if (rxq->rx_tail >= rxq->nb_rx_desc)
1889 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1890 rxq->port_id, rxq->queue_id,
1891 rxq->rx_tail, nb_rx);
1893 if (rxq->rx_nb_avail)
1894 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1900 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1901 struct rte_mbuf **rx_pkts,
1904 uint16_t nb_rx = 0, n, count;
1906 if (unlikely(nb_pkts == 0))
1909 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1910 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1913 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1914 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1915 nb_rx = (uint16_t)(nb_rx + count);
1916 nb_pkts = (uint16_t)(nb_pkts - count);
1925 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1927 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1928 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1929 uint16_t nb_tx_desc = txq->nb_tx_desc;
1930 uint16_t desc_to_clean_to;
1931 uint16_t nb_tx_to_clean;
1933 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1935 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1936 if (desc_to_clean_to >= nb_tx_desc)
1937 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1939 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1940 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1941 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1942 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1943 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1944 "(port=%d queue=%d)", desc_to_clean_to,
1945 txq->port_id, txq->queue_id);
1949 if (last_desc_cleaned > desc_to_clean_to)
1950 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1953 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1956 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1958 txq->last_desc_cleaned = desc_to_clean_to;
1959 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
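/* Worked example (illustrative only, assuming single-descriptor packets
 * so sw_ring[].last_id maps an entry to itself): with nb_tx_desc = 512,
 * rs_thresh = 32 and last_desc_cleaned = 500, desc_to_clean_to wraps to
 * 532 - 512 = 20. If that descriptor reports DESC_DONE,
 * nb_tx_to_clean = (512 - 500) + 20 = 32 and nb_free grows by 32.
 */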
1964 /* Check if the context descriptor is needed for TX offloading */
1965 static inline uint16_t
1966 iavf_calc_context_desc(uint64_t flags)
1968 static uint64_t mask = PKT_TX_TCP_SEG;
1970 return (flags & mask) ? 1 : 0;
1974 iavf_txd_enable_checksum(uint64_t ol_flags,
1976 uint32_t *td_offset,
1977 union iavf_tx_offload tx_offload)
1980 *td_offset |= (tx_offload.l2_len >> 1) <<
1981 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1983 /* Enable L3 checksum offloads */
1984 if (ol_flags & PKT_TX_IP_CKSUM) {
1985 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1986 *td_offset |= (tx_offload.l3_len >> 2) <<
1987 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1988 } else if (ol_flags & PKT_TX_IPV4) {
1989 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1990 *td_offset |= (tx_offload.l3_len >> 2) <<
1991 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1992 } else if (ol_flags & PKT_TX_IPV6) {
1993 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1994 *td_offset |= (tx_offload.l3_len >> 2) <<
1995 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1998 if (ol_flags & PKT_TX_TCP_SEG) {
1999 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2000 *td_offset |= (tx_offload.l4_len >> 2) <<
2001 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2005 /* Enable L4 checksum offloads */
2006 switch (ol_flags & PKT_TX_L4_MASK) {
2007 case PKT_TX_TCP_CKSUM:
2008 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2009 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2010 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2012 case PKT_TX_SCTP_CKSUM:
2013 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2014 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2015 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2017 case PKT_TX_UDP_CKSUM:
2018 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2019 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2020 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
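/* Worked example (illustrative only): a TCP/IPv4 packet with
 * l2_len = 14, l3_len = 20 and PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM set
 * programs MACLEN = 14 >> 1 = 7 (2-byte words),
 * IPLEN = 20 >> 2 = 5 (4-byte words) and
 * L4LEN = sizeof(struct rte_tcp_hdr) >> 2 = 5, with IIPT set to
 * IPv4-with-checksum and L4T set to TCP.
 */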
2027 /* Set the TSO context descriptor.
2028 * Supports IP -> L4 and IP -> IP -> L4.
2030 static inline uint64_t
2031 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2033 uint64_t ctx_desc = 0;
2034 uint32_t cd_cmd, hdr_len, cd_tso_len;
2036 if (!tx_offload.l4_len) {
2037 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2041 hdr_len = tx_offload.l2_len +
2045 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2046 cd_tso_len = mbuf->pkt_len - hdr_len;
2047 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2048 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2049 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
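/* Worked example (illustrative only): a 9014-byte TSO mbuf with
 * l2_len = 14, l3_len = 20, l4_len = 20 and tso_segsz = 1460 gives
 * hdr_len = 54 and cd_tso_len = 9014 - 54 = 8960 payload bytes, which
 * the hardware segments into 1460-byte MSS chunks.
 */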
2054 /* Construct the tx flags */
2055 static inline uint64_t
2056 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2059 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2060 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2061 ((uint64_t)td_offset <<
2062 IAVF_TXD_QW1_OFFSET_SHIFT) |
2064 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2065 ((uint64_t)td_tag <<
2066 IAVF_TXD_QW1_L2TAG1_SHIFT));
2071 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2073 volatile struct iavf_tx_desc *txd;
2074 volatile struct iavf_tx_desc *txr;
2075 struct iavf_tx_queue *txq;
2076 struct iavf_tx_entry *sw_ring;
2077 struct iavf_tx_entry *txe, *txn;
2078 struct rte_mbuf *tx_pkt;
2079 struct rte_mbuf *m_seg;
2090 uint64_t buf_dma_addr;
2091 union iavf_tx_offload tx_offload = {0};
2094 sw_ring = txq->sw_ring;
2096 tx_id = txq->tx_tail;
2097 txe = &sw_ring[tx_id];
2099 /* Check if the descriptor ring needs to be cleaned. */
2100 if (txq->nb_free < txq->free_thresh)
2101 (void)iavf_xmit_cleanup(txq);
2103 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2108 tx_pkt = *tx_pkts++;
2109 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2111 ol_flags = tx_pkt->ol_flags;
2112 tx_offload.l2_len = tx_pkt->l2_len;
2113 tx_offload.l3_len = tx_pkt->l3_len;
2114 tx_offload.l4_len = tx_pkt->l4_len;
2115 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2116 /* Calculate the number of context descriptors needed. */
2117 nb_ctx = iavf_calc_context_desc(ol_flags);
2119 /* The number of descriptors that must be allocated for
2120 * a packet equals the number of segments of that
2121 * packet, plus one context descriptor if needed.
2123 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2124 tx_last = (uint16_t)(tx_id + nb_used - 1);
2127 if (tx_last >= txq->nb_tx_desc)
2128 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2130 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2131 " tx_first=%u tx_last=%u",
2132 txq->port_id, txq->queue_id, tx_id, tx_last);
2134 if (nb_used > txq->nb_free) {
2135 if (iavf_xmit_cleanup(txq)) {
2140 if (unlikely(nb_used > txq->rs_thresh)) {
2141 while (nb_used > txq->nb_free) {
2142 if (iavf_xmit_cleanup(txq)) {
2151 /* Descriptor based VLAN insertion */
2152 if (ol_flags & PKT_TX_VLAN_PKT) {
2153 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2154 td_tag = tx_pkt->vlan_tci;
2157 /* According to the datasheet, bit 2 is reserved and must be
2162 /* Enable checksum offloading */
2163 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2164 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2165 &td_offset, tx_offload);
2168 /* Setup TX context descriptor if required */
2169 uint64_t cd_type_cmd_tso_mss =
2170 IAVF_TX_DESC_DTYPE_CONTEXT;
2171 volatile struct iavf_tx_context_desc *ctx_txd =
2172 (volatile struct iavf_tx_context_desc *)
2175 txn = &sw_ring[txe->next_id];
2176 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2178 rte_pktmbuf_free_seg(txe->mbuf);
2183 if (ol_flags & PKT_TX_TCP_SEG)
2184 cd_type_cmd_tso_mss |=
2185 iavf_set_tso_ctx(tx_pkt, tx_offload);
2187 ctx_txd->type_cmd_tso_mss =
2188 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2190 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2191 txe->last_id = tx_last;
2192 tx_id = txe->next_id;
2199 txn = &sw_ring[txe->next_id];
2202 rte_pktmbuf_free_seg(txe->mbuf);
2205 /* Setup TX Descriptor */
2206 slen = m_seg->data_len;
2207 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2208 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2209 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2214 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2215 txe->last_id = tx_last;
2216 tx_id = txe->next_id;
2218 m_seg = m_seg->next;
2221 /* The last packet data descriptor needs End Of Packet (EOP) */
2222 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2223 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2224 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2226 if (txq->nb_used >= txq->rs_thresh) {
2227 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2228 "%4u (port=%d queue=%d)",
2229 tx_last, txq->port_id, txq->queue_id);
2231 td_cmd |= IAVF_TX_DESC_CMD_RS;
2233 /* Update txq RS bit counters */
2237 txd->cmd_type_offset_bsz |=
2238 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2239 IAVF_TXD_QW1_CMD_SHIFT);
2240 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2246 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2247 txq->port_id, txq->queue_id, tx_id, nb_tx);
2249 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2250 txq->tx_tail = tx_id;
2255 /* TX prep functions */
2257 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2264 for (i = 0; i < nb_pkts; i++) {
2266 ol_flags = m->ol_flags;
2268 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2269 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2270 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2274 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2275 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2276 /* An MSS outside this range is considered malicious */
2281 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2282 rte_errno = ENOTSUP;
2286 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2287 ret = rte_validate_tx_offload(m);
2293 ret = rte_net_intel_cksum_prepare(m);
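/* Application-side usage sketch (not part of the driver): the prepare
 * callback is normally reached through the generic ethdev API before
 * transmission, e.g.:
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	if (n > 0)
 *		n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * Packets rejected here leave rte_errno set (e.g. ENOTSUP for an
 * unsupported offload) so the application can drop or fix them before
 * they reach the hardware.
 */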
2303 /* choose rx function */
2305 iavf_set_rx_function(struct rte_eth_dev *dev)
2307 struct iavf_adapter *adapter =
2308 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2309 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2312 struct iavf_rx_queue *rxq;
2314 bool use_avx2 = false;
2315 #ifdef CC_AVX512_SUPPORT
2316 bool use_avx512 = false;
2319 if (!iavf_rx_vec_dev_check(dev) &&
2320 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2321 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2322 rxq = dev->data->rx_queues[i];
2323 (void)iavf_rxq_vec_setup(rxq);
2326 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2327 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2328 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2330 #ifdef CC_AVX512_SUPPORT
2331 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2332 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2333 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2337 if (dev->data->scattered_rx) {
2339 "Using %sVector Scattered Rx (port %d).",
2340 use_avx2 ? "avx2 " : "",
2341 dev->data->port_id);
2342 if (vf->vf_res->vf_cap_flags &
2343 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2344 dev->rx_pkt_burst = use_avx2 ?
2345 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2346 iavf_recv_scattered_pkts_vec_flex_rxd;
2347 #ifdef CC_AVX512_SUPPORT
2350 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2353 dev->rx_pkt_burst = use_avx2 ?
2354 iavf_recv_scattered_pkts_vec_avx2 :
2355 iavf_recv_scattered_pkts_vec;
2356 #ifdef CC_AVX512_SUPPORT
2359 iavf_recv_scattered_pkts_vec_avx512;
2363 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2364 use_avx2 ? "avx2 " : "",
2365 dev->data->port_id);
2366 if (vf->vf_res->vf_cap_flags &
2367 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2368 dev->rx_pkt_burst = use_avx2 ?
2369 iavf_recv_pkts_vec_avx2_flex_rxd :
2370 iavf_recv_pkts_vec_flex_rxd;
2371 #ifdef CC_AVX512_SUPPORT
2374 iavf_recv_pkts_vec_avx512_flex_rxd;
2377 dev->rx_pkt_burst = use_avx2 ?
2378 iavf_recv_pkts_vec_avx2 :
2380 #ifdef CC_AVX512_SUPPORT
2383 iavf_recv_pkts_vec_avx512;
2392 if (dev->data->scattered_rx) {
2393 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2394 dev->data->port_id);
2395 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2396 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2398 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2399 } else if (adapter->rx_bulk_alloc_allowed) {
2400 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2401 dev->data->port_id);
2402 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2404 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2405 dev->data->port_id);
2406 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2407 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2409 dev->rx_pkt_burst = iavf_recv_pkts;
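/* Selection summary: when every Rx queue passes the vector check and the
 * configured SIMD bitwidth allows it, a vector burst function is used
 * (AVX512 when compiled in and supported, otherwise AVX2, otherwise the
 * default SSE path), in scattered and/or flexible-RXD flavors as needed;
 * otherwise the driver falls back to the scattered, bulk-allocation or
 * basic scalar receive functions chosen above.
 */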
2413 /* Choose the Tx burst function. */
2415 iavf_set_tx_function(struct rte_eth_dev *dev)
2418 struct iavf_tx_queue *txq;
2420 bool use_avx2 = false;
2421 #ifdef CC_AVX512_SUPPORT
2422 bool use_avx512 = false;
2425 if (!iavf_tx_vec_dev_check(dev) &&
2426 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2427 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2428 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2429 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2431 #ifdef CC_AVX512_SUPPORT
2432 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2433 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2434 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2438 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2439 use_avx2 ? "avx2 " : "",
2440 dev->data->port_id);
2441 dev->tx_pkt_burst = use_avx2 ?
2442 iavf_xmit_pkts_vec_avx2 :
2444 #ifdef CC_AVX512_SUPPORT
2446 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2448 dev->tx_pkt_prepare = NULL;
2450 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2451 txq = dev->data->tx_queues[i];
2454 #ifdef CC_AVX512_SUPPORT
2456 iavf_txq_vec_setup_avx512(txq);
2458 iavf_txq_vec_setup(txq);
2460 iavf_txq_vec_setup(txq);
2468 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2469 dev->data->port_id);
2470 dev->tx_pkt_burst = iavf_xmit_pkts;
2471 dev->tx_pkt_prepare = iavf_prep_pkts;
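/* Unlike the scalar path just above, the vector Tx path leaves
 * dev->tx_pkt_prepare as NULL, so rte_eth_tx_prepare() becomes a no-op for
 * it; only the basic iavf_xmit_pkts() path is paired with iavf_prep_pkts().
 */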
2475 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2478 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2479 uint16_t i, tx_last, tx_id;
2480 uint16_t nb_tx_free_last;
2481 uint16_t nb_tx_to_clean;
2484 /* Start freeing mbufs from the entry following tx_tail. */
2485 tx_last = txq->tx_tail;
2486 tx_id = swr_ring[tx_last].next_id;
2488 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2491 nb_tx_to_clean = txq->nb_free;
2492 nb_tx_free_last = txq->nb_free;
2494 free_cnt = txq->nb_tx_desc;
2496 /* Loop through swr_ring to count the number of
2497 * freeable mbufs and packets.
2499 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2500 for (i = 0; i < nb_tx_to_clean &&
2501 pkt_cnt < free_cnt &&
2502 tx_id != tx_last; i++) {
2503 if (swr_ring[tx_id].mbuf != NULL) {
2504 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2505 swr_ring[tx_id].mbuf = NULL;
2508 /* If this is the last segment of a packet, increment the packet count. */
2511 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2514 tx_id = swr_ring[tx_id].next_id;
2517 if (txq->rs_thresh > txq->nb_tx_desc -
2518 txq->nb_free || tx_id == tx_last)
2521 if (pkt_cnt < free_cnt) {
2522 if (iavf_xmit_cleanup(txq))
2525 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2526 nb_tx_free_last = txq->nb_free;
2530 return (int)pkt_cnt;
2534 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2536 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2538 return iavf_tx_done_cleanup_full(q, free_cnt);
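/* This callback is reached through rte_eth_tx_done_cleanup(), which lets an
 * application reclaim already-transmitted mbufs outside of the Tx burst
 * path, e.g. (illustrative; a count of 0 means "free as many as possible"):
 *
 *   int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 64);
 */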
2542 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2543 struct rte_eth_rxq_info *qinfo)
2545 struct iavf_rx_queue *rxq;
2547 rxq = dev->data->rx_queues[queue_id];
2549 qinfo->mp = rxq->mp;
2550 qinfo->scattered_rx = dev->data->scattered_rx;
2551 qinfo->nb_desc = rxq->nb_rx_desc;
2553 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2554 qinfo->conf.rx_drop_en = true;
2555 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2559 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2560 struct rte_eth_txq_info *qinfo)
2562 struct iavf_tx_queue *txq;
2564 txq = dev->data->tx_queues[queue_id];
2566 qinfo->nb_desc = txq->nb_tx_desc;
2568 qinfo->conf.tx_free_thresh = txq->free_thresh;
2569 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2570 qinfo->conf.offloads = txq->offloads;
2571 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
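/* These two helpers back rte_eth_rx_queue_info_get() and
 * rte_eth_tx_queue_info_get(); they report configuration snapshots
 * (descriptor counts, thresholds, offloads, deferred start), not runtime
 * statistics.
 */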
2574 /* Get the number of used descriptors of an Rx queue. */
2576 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2578 #define IAVF_RXQ_SCAN_INTERVAL 4
2579 volatile union iavf_rx_desc *rxdp;
2580 struct iavf_rx_queue *rxq;
2583 rxq = dev->data->rx_queues[queue_id];
2584 rxdp = &rxq->rx_ring[rxq->rx_tail];
2586 while ((desc < rxq->nb_rx_desc) &&
2587 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2588 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2589 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2590 /* Check the DD bit of every fourth Rx descriptor to avoid checking
2591 * too frequently and degrading performance. */
2594 desc += IAVF_RXQ_SCAN_INTERVAL;
2595 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2596 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2597 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2598 desc - rxq->nb_rx_desc]);
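/* Because the scan advances IAVF_RXQ_SCAN_INTERVAL descriptors per DD-bit
 * check, the count returned through rte_eth_rx_queue_count() is a
 * multiple-of-4 estimate of the descriptors completed by hardware, trading
 * a little accuracy for a much cheaper poll.
 */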
2605 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2607 struct iavf_rx_queue *rxq = rx_queue;
2608 volatile uint64_t *status;
2612 if (unlikely(offset >= rxq->nb_rx_desc))
2615 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2616 return RTE_ETH_RX_DESC_UNAVAIL;
2618 desc = rxq->rx_tail + offset;
2619 if (desc >= rxq->nb_rx_desc)
2620 desc -= rxq->nb_rx_desc;
2622 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2623 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2624 << IAVF_RXD_QW1_STATUS_SHIFT);
2626 return RTE_ETH_RX_DESC_DONE;
2628 return RTE_ETH_RX_DESC_AVAIL;
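/* Exposed via rte_eth_rx_descriptor_status(): DONE means the descriptor at
 * the given offset has been written back by hardware and its packet is
 * waiting to be received, AVAIL means it is still owned by the device, and
 * UNAVAIL covers offsets past the usable ring entries. For example
 * (illustrative; process_backlog() stands for application code), a caller
 * can check whether a full burst is already waiting:
 *
 *   if (rte_eth_rx_descriptor_status(port_id, queue_id, 32) ==
 *           RTE_ETH_RX_DESC_DONE)
 *       process_backlog();
 */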
2632 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2634 struct iavf_tx_queue *txq = tx_queue;
2635 volatile uint64_t *status;
2636 uint64_t mask, expect;
2639 if (unlikely(offset >= txq->nb_tx_desc))
2642 desc = txq->tx_tail + offset;
2643 /* Go to the next descriptor that has the RS bit set. */
2644 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2646 if (desc >= txq->nb_tx_desc) {
2647 desc -= txq->nb_tx_desc;
2648 if (desc >= txq->nb_tx_desc)
2649 desc -= txq->nb_tx_desc;
2652 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2653 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2654 expect = rte_cpu_to_le_64(
2655 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2656 if ((*status & mask) == expect)
2657 return RTE_ETH_TX_DESC_DONE;
2659 return RTE_ETH_TX_DESC_FULL;
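/* Completion is only written back on descriptors that carried the RS bit,
 * which is why the requested offset is first rounded up to the next
 * rs_thresh boundary above; the value returned through
 * rte_eth_tx_descriptor_status() therefore describes that whole group of
 * descriptors rather than the single one at the offset.
 */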
2663 iavf_get_default_ptype_table(void)
2665 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2666 __rte_cache_aligned = {
2669 [1] = RTE_PTYPE_L2_ETHER,
2670 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2671 /* [3] - [5] reserved */
2672 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2673 /* [7] - [10] reserved */
2674 [11] = RTE_PTYPE_L2_ETHER_ARP,
2675 /* [12] - [21] reserved */
2677 /* Non-tunneled IPv4 */
2678 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2681 RTE_PTYPE_L4_NONFRAG,
2682 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2685 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2687 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2689 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2693 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2694 RTE_PTYPE_TUNNEL_IP |
2695 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2696 RTE_PTYPE_INNER_L4_FRAG,
2697 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2698 RTE_PTYPE_TUNNEL_IP |
2699 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2700 RTE_PTYPE_INNER_L4_NONFRAG,
2701 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2702 RTE_PTYPE_TUNNEL_IP |
2703 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2704 RTE_PTYPE_INNER_L4_UDP,
2706 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2707 RTE_PTYPE_TUNNEL_IP |
2708 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2709 RTE_PTYPE_INNER_L4_TCP,
2710 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2711 RTE_PTYPE_TUNNEL_IP |
2712 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2713 RTE_PTYPE_INNER_L4_SCTP,
2714 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2715 RTE_PTYPE_TUNNEL_IP |
2716 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2717 RTE_PTYPE_INNER_L4_ICMP,
2720 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2721 RTE_PTYPE_TUNNEL_IP |
2722 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2723 RTE_PTYPE_INNER_L4_FRAG,
2724 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2725 RTE_PTYPE_TUNNEL_IP |
2726 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2727 RTE_PTYPE_INNER_L4_NONFRAG,
2728 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2729 RTE_PTYPE_TUNNEL_IP |
2730 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2731 RTE_PTYPE_INNER_L4_UDP,
2733 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2734 RTE_PTYPE_TUNNEL_IP |
2735 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2736 RTE_PTYPE_INNER_L4_TCP,
2737 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2738 RTE_PTYPE_TUNNEL_IP |
2739 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2740 RTE_PTYPE_INNER_L4_SCTP,
2741 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2742 RTE_PTYPE_TUNNEL_IP |
2743 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2744 RTE_PTYPE_INNER_L4_ICMP,
2746 /* IPv4 --> GRE/Teredo/VXLAN */
2747 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2748 RTE_PTYPE_TUNNEL_GRENAT,
2750 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2751 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2752 RTE_PTYPE_TUNNEL_GRENAT |
2753 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2754 RTE_PTYPE_INNER_L4_FRAG,
2755 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2756 RTE_PTYPE_TUNNEL_GRENAT |
2757 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2758 RTE_PTYPE_INNER_L4_NONFRAG,
2759 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2760 RTE_PTYPE_TUNNEL_GRENAT |
2761 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2762 RTE_PTYPE_INNER_L4_UDP,
2764 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2765 RTE_PTYPE_TUNNEL_GRENAT |
2766 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2767 RTE_PTYPE_INNER_L4_TCP,
2768 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2769 RTE_PTYPE_TUNNEL_GRENAT |
2770 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2771 RTE_PTYPE_INNER_L4_SCTP,
2772 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2773 RTE_PTYPE_TUNNEL_GRENAT |
2774 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2775 RTE_PTYPE_INNER_L4_ICMP,
2777 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2778 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2779 RTE_PTYPE_TUNNEL_GRENAT |
2780 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2781 RTE_PTYPE_INNER_L4_FRAG,
2782 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2783 RTE_PTYPE_TUNNEL_GRENAT |
2784 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2785 RTE_PTYPE_INNER_L4_NONFRAG,
2786 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2787 RTE_PTYPE_TUNNEL_GRENAT |
2788 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2789 RTE_PTYPE_INNER_L4_UDP,
2791 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2792 RTE_PTYPE_TUNNEL_GRENAT |
2793 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2794 RTE_PTYPE_INNER_L4_TCP,
2795 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2796 RTE_PTYPE_TUNNEL_GRENAT |
2797 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2798 RTE_PTYPE_INNER_L4_SCTP,
2799 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2800 RTE_PTYPE_TUNNEL_GRENAT |
2801 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2802 RTE_PTYPE_INNER_L4_ICMP,
2804 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2805 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2806 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2808 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2809 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2810 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2811 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2812 RTE_PTYPE_INNER_L4_FRAG,
2813 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2814 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2815 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2816 RTE_PTYPE_INNER_L4_NONFRAG,
2817 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2818 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2819 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2820 RTE_PTYPE_INNER_L4_UDP,
2822 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2823 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2824 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2825 RTE_PTYPE_INNER_L4_TCP,
2826 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2827 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2828 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2829 RTE_PTYPE_INNER_L4_SCTP,
2830 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2831 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2832 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2833 RTE_PTYPE_INNER_L4_ICMP,
2835 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2836 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2837 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2838 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2839 RTE_PTYPE_INNER_L4_FRAG,
2840 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2841 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2842 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2843 RTE_PTYPE_INNER_L4_NONFRAG,
2844 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2845 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2846 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2847 RTE_PTYPE_INNER_L4_UDP,
2849 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2850 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2851 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2852 RTE_PTYPE_INNER_L4_TCP,
2853 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2854 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2855 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2856 RTE_PTYPE_INNER_L4_SCTP,
2857 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2858 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2859 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2860 RTE_PTYPE_INNER_L4_ICMP,
2861 /* [73] - [87] reserved */
2863 /* Non-tunneled IPv6 */
2864 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2866 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2867 RTE_PTYPE_L4_NONFRAG,
2868 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2871 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2873 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2875 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2879 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2880 RTE_PTYPE_TUNNEL_IP |
2881 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2882 RTE_PTYPE_INNER_L4_FRAG,
2883 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2884 RTE_PTYPE_TUNNEL_IP |
2885 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2886 RTE_PTYPE_INNER_L4_NONFRAG,
2887 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2888 RTE_PTYPE_TUNNEL_IP |
2889 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2890 RTE_PTYPE_INNER_L4_UDP,
2892 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2893 RTE_PTYPE_TUNNEL_IP |
2894 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2895 RTE_PTYPE_INNER_L4_TCP,
2896 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2897 RTE_PTYPE_TUNNEL_IP |
2898 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2899 RTE_PTYPE_INNER_L4_SCTP,
2900 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2901 RTE_PTYPE_TUNNEL_IP |
2902 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2903 RTE_PTYPE_INNER_L4_ICMP,
2906 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2907 RTE_PTYPE_TUNNEL_IP |
2908 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2909 RTE_PTYPE_INNER_L4_FRAG,
2910 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2911 RTE_PTYPE_TUNNEL_IP |
2912 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2913 RTE_PTYPE_INNER_L4_NONFRAG,
2914 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2915 RTE_PTYPE_TUNNEL_IP |
2916 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2917 RTE_PTYPE_INNER_L4_UDP,
2918 /* [105] reserved */
2919 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2920 RTE_PTYPE_TUNNEL_IP |
2921 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2922 RTE_PTYPE_INNER_L4_TCP,
2923 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2924 RTE_PTYPE_TUNNEL_IP |
2925 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2926 RTE_PTYPE_INNER_L4_SCTP,
2927 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2928 RTE_PTYPE_TUNNEL_IP |
2929 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2930 RTE_PTYPE_INNER_L4_ICMP,
2932 /* IPv6 --> GRE/Teredo/VXLAN */
2933 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2934 RTE_PTYPE_TUNNEL_GRENAT,
2936 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2937 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2938 RTE_PTYPE_TUNNEL_GRENAT |
2939 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2940 RTE_PTYPE_INNER_L4_FRAG,
2941 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2942 RTE_PTYPE_TUNNEL_GRENAT |
2943 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2944 RTE_PTYPE_INNER_L4_NONFRAG,
2945 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2946 RTE_PTYPE_TUNNEL_GRENAT |
2947 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2948 RTE_PTYPE_INNER_L4_UDP,
2949 /* [113] reserved */
2950 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2951 RTE_PTYPE_TUNNEL_GRENAT |
2952 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2953 RTE_PTYPE_INNER_L4_TCP,
2954 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2955 RTE_PTYPE_TUNNEL_GRENAT |
2956 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2957 RTE_PTYPE_INNER_L4_SCTP,
2958 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2959 RTE_PTYPE_TUNNEL_GRENAT |
2960 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2961 RTE_PTYPE_INNER_L4_ICMP,
2963 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2964 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2965 RTE_PTYPE_TUNNEL_GRENAT |
2966 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2967 RTE_PTYPE_INNER_L4_FRAG,
2968 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2969 RTE_PTYPE_TUNNEL_GRENAT |
2970 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2971 RTE_PTYPE_INNER_L4_NONFRAG,
2972 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2973 RTE_PTYPE_TUNNEL_GRENAT |
2974 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2975 RTE_PTYPE_INNER_L4_UDP,
2976 /* [120] reserved */
2977 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2978 RTE_PTYPE_TUNNEL_GRENAT |
2979 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2980 RTE_PTYPE_INNER_L4_TCP,
2981 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2982 RTE_PTYPE_TUNNEL_GRENAT |
2983 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2984 RTE_PTYPE_INNER_L4_SCTP,
2985 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2986 RTE_PTYPE_TUNNEL_GRENAT |
2987 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2988 RTE_PTYPE_INNER_L4_ICMP,
2990 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2991 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2992 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2994 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2995 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2996 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2997 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2998 RTE_PTYPE_INNER_L4_FRAG,
2999 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3000 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3001 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3002 RTE_PTYPE_INNER_L4_NONFRAG,
3003 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3004 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3005 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3006 RTE_PTYPE_INNER_L4_UDP,
3007 /* [128] reserved */
3008 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3009 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3010 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3011 RTE_PTYPE_INNER_L4_TCP,
3012 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3013 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3014 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3015 RTE_PTYPE_INNER_L4_SCTP,
3016 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3017 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3018 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3019 RTE_PTYPE_INNER_L4_ICMP,
3021 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3022 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3023 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3024 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3025 RTE_PTYPE_INNER_L4_FRAG,
3026 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3027 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3028 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3029 RTE_PTYPE_INNER_L4_NONFRAG,
3030 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3031 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3032 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3033 RTE_PTYPE_INNER_L4_UDP,
3034 /* [135] reserved */
3035 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3036 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3037 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3038 RTE_PTYPE_INNER_L4_TCP,
3039 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3040 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3041 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3042 RTE_PTYPE_INNER_L4_SCTP,
3043 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3044 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3045 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3046 RTE_PTYPE_INNER_L4_ICMP,
3047 /* [139] - [299] reserved */
3050 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3051 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3053 /* PPPoE --> IPv4 */
3054 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3055 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3057 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3058 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3059 RTE_PTYPE_L4_NONFRAG,
3060 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3061 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3063 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3064 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3066 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3067 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3069 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3070 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3073 /* PPPoE --> IPv6 */
3074 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3075 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3077 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3078 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3079 RTE_PTYPE_L4_NONFRAG,
3080 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3081 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3083 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3084 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3086 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3087 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3089 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3090 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3092 /* [314] - [324] reserved */
3094 /* IPv4/IPv6 --> GTPC/GTPU */
3095 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3096 RTE_PTYPE_TUNNEL_GTPC,
3097 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3098 RTE_PTYPE_TUNNEL_GTPC,
3099 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3100 RTE_PTYPE_TUNNEL_GTPC,
3101 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3102 RTE_PTYPE_TUNNEL_GTPC,
3103 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3104 RTE_PTYPE_TUNNEL_GTPU,
3105 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3106 RTE_PTYPE_TUNNEL_GTPU,
3108 /* IPv4 --> GTPU --> IPv4 */
3109 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3110 RTE_PTYPE_TUNNEL_GTPU |
3111 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3112 RTE_PTYPE_INNER_L4_FRAG,
3113 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3114 RTE_PTYPE_TUNNEL_GTPU |
3115 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3116 RTE_PTYPE_INNER_L4_NONFRAG,
3117 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3118 RTE_PTYPE_TUNNEL_GTPU |
3119 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3120 RTE_PTYPE_INNER_L4_UDP,
3121 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3122 RTE_PTYPE_TUNNEL_GTPU |
3123 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3124 RTE_PTYPE_INNER_L4_TCP,
3125 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3126 RTE_PTYPE_TUNNEL_GTPU |
3127 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3128 RTE_PTYPE_INNER_L4_ICMP,
3130 /* IPv6 --> GTPU --> IPv4 */
3131 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3132 RTE_PTYPE_TUNNEL_GTPU |
3133 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3134 RTE_PTYPE_INNER_L4_FRAG,
3135 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3136 RTE_PTYPE_TUNNEL_GTPU |
3137 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3138 RTE_PTYPE_INNER_L4_NONFRAG,
3139 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3140 RTE_PTYPE_TUNNEL_GTPU |
3141 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3142 RTE_PTYPE_INNER_L4_UDP,
3143 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3144 RTE_PTYPE_TUNNEL_GTPU |
3145 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3146 RTE_PTYPE_INNER_L4_TCP,
3147 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3148 RTE_PTYPE_TUNNEL_GTPU |
3149 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3150 RTE_PTYPE_INNER_L4_ICMP,
3152 /* IPv4 --> GTPU --> IPv6 */
3153 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3154 RTE_PTYPE_TUNNEL_GTPU |
3155 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3156 RTE_PTYPE_INNER_L4_FRAG,
3157 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3158 RTE_PTYPE_TUNNEL_GTPU |
3159 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3160 RTE_PTYPE_INNER_L4_NONFRAG,
3161 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3162 RTE_PTYPE_TUNNEL_GTPU |
3163 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3164 RTE_PTYPE_INNER_L4_UDP,
3165 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3166 RTE_PTYPE_TUNNEL_GTPU |
3167 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3168 RTE_PTYPE_INNER_L4_TCP,
3169 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3170 RTE_PTYPE_TUNNEL_GTPU |
3171 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3172 RTE_PTYPE_INNER_L4_ICMP,
3174 /* IPv6 --> GTPU --> IPv6 */
3175 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3176 RTE_PTYPE_TUNNEL_GTPU |
3177 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3178 RTE_PTYPE_INNER_L4_FRAG,
3179 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3180 RTE_PTYPE_TUNNEL_GTPU |
3181 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3182 RTE_PTYPE_INNER_L4_NONFRAG,
3183 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3184 RTE_PTYPE_TUNNEL_GTPU |
3185 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3186 RTE_PTYPE_INNER_L4_UDP,
3187 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3188 RTE_PTYPE_TUNNEL_GTPU |
3189 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3190 RTE_PTYPE_INNER_L4_TCP,
3191 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3192 RTE_PTYPE_TUNNEL_GTPU |
3193 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3194 RTE_PTYPE_INNER_L4_ICMP,
3195 /* All others reserved */
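/* This table translates the packet type field reported in the Rx
 * descriptor into RTE_PTYPE_* values; any index not listed above stays 0,
 * i.e. RTE_PTYPE_UNKNOWN. The Rx burst functions apply it roughly as
 * (illustrative):
 *
 *   mb->packet_type = ptype_tbl[ptype_from_descriptor];
 */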