1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
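/*
 * Illustrative sketch, not driver code: how an application could read the
 * extracted metadata once the dynamic field/flags above are registered.
 * The RTE_PMD_IFD_* names are real (from rte_pmd_iavf.h); the helper
 * itself is hypothetical.
 */
static __rte_unused uint32_t
example_read_proto_xtr_metadata(struct rte_mbuf *m)
{
	/* The dynamic field offset must be valid before dereferencing */
	if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
		return 0;

	/* The metadata is only meaningful when an extraction flag is set */
	if (m->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)
		return *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m);

	return 0;
}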
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
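/*
 * Example of the mapping above: IAVF_PROTO_XTR_VLAN selects
 * IAVF_RXDID_COMMS_AUX_VLAN, while any out-of-range type falls back to
 * IAVF_RXDID_COMMS_OVS_1, the descriptor format used when no protocol
 * extraction is requested.
 */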
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
63 /* The following constraints must be satisfied:
64 * thresh < rxq->nb_rx_desc
66 if (thresh >= nb_desc) {
67 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than the number of RX descriptors (%u)",
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76 uint16_t tx_free_thresh)
78 /* TX descriptors will have their RS bit set after tx_rs_thresh
79 * descriptors have been used. The TX descriptor ring will be cleaned
80 * after tx_free_thresh descriptors are used or if the number of
81 * descriptors required to transmit a packet is greater than the
82 * number of free TX descriptors.
84 * The following constraints must be satisfied:
85 * - tx_rs_thresh must be less than the size of the ring minus 2.
86 * - tx_free_thresh must be less than the size of the ring minus 3.
87 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
88 * - tx_rs_thresh must be a divisor of the ring size.
90 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91 * race condition, hence the maximum threshold constraints. When set
92 * to zero, use default values.
94 if (tx_rs_thresh >= (nb_desc - 2)) {
95 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96 "number of TX descriptors (%u) minus 2",
97 tx_rs_thresh, nb_desc);
100 if (tx_free_thresh >= (nb_desc - 3)) {
101 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102 "number of TX descriptors (%u) minus 3.",
103 tx_free_thresh, nb_desc);
106 if (tx_rs_thresh > tx_free_thresh) {
107 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108 "equal to tx_free_thresh (%u).",
109 tx_rs_thresh, tx_free_thresh);
112 if ((nb_desc % tx_rs_thresh) != 0) {
113 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114 "number of TX descriptors (%u).",
115 tx_rs_thresh, nb_desc);
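/*
 * Worked example of the constraints above, assuming the common defaults
 * nb_desc = 512, tx_rs_thresh = 32 and tx_free_thresh = 32:
 *   32 < 512 - 2 = 510   (rs threshold below ring size minus 2)
 *   32 < 512 - 3 = 509   (free threshold below ring size minus 3)
 *   32 <= 32             (rs threshold does not exceed free threshold)
 *   512 % 32 == 0        (rs threshold divides the ring size)
 * so this configuration is accepted.
 */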
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
125 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
131 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
138 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
144 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
153 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155 "rxq->rx_free_thresh=%d, "
156 "IAVF_RX_MAX_BURST=%d",
157 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
159 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161 "rxq->nb_rx_desc=%d, "
162 "rxq->rx_free_thresh=%d",
163 rxq->nb_rx_desc, rxq->rx_free_thresh);
170 reset_rx_queue(struct iavf_rx_queue *rxq)
178 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
180 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181 ((volatile char *)rxq->rx_ring)[i] = 0;
183 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
185 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
189 rxq->rx_nb_avail = 0;
190 rxq->rx_next_avail = 0;
191 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
195 rxq->pkt_first_seg = NULL;
196 rxq->pkt_last_seg = NULL;
198 rxq->rxrearm_start = 0;
202 reset_tx_queue(struct iavf_tx_queue *txq)
204 struct iavf_tx_entry *txe;
209 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
214 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
215 for (i = 0; i < size; i++)
216 ((volatile char *)txq->tx_ring)[i] = 0;
218 prev = (uint16_t)(txq->nb_tx_desc - 1);
219 for (i = 0; i < txq->nb_tx_desc; i++) {
220 txq->tx_ring[i].cmd_type_offset_bsz =
221 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
224 txe[prev].next_id = i;
231 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
232 txq->nb_free = txq->nb_tx_desc - 1;
234 txq->next_dd = txq->rs_thresh - 1;
235 txq->next_rs = txq->rs_thresh - 1;
239 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
241 volatile union iavf_rx_desc *rxd;
242 struct rte_mbuf *mbuf = NULL;
246 for (i = 0; i < rxq->nb_rx_desc; i++) {
247 mbuf = rte_mbuf_raw_alloc(rxq->mp);
248 if (unlikely(!mbuf)) {
249 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
253 rte_mbuf_refcnt_set(mbuf, 1);
255 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
257 mbuf->port = rxq->port_id;
260 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
262 rxd = &rxq->rx_ring[i];
263 rxd->read.pkt_addr = dma_addr;
264 rxd->read.hdr_addr = 0;
265 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
270 rxq->sw_ring[i] = mbuf;
277 release_rxq_mbufs(struct iavf_rx_queue *rxq)
284 for (i = 0; i < rxq->nb_rx_desc; i++) {
285 if (rxq->sw_ring[i]) {
286 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
287 rxq->sw_ring[i] = NULL;
292 if (rxq->rx_nb_avail == 0)
294 for (i = 0; i < rxq->rx_nb_avail; i++) {
295 struct rte_mbuf *mbuf;
297 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
298 rte_pktmbuf_free_seg(mbuf);
300 rxq->rx_nb_avail = 0;
304 release_txq_mbufs(struct iavf_tx_queue *txq)
308 if (!txq || !txq->sw_ring) {
309 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
313 for (i = 0; i < txq->nb_tx_desc; i++) {
314 if (txq->sw_ring[i].mbuf) {
315 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
316 txq->sw_ring[i].mbuf = NULL;
321 static const struct iavf_rxq_ops def_rxq_ops = {
322 .release_mbufs = release_rxq_mbufs,
325 static const struct iavf_txq_ops def_txq_ops = {
326 .release_mbufs = release_txq_mbufs,
330 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
332 volatile union iavf_rx_flex_desc *rxdp)
334 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
335 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
336 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
340 if (desc->flow_id != 0xFFFFFFFF) {
341 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
342 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
345 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
346 stat_err = rte_le_to_cpu_16(desc->status_error0);
347 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
348 mb->ol_flags |= PKT_RX_RSS_HASH;
349 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
355 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
357 volatile union iavf_rx_flex_desc *rxdp)
359 volatile struct iavf_32b_rx_flex_desc_comms *desc =
360 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
363 stat_err = rte_le_to_cpu_16(desc->status_error0);
364 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
365 mb->ol_flags |= PKT_RX_RSS_HASH;
366 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
370 if (desc->flow_id != 0xFFFFFFFF) {
371 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
372 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
375 if (rxq->xtr_ol_flag) {
376 uint32_t metadata = 0;
378 stat_err = rte_le_to_cpu_16(desc->status_error1);
380 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
381 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
383 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
385 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
388 mb->ol_flags |= rxq->xtr_ol_flag;
390 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
397 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
399 volatile union iavf_rx_flex_desc *rxdp)
401 volatile struct iavf_32b_rx_flex_desc_comms *desc =
402 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
405 stat_err = rte_le_to_cpu_16(desc->status_error0);
406 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
407 mb->ol_flags |= PKT_RX_RSS_HASH;
408 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
411 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
412 if (desc->flow_id != 0xFFFFFFFF) {
413 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
414 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
417 if (rxq->xtr_ol_flag) {
418 uint32_t metadata = 0;
420 if (desc->flex_ts.flex.aux0 != 0xFFFF)
421 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
422 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
423 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
426 mb->ol_flags |= rxq->xtr_ol_flag;
428 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
435 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
438 case IAVF_RXDID_COMMS_AUX_VLAN:
439 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
440 rxq->rxd_to_pkt_fields =
441 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
443 case IAVF_RXDID_COMMS_AUX_IPV4:
444 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
445 rxq->rxd_to_pkt_fields =
446 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
448 case IAVF_RXDID_COMMS_AUX_IPV6:
449 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
450 rxq->rxd_to_pkt_fields =
451 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
453 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
455 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
456 rxq->rxd_to_pkt_fields =
457 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
459 case IAVF_RXDID_COMMS_AUX_TCP:
460 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
461 rxq->rxd_to_pkt_fields =
462 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
464 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
466 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
467 rxq->rxd_to_pkt_fields =
468 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
470 case IAVF_RXDID_COMMS_OVS_1:
471 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
474 /* update this according to the RXDID for FLEX_DESC_NONE */
475 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
479 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
480 rxq->xtr_ol_flag = 0;
484 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
485 uint16_t nb_desc, unsigned int socket_id,
486 const struct rte_eth_rxconf *rx_conf,
487 struct rte_mempool *mp)
489 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490 struct iavf_adapter *ad =
491 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
492 struct iavf_info *vf =
493 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
494 struct iavf_vsi *vsi = &vf->vsi;
495 struct iavf_rx_queue *rxq;
496 const struct rte_memzone *mz;
500 uint16_t rx_free_thresh;
502 PMD_INIT_FUNC_TRACE();
504 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
505 nb_desc > IAVF_MAX_RING_DESC ||
506 nb_desc < IAVF_MIN_RING_DESC) {
507 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
512 /* Check free threshold */
513 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
514 IAVF_DEFAULT_RX_FREE_THRESH :
515 rx_conf->rx_free_thresh;
516 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
519 /* Free memory if needed */
520 if (dev->data->rx_queues[queue_idx]) {
521 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
522 dev->data->rx_queues[queue_idx] = NULL;
525 /* Allocate the rx queue data structure */
526 rxq = rte_zmalloc_socket("iavf rxq",
527 sizeof(struct iavf_rx_queue),
531 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
532 "rx queue data structure");
536 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
537 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
539 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
540 rxq->proto_xtr = proto_xtr;
542 rxq->rxdid = IAVF_RXDID_LEGACY_1;
543 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
546 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
547 struct virtchnl_vlan_supported_caps *stripping_support =
548 &vf->vlan_v2_caps.offloads.stripping_support;
549 uint32_t stripping_cap;
551 if (stripping_support->outer)
552 stripping_cap = stripping_support->outer;
554 stripping_cap = stripping_support->inner;
556 if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
557 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
558 else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
559 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
561 rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
564 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
567 rxq->nb_rx_desc = nb_desc;
568 rxq->rx_free_thresh = rx_free_thresh;
569 rxq->queue_id = queue_idx;
570 rxq->port_id = dev->data->port_id;
571 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
575 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
576 rxq->crc_len = RTE_ETHER_CRC_LEN;
580 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
581 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
583 /* Allocate the software ring. */
584 len = nb_desc + IAVF_RX_MAX_BURST;
586 rte_zmalloc_socket("iavf rx sw ring",
587 sizeof(struct rte_mbuf *) * len,
591 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
596 /* Allocate the maximum number of RX ring hardware descriptors with
597 * a little more to support bulk allocation.
599 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
600 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
602 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
603 ring_size, IAVF_RING_BASE_ALIGN,
606 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
607 rte_free(rxq->sw_ring);
611 /* Zero all the descriptors in the ring. */
612 memset(mz->addr, 0, ring_size);
613 rxq->rx_ring_phys_addr = mz->iova;
614 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
619 dev->data->rx_queues[queue_idx] = rxq;
620 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
621 rxq->ops = &def_rxq_ops;
623 if (check_rx_bulk_allow(rxq) == true) {
624 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
625 "satisfied. Rx Burst Bulk Alloc function will be "
626 "used on port=%d, queue=%d.",
627 rxq->port_id, rxq->queue_id);
629 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
630 "not satisfied, Scattered Rx is requested "
631 "on port=%d, queue=%d.",
632 rxq->port_id, rxq->queue_id);
633 ad->rx_bulk_alloc_allowed = false;
636 if (check_rx_vec_allow(rxq) == false)
637 ad->rx_vec_allowed = false;
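/*
 * Usage sketch (hypothetical application code, not part of the PMD): the
 * ethdev call below is what eventually reaches iavf_dev_rx_queue_setup()
 * above. Port configuration and mempool creation are assumed done. A
 * rx_free_thresh that divides nb_desc also keeps the bulk-alloc path
 * available (see check_rx_bulk_allow()).
 */
static __rte_unused int
example_setup_rx_queue(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = 32,
	};

	return rte_eth_rx_queue_setup(port_id, 0 /* queue */,
				      512 /* nb_desc */,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}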
643 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
646 unsigned int socket_id,
647 const struct rte_eth_txconf *tx_conf)
649 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
650 struct iavf_info *vf =
651 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
652 struct iavf_tx_queue *txq;
653 const struct rte_memzone *mz;
655 uint16_t tx_rs_thresh, tx_free_thresh;
658 PMD_INIT_FUNC_TRACE();
660 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
662 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
663 nb_desc > IAVF_MAX_RING_DESC ||
664 nb_desc < IAVF_MIN_RING_DESC) {
665 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
670 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
671 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
672 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
673 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
674 check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
676 /* Free memory if needed. */
677 if (dev->data->tx_queues[queue_idx]) {
678 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
679 dev->data->tx_queues[queue_idx] = NULL;
682 /* Allocate the TX queue data structure. */
683 txq = rte_zmalloc_socket("iavf txq",
684 sizeof(struct iavf_tx_queue),
688 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
689 "tx queue structure");
693 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
694 struct virtchnl_vlan_supported_caps *insertion_support =
695 &vf->vlan_v2_caps.offloads.insertion_support;
696 uint32_t insertion_cap;
698 if (insertion_support->outer)
699 insertion_cap = insertion_support->outer;
701 insertion_cap = insertion_support->inner;
703 if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
704 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
705 else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
706 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
708 txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
711 txq->nb_tx_desc = nb_desc;
712 txq->rs_thresh = tx_rs_thresh;
713 txq->free_thresh = tx_free_thresh;
714 txq->queue_id = queue_idx;
715 txq->port_id = dev->data->port_id;
716 txq->offloads = offloads;
717 txq->tx_deferred_start = tx_conf->tx_deferred_start;
719 /* Allocate software ring */
721 rte_zmalloc_socket("iavf tx sw ring",
722 sizeof(struct iavf_tx_entry) * nb_desc,
726 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
731 /* Allocate TX hardware ring descriptors. */
732 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
733 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
734 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
735 ring_size, IAVF_RING_BASE_ALIGN,
738 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
739 rte_free(txq->sw_ring);
743 txq->tx_ring_phys_addr = mz->iova;
744 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
749 dev->data->tx_queues[queue_idx] = txq;
750 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
751 txq->ops = &def_txq_ops;
753 if (check_tx_vec_allow(txq) == false) {
754 struct iavf_adapter *ad =
755 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
756 ad->tx_vec_allowed = false;
763 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
765 struct iavf_adapter *adapter =
766 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
767 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
768 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
769 struct iavf_rx_queue *rxq;
772 PMD_DRV_FUNC_TRACE();
774 if (rx_queue_id >= dev->data->nb_rx_queues)
777 rxq = dev->data->rx_queues[rx_queue_id];
779 err = alloc_rxq_mbufs(rxq);
781 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
787 /* Init the RX tail register. */
788 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
789 IAVF_WRITE_FLUSH(hw);
791 /* Ready to switch the queue on */
793 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
795 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
798 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
801 dev->data->rx_queue_state[rx_queue_id] =
802 RTE_ETH_QUEUE_STATE_STARTED;
808 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
810 struct iavf_adapter *adapter =
811 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
812 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
813 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
814 struct iavf_tx_queue *txq;
817 PMD_DRV_FUNC_TRACE();
819 if (tx_queue_id >= dev->data->nb_tx_queues)
822 txq = dev->data->tx_queues[tx_queue_id];
824 /* Init the TX tail register. */
825 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
826 IAVF_WRITE_FLUSH(hw);
828 /* Ready to switch the queue on */
830 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
832 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
835 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
838 dev->data->tx_queue_state[tx_queue_id] =
839 RTE_ETH_QUEUE_STATE_STARTED;
845 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
847 struct iavf_adapter *adapter =
848 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
849 struct iavf_rx_queue *rxq;
852 PMD_DRV_FUNC_TRACE();
854 if (rx_queue_id >= dev->data->nb_rx_queues)
857 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
859 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
864 rxq = dev->data->rx_queues[rx_queue_id];
865 rxq->ops->release_mbufs(rxq);
867 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
873 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
875 struct iavf_adapter *adapter =
876 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
877 struct iavf_tx_queue *txq;
880 PMD_DRV_FUNC_TRACE();
882 if (tx_queue_id >= dev->data->nb_tx_queues)
885 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
887 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
892 txq = dev->data->tx_queues[tx_queue_id];
893 txq->ops->release_mbufs(txq);
895 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
901 iavf_dev_rx_queue_release(void *rxq)
903 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
908 q->ops->release_mbufs(q);
909 rte_free(q->sw_ring);
910 rte_memzone_free(q->mz);
915 iavf_dev_tx_queue_release(void *txq)
917 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
922 q->ops->release_mbufs(q);
923 rte_free(q->sw_ring);
924 rte_memzone_free(q->mz);
929 iavf_stop_queues(struct rte_eth_dev *dev)
931 struct iavf_adapter *adapter =
932 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
933 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
934 struct iavf_rx_queue *rxq;
935 struct iavf_tx_queue *txq;
938 /* Stop all queues */
939 if (!vf->lv_enabled) {
940 ret = iavf_disable_queues(adapter);
942 PMD_DRV_LOG(WARNING, "Failed to stop queues");
944 ret = iavf_disable_queues_lv(adapter);
946 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
950 PMD_DRV_LOG(WARNING, "Failed to stop queues");
952 for (i = 0; i < dev->data->nb_tx_queues; i++) {
953 txq = dev->data->tx_queues[i];
956 txq->ops->release_mbufs(txq);
958 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
960 for (i = 0; i < dev->data->nb_rx_queues; i++) {
961 rxq = dev->data->rx_queues[i];
964 rxq->ops->release_mbufs(rxq);
966 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
979 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
981 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
982 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
983 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
985 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
992 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
993 volatile union iavf_rx_flex_desc *rxdp,
996 uint16_t vlan_tci = 0;
998 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
999 rte_le_to_cpu_64(rxdp->wb.status_error0) &
1000 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
1001 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
1003 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1004 if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
1005 rte_le_to_cpu_16(rxdp->wb.status_error1) &
1006 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
1007 vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1011 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1012 mb->vlan_tci = vlan_tci;
1016 /* Translate the rx descriptor status and error fields to pkt flags */
1017 static inline uint64_t
1018 iavf_rxd_to_pkt_flags(uint64_t qword)
1021 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
1023 #define IAVF_RX_ERR_BITS 0x3f
1025 /* Check if RSS_HASH */
1026 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
1027 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
1028 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
1030 /* Check if FDIR Match */
1031 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
1034 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
1035 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1039 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1040 flags |= PKT_RX_IP_CKSUM_BAD;
1042 flags |= PKT_RX_IP_CKSUM_GOOD;
1044 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1045 flags |= PKT_RX_L4_CKSUM_BAD;
1047 flags |= PKT_RX_L4_CKSUM_GOOD;
1049 /* TODO: Oversize error bit is not processed here */
1054 static inline uint64_t
1055 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1058 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1061 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1062 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1063 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1065 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1067 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1068 flags |= PKT_RX_FDIR_ID;
1072 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1073 flags |= PKT_RX_FDIR_ID;
1078 #define IAVF_RX_FLEX_ERR0_BITS \
1079 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1080 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1081 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1082 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1083 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1084 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1086 /* Rx L3/L4 checksum */
1087 static inline uint64_t
1088 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1092 /* Check if HW has decoded the packet and validated the checksum */
1093 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1096 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1097 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1101 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1102 flags |= PKT_RX_IP_CKSUM_BAD;
1104 flags |= PKT_RX_IP_CKSUM_GOOD;
1106 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1107 flags |= PKT_RX_L4_CKSUM_BAD;
1109 flags |= PKT_RX_L4_CKSUM_GOOD;
1111 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1112 flags |= PKT_RX_EIP_CKSUM_BAD;
1117 /* If the number of free RX descriptors is greater than the RX free
1118 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1119 * register. Update the RDT with the value of the last processed RX
1120 * descriptor minus 1, to guarantee that the RDT register is never
1121 * equal to the RDH register, which creates a "full" ring situation
1122 * from the hardware point of view.
1125 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1127 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1129 if (nb_hold > rxq->rx_free_thresh) {
1131 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1132 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1133 rx_id = (uint16_t)((rx_id == 0) ?
1134 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1135 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1138 rxq->nb_rx_hold = nb_hold;
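/*
 * Example of the tail update above: with nb_rx_desc = 512 and the next
 * descriptor to write at rx_id = 0, the value written is 511, i.e. one
 * position behind the software index, so RDT can never catch up with RDH.
 */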
1141 /* implement recv_pkts */
1143 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1145 volatile union iavf_rx_desc *rx_ring;
1146 volatile union iavf_rx_desc *rxdp;
1147 struct iavf_rx_queue *rxq;
1148 union iavf_rx_desc rxd;
1149 struct rte_mbuf *rxe;
1150 struct rte_eth_dev *dev;
1151 struct rte_mbuf *rxm;
1152 struct rte_mbuf *nmb;
1156 uint16_t rx_packet_len;
1157 uint16_t rx_id, nb_hold;
1160 const uint32_t *ptype_tbl;
1165 rx_id = rxq->rx_tail;
1166 rx_ring = rxq->rx_ring;
1167 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1169 while (nb_rx < nb_pkts) {
1170 rxdp = &rx_ring[rx_id];
1171 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1172 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1173 IAVF_RXD_QW1_STATUS_SHIFT;
1175 /* Check the DD bit first */
1176 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1178 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1180 nmb = rte_mbuf_raw_alloc(rxq->mp);
1181 if (unlikely(!nmb)) {
1182 dev = &rte_eth_devices[rxq->port_id];
1183 dev->data->rx_mbuf_alloc_failed++;
1184 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1185 "queue_id=%u", rxq->port_id, rxq->queue_id);
1191 rxe = rxq->sw_ring[rx_id];
1193 if (unlikely(rx_id == rxq->nb_rx_desc))
1196 /* Prefetch next mbuf */
1197 rte_prefetch0(rxq->sw_ring[rx_id]);
1199 /* When next RX descriptor is on a cache line boundary,
1200 * prefetch the next 4 RX descriptors and next 8 pointers
1203 if ((rx_id & 0x3) == 0) {
1204 rte_prefetch0(&rx_ring[rx_id]);
1205 rte_prefetch0(rxq->sw_ring[rx_id]);
1209 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1210 rxdp->read.hdr_addr = 0;
1211 rxdp->read.pkt_addr = dma_addr;
1213 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1214 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1216 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1217 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1220 rxm->pkt_len = rx_packet_len;
1221 rxm->data_len = rx_packet_len;
1222 rxm->port = rxq->port_id;
1224 iavf_rxd_to_vlan_tci(rxm, &rxd);
1225 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1227 ptype_tbl[(uint8_t)((qword1 &
1228 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1230 if (pkt_flags & PKT_RX_RSS_HASH)
1232 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1234 if (pkt_flags & PKT_RX_FDIR)
1235 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1237 rxm->ol_flags |= pkt_flags;
1239 rx_pkts[nb_rx++] = rxm;
1241 rxq->rx_tail = rx_id;
1243 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1248 /* implement recv_pkts for flexible Rx descriptor */
1250 iavf_recv_pkts_flex_rxd(void *rx_queue,
1251 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1253 volatile union iavf_rx_desc *rx_ring;
1254 volatile union iavf_rx_flex_desc *rxdp;
1255 struct iavf_rx_queue *rxq;
1256 union iavf_rx_flex_desc rxd;
1257 struct rte_mbuf *rxe;
1258 struct rte_eth_dev *dev;
1259 struct rte_mbuf *rxm;
1260 struct rte_mbuf *nmb;
1262 uint16_t rx_stat_err0;
1263 uint16_t rx_packet_len;
1264 uint16_t rx_id, nb_hold;
1267 const uint32_t *ptype_tbl;
1272 rx_id = rxq->rx_tail;
1273 rx_ring = rxq->rx_ring;
1274 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1276 while (nb_rx < nb_pkts) {
1277 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1278 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1280 /* Check the DD bit first */
1281 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1283 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1285 nmb = rte_mbuf_raw_alloc(rxq->mp);
1286 if (unlikely(!nmb)) {
1287 dev = &rte_eth_devices[rxq->port_id];
1288 dev->data->rx_mbuf_alloc_failed++;
1289 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1290 "queue_id=%u", rxq->port_id, rxq->queue_id);
1296 rxe = rxq->sw_ring[rx_id];
1298 if (unlikely(rx_id == rxq->nb_rx_desc))
1301 /* Prefetch next mbuf */
1302 rte_prefetch0(rxq->sw_ring[rx_id]);
1304 /* When next RX descriptor is on a cache line boundary,
1305 * prefetch the next 4 RX descriptors and next 8 pointers
1308 if ((rx_id & 0x3) == 0) {
1309 rte_prefetch0(&rx_ring[rx_id]);
1310 rte_prefetch0(rxq->sw_ring[rx_id]);
1314 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1315 rxdp->read.hdr_addr = 0;
1316 rxdp->read.pkt_addr = dma_addr;
1318 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1319 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1321 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1322 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1325 rxm->pkt_len = rx_packet_len;
1326 rxm->data_len = rx_packet_len;
1327 rxm->port = rxq->port_id;
1329 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1330 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1331 iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
1332 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1333 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1334 rxm->ol_flags |= pkt_flags;
1336 rx_pkts[nb_rx++] = rxm;
1338 rxq->rx_tail = rx_id;
1340 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1345 /* implement recv_scattered_pkts for flexible Rx descriptor */
1347 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1350 struct iavf_rx_queue *rxq = rx_queue;
1351 union iavf_rx_flex_desc rxd;
1352 struct rte_mbuf *rxe;
1353 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1354 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1355 struct rte_mbuf *nmb, *rxm;
1356 uint16_t rx_id = rxq->rx_tail;
1357 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1358 struct rte_eth_dev *dev;
1359 uint16_t rx_stat_err0;
1363 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1364 volatile union iavf_rx_flex_desc *rxdp;
1365 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1367 while (nb_rx < nb_pkts) {
1368 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1369 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1371 /* Check the DD bit */
1372 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1374 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1376 nmb = rte_mbuf_raw_alloc(rxq->mp);
1377 if (unlikely(!nmb)) {
1378 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1379 "queue_id=%u", rxq->port_id, rxq->queue_id);
1380 dev = &rte_eth_devices[rxq->port_id];
1381 dev->data->rx_mbuf_alloc_failed++;
1387 rxe = rxq->sw_ring[rx_id];
1389 if (rx_id == rxq->nb_rx_desc)
1392 /* Prefetch next mbuf */
1393 rte_prefetch0(rxq->sw_ring[rx_id]);
1395 /* When next RX descriptor is on a cache line boundary,
1396 * prefetch the next 4 RX descriptors and next 8 pointers
1399 if ((rx_id & 0x3) == 0) {
1400 rte_prefetch0(&rx_ring[rx_id]);
1401 rte_prefetch0(rxq->sw_ring[rx_id]);
1406 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1408 /* Set data buffer address and data length of the mbuf */
1409 rxdp->read.hdr_addr = 0;
1410 rxdp->read.pkt_addr = dma_addr;
1411 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1412 IAVF_RX_FLX_DESC_PKT_LEN_M;
1413 rxm->data_len = rx_packet_len;
1414 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1416 /* If this is the first buffer of the received packet, set the
1417 * pointer to the first mbuf of the packet and initialize its
1418 * context. Otherwise, update the total length and the number
1419 * of segments of the current scattered packet, and update the
1420 * pointer to the last mbuf of the current packet.
1424 first_seg->nb_segs = 1;
1425 first_seg->pkt_len = rx_packet_len;
1427 first_seg->pkt_len =
1428 (uint16_t)(first_seg->pkt_len +
1430 first_seg->nb_segs++;
1431 last_seg->next = rxm;
1434 /* If this is not the last buffer of the received packet,
1435 * update the pointer to the last mbuf of the current scattered
1436 * packet and continue to parse the RX ring.
1438 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1443 /* This is the last buffer of the received packet. If the CRC
1444 * is not stripped by the hardware:
1445 * - Subtract the CRC length from the total packet length.
1446 * - If the last buffer only contains the whole CRC or a part
1447 * of it, free the mbuf associated to the last buffer. If part
1448 * of the CRC is also contained in the previous mbuf, subtract
1449 * the length of that CRC part from the data length of the
1453 if (unlikely(rxq->crc_len > 0)) {
1454 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1455 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1456 rte_pktmbuf_free_seg(rxm);
1457 first_seg->nb_segs--;
1458 last_seg->data_len =
1459 (uint16_t)(last_seg->data_len -
1460 (RTE_ETHER_CRC_LEN - rx_packet_len));
1461 last_seg->next = NULL;
1463 rxm->data_len = (uint16_t)(rx_packet_len -
1468 first_seg->port = rxq->port_id;
1469 first_seg->ol_flags = 0;
1470 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1471 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1472 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
1473 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1474 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1476 first_seg->ol_flags |= pkt_flags;
1478 /* Prefetch data of first segment, if configured to do so. */
1479 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1480 first_seg->data_off));
1481 rx_pkts[nb_rx++] = first_seg;
1485 /* Record index of the next RX descriptor to probe. */
1486 rxq->rx_tail = rx_id;
1487 rxq->pkt_first_seg = first_seg;
1488 rxq->pkt_last_seg = last_seg;
1490 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1495 /* implement recv_scattered_pkts */
1497 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1500 struct iavf_rx_queue *rxq = rx_queue;
1501 union iavf_rx_desc rxd;
1502 struct rte_mbuf *rxe;
1503 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1504 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1505 struct rte_mbuf *nmb, *rxm;
1506 uint16_t rx_id = rxq->rx_tail;
1507 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1508 struct rte_eth_dev *dev;
1514 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1515 volatile union iavf_rx_desc *rxdp;
1516 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1518 while (nb_rx < nb_pkts) {
1519 rxdp = &rx_ring[rx_id];
1520 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1521 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1522 IAVF_RXD_QW1_STATUS_SHIFT;
1524 /* Check the DD bit */
1525 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1527 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1529 nmb = rte_mbuf_raw_alloc(rxq->mp);
1530 if (unlikely(!nmb)) {
1531 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1532 "queue_id=%u", rxq->port_id, rxq->queue_id);
1533 dev = &rte_eth_devices[rxq->port_id];
1534 dev->data->rx_mbuf_alloc_failed++;
1540 rxe = rxq->sw_ring[rx_id];
1542 if (rx_id == rxq->nb_rx_desc)
1545 /* Prefetch next mbuf */
1546 rte_prefetch0(rxq->sw_ring[rx_id]);
1548 /* When next RX descriptor is on a cache line boundary,
1549 * prefetch the next 4 RX descriptors and next 8 pointers
1552 if ((rx_id & 0x3) == 0) {
1553 rte_prefetch0(&rx_ring[rx_id]);
1554 rte_prefetch0(rxq->sw_ring[rx_id]);
1559 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1561 /* Set data buffer address and data length of the mbuf */
1562 rxdp->read.hdr_addr = 0;
1563 rxdp->read.pkt_addr = dma_addr;
1564 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1565 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1566 rxm->data_len = rx_packet_len;
1567 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1569 /* If this is the first buffer of the received packet, set the
1570 * pointer to the first mbuf of the packet and initialize its
1571 * context. Otherwise, update the total length and the number
1572 * of segments of the current scattered packet, and update the
1573 * pointer to the last mbuf of the current packet.
1577 first_seg->nb_segs = 1;
1578 first_seg->pkt_len = rx_packet_len;
1580 first_seg->pkt_len =
1581 (uint16_t)(first_seg->pkt_len +
1583 first_seg->nb_segs++;
1584 last_seg->next = rxm;
1587 /* If this is not the last buffer of the received packet,
1588 * update the pointer to the last mbuf of the current scattered
1589 * packet and continue to parse the RX ring.
1591 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1596 /* This is the last buffer of the received packet. If the CRC
1597 * is not stripped by the hardware:
1598 * - Subtract the CRC length from the total packet length.
1599 * - If the last buffer only contains the whole CRC or a part
1600 * of it, free the mbuf associated to the last buffer. If part
1601 * of the CRC is also contained in the previous mbuf, subtract
1602 * the length of that CRC part from the data length of the
1606 if (unlikely(rxq->crc_len > 0)) {
1607 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1608 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1609 rte_pktmbuf_free_seg(rxm);
1610 first_seg->nb_segs--;
1611 last_seg->data_len =
1612 (uint16_t)(last_seg->data_len -
1613 (RTE_ETHER_CRC_LEN - rx_packet_len));
1614 last_seg->next = NULL;
1616 rxm->data_len = (uint16_t)(rx_packet_len -
1620 first_seg->port = rxq->port_id;
1621 first_seg->ol_flags = 0;
1622 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1623 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1624 first_seg->packet_type =
1625 ptype_tbl[(uint8_t)((qword1 &
1626 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1628 if (pkt_flags & PKT_RX_RSS_HASH)
1629 first_seg->hash.rss =
1630 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1632 if (pkt_flags & PKT_RX_FDIR)
1633 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1635 first_seg->ol_flags |= pkt_flags;
1637 /* Prefetch data of first segment, if configured to do so. */
1638 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1639 first_seg->data_off));
1640 rx_pkts[nb_rx++] = first_seg;
1644 /* Record index of the next RX descriptor to probe. */
1645 rxq->rx_tail = rx_id;
1646 rxq->pkt_first_seg = first_seg;
1647 rxq->pkt_last_seg = last_seg;
1649 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1654 #define IAVF_LOOK_AHEAD 8
1656 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1658 volatile union iavf_rx_flex_desc *rxdp;
1659 struct rte_mbuf **rxep;
1660 struct rte_mbuf *mb;
1663 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1664 int32_t i, j, nb_rx = 0;
1666 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1668 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1669 rxep = &rxq->sw_ring[rxq->rx_tail];
1671 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1673 /* Make sure there is at least 1 packet to receive */
1674 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1677 /* Scan LOOK_AHEAD descriptors at a time to determine which
1678 * descriptors reference packets that are ready to be received.
1680 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1681 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1682 /* Read desc statuses backwards to avoid race condition */
1683 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1684 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1688 /* Compute how many status bits were set */
1689 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1690 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1694 /* Translate descriptor info to mbuf parameters */
1695 for (j = 0; j < nb_dd; j++) {
1696 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1698 i * IAVF_LOOK_AHEAD + j);
1701 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1702 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1703 mb->data_len = pkt_len;
1704 mb->pkt_len = pkt_len;
1707 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1708 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1709 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
1710 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1711 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1712 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1714 mb->ol_flags |= pkt_flags;
1717 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1718 rxq->rx_stage[i + j] = rxep[j];
1720 if (nb_dd != IAVF_LOOK_AHEAD)
1724 /* Clear software ring entries */
1725 for (i = 0; i < nb_rx; i++)
1726 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1732 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1734 volatile union iavf_rx_desc *rxdp;
1735 struct rte_mbuf **rxep;
1736 struct rte_mbuf *mb;
1740 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1741 int32_t i, j, nb_rx = 0;
1743 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1745 rxdp = &rxq->rx_ring[rxq->rx_tail];
1746 rxep = &rxq->sw_ring[rxq->rx_tail];
1748 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1749 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1750 IAVF_RXD_QW1_STATUS_SHIFT;
1752 /* Make sure there is at least 1 packet to receive */
1753 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1756 /* Scan LOOK_AHEAD descriptors at a time to determine which
1757 * descriptors reference packets that are ready to be received.
1759 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1760 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1761 /* Read desc statuses backwards to avoid race condition */
1762 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1763 qword1 = rte_le_to_cpu_64(
1764 rxdp[j].wb.qword1.status_error_len);
1765 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1766 IAVF_RXD_QW1_STATUS_SHIFT;
1771 /* Compute how many status bits were set */
1772 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1773 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1777 /* Translate descriptor info to mbuf parameters */
1778 for (j = 0; j < nb_dd; j++) {
1779 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1780 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1783 qword1 = rte_le_to_cpu_64
1784 (rxdp[j].wb.qword1.status_error_len);
1785 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1786 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1787 mb->data_len = pkt_len;
1788 mb->pkt_len = pkt_len;
1790 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1791 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1793 ptype_tbl[(uint8_t)((qword1 &
1794 IAVF_RXD_QW1_PTYPE_MASK) >>
1795 IAVF_RXD_QW1_PTYPE_SHIFT)];
1797 if (pkt_flags & PKT_RX_RSS_HASH)
1798 mb->hash.rss = rte_le_to_cpu_32(
1799 rxdp[j].wb.qword0.hi_dword.rss);
1801 if (pkt_flags & PKT_RX_FDIR)
1802 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1804 mb->ol_flags |= pkt_flags;
1807 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1808 rxq->rx_stage[i + j] = rxep[j];
1810 if (nb_dd != IAVF_LOOK_AHEAD)
1814 /* Clear software ring entries */
1815 for (i = 0; i < nb_rx; i++)
1816 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1821 static inline uint16_t
1822 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1823 struct rte_mbuf **rx_pkts,
1827 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1829 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1831 for (i = 0; i < nb_pkts; i++)
1832 rx_pkts[i] = stage[i];
1834 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1835 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1841 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1843 volatile union iavf_rx_desc *rxdp;
1844 struct rte_mbuf **rxep;
1845 struct rte_mbuf *mb;
1846 uint16_t alloc_idx, i;
1850 /* Allocate buffers in bulk */
1851 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1852 (rxq->rx_free_thresh - 1));
1853 rxep = &rxq->sw_ring[alloc_idx];
1854 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1855 rxq->rx_free_thresh);
1856 if (unlikely(diag != 0)) {
1857 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1861 rxdp = &rxq->rx_ring[alloc_idx];
1862 for (i = 0; i < rxq->rx_free_thresh; i++) {
1863 if (likely(i < (rxq->rx_free_thresh - 1)))
1864 /* Prefetch next mbuf */
1865 rte_prefetch0(rxep[i + 1]);
1868 rte_mbuf_refcnt_set(mb, 1);
1870 mb->data_off = RTE_PKTMBUF_HEADROOM;
1872 mb->port = rxq->port_id;
1873 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1874 rxdp[i].read.hdr_addr = 0;
1875 rxdp[i].read.pkt_addr = dma_addr;
1878 /* Update rx tail register */
1880 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1882 rxq->rx_free_trigger =
1883 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1884 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1885 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
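/*
 * Example of the trigger arithmetic above: with rx_free_thresh = 32 the
 * trigger starts at 31 (see reset_rx_queue()), advances to 63, 95, ...
 * after each bulk refill, and wraps back to 31 once it would pass the
 * end of the ring.
 */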
1890 static inline uint16_t
1891 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1893 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1899 if (rxq->rx_nb_avail)
1900 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1902 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1903 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1905 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1906 rxq->rx_next_avail = 0;
1907 rxq->rx_nb_avail = nb_rx;
1908 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1910 if (rxq->rx_tail > rxq->rx_free_trigger) {
1911 if (iavf_rx_alloc_bufs(rxq) != 0) {
1914 /* TODO: count rx_mbuf_alloc_failed here */
1916 rxq->rx_nb_avail = 0;
1917 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1918 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1919 rxq->sw_ring[j] = rxq->rx_stage[i];
1925 if (rxq->rx_tail >= rxq->nb_rx_desc)
1928 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1929 rxq->port_id, rxq->queue_id,
1930 rxq->rx_tail, nb_rx);
1932 if (rxq->rx_nb_avail)
1933 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1939 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1940 struct rte_mbuf **rx_pkts,
1943 uint16_t nb_rx = 0, n, count;
1945 if (unlikely(nb_pkts == 0))
1948 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1949 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1952 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1953 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1954 nb_rx = (uint16_t)(nb_rx + count);
1955 nb_pkts = (uint16_t)(nb_pkts - count);
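/*
 * Usage sketch (hypothetical application code): bursts larger than
 * IAVF_RX_MAX_BURST are transparently split by the loop above, so a
 * caller may request any burst size.
 */
static __rte_unused void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[128];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]); /* application work goes here */
}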
1964 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1966 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1967 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1968 uint16_t nb_tx_desc = txq->nb_tx_desc;
1969 uint16_t desc_to_clean_to;
1970 uint16_t nb_tx_to_clean;
1972 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1974 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1975 if (desc_to_clean_to >= nb_tx_desc)
1976 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1978 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1979 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1980 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1981 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1982 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1983 "(port=%d queue=%d)", desc_to_clean_to,
1984 txq->port_id, txq->queue_id);
1988 if (last_desc_cleaned > desc_to_clean_to)
1989 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1992 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1995 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1997 txq->last_desc_cleaned = desc_to_clean_to;
1998 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
2003 /* Check if the context descriptor is needed for TX offloading */
2004 static inline uint16_t
2005 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
2007 if (flags & PKT_TX_TCP_SEG)
2009 if (flags & PKT_TX_VLAN_PKT &&
2010 vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
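/*
 * Example of the rule above: a TSO packet always needs one context
 * descriptor; a packet with PKT_TX_VLAN_PKT needs one only when the tag
 * must be written into L2TAG2, i.e. when the data descriptor cannot
 * carry it in L2TAG1.
 */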
2016 iavf_txd_enable_checksum(uint64_t ol_flags,
2018 uint32_t *td_offset,
2019 union iavf_tx_offload tx_offload)
2022 *td_offset |= (tx_offload.l2_len >> 1) <<
2023 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
2025 /* Enable L3 checksum offloads */
2026 if (ol_flags & PKT_TX_IP_CKSUM) {
2027 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
2028 *td_offset |= (tx_offload.l3_len >> 2) <<
2029 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2030 } else if (ol_flags & PKT_TX_IPV4) {
2031 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
2032 *td_offset |= (tx_offload.l3_len >> 2) <<
2033 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2034 } else if (ol_flags & PKT_TX_IPV6) {
2035 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2036 *td_offset |= (tx_offload.l3_len >> 2) <<
2037 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2040 if (ol_flags & PKT_TX_TCP_SEG) {
2041 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2042 *td_offset |= (tx_offload.l4_len >> 2) <<
2043 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2047 /* Enable L4 checksum offloads */
2048 switch (ol_flags & PKT_TX_L4_MASK) {
2049 case PKT_TX_TCP_CKSUM:
2050 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2051 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2052 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2054 case PKT_TX_SCTP_CKSUM:
2055 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2056 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2057 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2059 case PKT_TX_UDP_CKSUM:
2060 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2061 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2062 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2069 /* set TSO context descriptor
2070 * support IP -> L4 and IP -> IP -> L4
2072 static inline uint64_t
2073 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2075 uint64_t ctx_desc = 0;
2076 uint32_t cd_cmd, hdr_len, cd_tso_len;
2078 if (!tx_offload.l4_len) {
2079 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2083 hdr_len = tx_offload.l2_len +
2087 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2088 cd_tso_len = mbuf->pkt_len - hdr_len;
2089 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2090 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2091 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
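/*
 * Worked example of the math above, assuming a TCP/IPv4 TSO packet with
 * l2_len = 14, l3_len = 20 and l4_len = 20: hdr_len = 54, so for
 * pkt_len = 9014 the TSO payload length written to the context
 * descriptor is 9014 - 54 = 8960, which HW then slices into
 * tso_segsz-byte segments.
 */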
2096 /* Construct the tx flags */
2097 static inline uint64_t
2098 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2101 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2102 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2103 ((uint64_t)td_offset <<
2104 IAVF_TXD_QW1_OFFSET_SHIFT) |
2106 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2107 ((uint64_t)td_tag <<
2108 IAVF_TXD_QW1_L2TAG1_SHIFT));
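/*
 * Example of the composition above: for a plain 60-byte frame with no
 * offloads, QW1 packs IAVF_TX_DESC_DTYPE_DATA, the command bits (e.g.
 * EOP/RS), a zero offset field, the buffer size 60 and a zero L2 tag
 * into a single little-endian 64-bit word.
 */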
2113 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2115 volatile struct iavf_tx_desc *txd;
2116 volatile struct iavf_tx_desc *txr;
2117 struct iavf_tx_queue *txq;
2118 struct iavf_tx_entry *sw_ring;
2119 struct iavf_tx_entry *txe, *txn;
2120 struct rte_mbuf *tx_pkt;
2121 struct rte_mbuf *m_seg;
2132 uint64_t buf_dma_addr;
2133 uint16_t cd_l2tag2 = 0;
2134 union iavf_tx_offload tx_offload = {0};
2137 sw_ring = txq->sw_ring;
2139 tx_id = txq->tx_tail;
2140 txe = &sw_ring[tx_id];
2142 /* Check if the descriptor ring needs to be cleaned. */
2143 if (txq->nb_free < txq->free_thresh)
2144 (void)iavf_xmit_cleanup(txq);
2146 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2151 tx_pkt = *tx_pkts++;
2152 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2154 ol_flags = tx_pkt->ol_flags;
2155 tx_offload.l2_len = tx_pkt->l2_len;
2156 tx_offload.l3_len = tx_pkt->l3_len;
2157 tx_offload.l4_len = tx_pkt->l4_len;
2158 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2159 /* Calculate the number of context descriptors needed. */
2160 nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
2162 /* The number of descriptors that must be allocated for
2163 * a packet equals the number of segments of that
2164 * packet, plus 1 context descriptor if needed.
2166 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2167 tx_last = (uint16_t)(tx_id + nb_used - 1);
2170 if (tx_last >= txq->nb_tx_desc)
2171 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2173 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2174 " tx_first=%u tx_last=%u",
2175 txq->port_id, txq->queue_id, tx_id, tx_last);
2177 if (nb_used > txq->nb_free) {
2178 if (iavf_xmit_cleanup(txq)) {
2183 if (unlikely(nb_used > txq->rs_thresh)) {
2184 while (nb_used > txq->nb_free) {
2185 if (iavf_xmit_cleanup(txq)) {
2194 /* Descriptor based VLAN insertion */
2195 if (ol_flags & PKT_TX_VLAN_PKT &&
2196 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
2197 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2198 td_tag = tx_pkt->vlan_tci;
2201 /* According to the datasheet, bit 2 is reserved and must be
2206 /* Enable checksum offloading */
2207 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2208 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2209 &td_offset, tx_offload);
2212 /* Setup TX context descriptor if required */
2213 uint64_t cd_type_cmd_tso_mss =
2214 IAVF_TX_DESC_DTYPE_CONTEXT;
2215 volatile struct iavf_tx_context_desc *ctx_txd =
2216 (volatile struct iavf_tx_context_desc *)
2219 txn = &sw_ring[txe->next_id];
2220 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2222 rte_pktmbuf_free_seg(txe->mbuf);
2227 if (ol_flags & PKT_TX_TCP_SEG)
2228 cd_type_cmd_tso_mss |=
2229 iavf_set_tso_ctx(tx_pkt, tx_offload);
2231 if (ol_flags & PKT_TX_VLAN_PKT &&
2232 txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
2233 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
2234 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
2235 cd_l2tag2 = tx_pkt->vlan_tci;
2238 ctx_txd->type_cmd_tso_mss =
2239 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2240 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2242 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2243 txe->last_id = tx_last;
2244 tx_id = txe->next_id;

		m_seg = tx_pkt;
		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];

			if (txe->mbuf)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/* Setup TX Descriptor */
			slen = m_seg->data_len;
			buf_dma_addr = rte_mbuf_data_iova(m_seg);
			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
								   td_offset,
								   slen,
								   td_tag);

			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg);

		/* The last packet data descriptor needs End Of Packet (EOP) */
		td_cmd |= IAVF_TX_DESC_CMD_EOP;
		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
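		/* Setting EOP here relies on txd still pointing at the last
		 * data descriptor written by the loop above; the final
		 * cmd_type_offset_bsz update below ORs EOP (and RS, when
		 * requested) into that same descriptor.
		 */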

		if (txq->nb_used >= txq->rs_thresh) {
			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
				   "%4u (port=%d queue=%d)",
				   tx_last, txq->port_id, txq->queue_id);

			td_cmd |= IAVF_TX_DESC_CMD_RS;

			/* Update txq RS bit counters */
			txq->nb_used = 0;
		}

		txd->cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
					 IAVF_TXD_QW1_CMD_SHIFT);
		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
	}

end_of_tx:
	rte_wmb();

	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   txq->port_id, txq->queue_id, tx_id, nb_tx);

	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
	txq->tx_tail = tx_id;

	return nb_tx;
}
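/* A note on RS batching: the RS bit is requested roughly once per rs_thresh
 * descriptors rather than once per packet, so the device writes back
 * completion status once per batch. With assumed example values
 * nb_tx_desc = 1024 and rs_thresh = 32, a full traversal of the ring costs
 * 32 status write-backs instead of 1024.
 */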

/* TX prep functions */
uint16_t
iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	int i, ret;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
		if (!(ol_flags & PKT_TX_TCP_SEG)) {
			if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
				rte_errno = EINVAL;
				return i;
			}
		} else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
			   (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
			/* MSS outside the range are considered malicious */
			rte_errno = EINVAL;
			return i;
		}

		if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}
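/* iavf_prep_pkts() is installed as dev->tx_pkt_prepare for the scalar Tx
 * path selected below, so rte_eth_tx_prepare() applies these sanity checks
 * and the rte_net_intel_cksum_prepare() pseudo-header fixup before a burst
 * reaches iavf_xmit_pkts().
 */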

/* choose rx function */
void
iavf_set_rx_function(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

#ifdef RTE_ARCH_X86
	struct iavf_rx_queue *rxq;
	int i;
	bool use_avx2 = false;
#ifdef CC_AVX512_SUPPORT
	bool use_avx512 = false;
#endif

	if (!iavf_rx_vec_dev_check(dev) &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			(void)iavf_rxq_vec_setup(rxq);
		}

		if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
		     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			use_avx2 = true;
#ifdef CC_AVX512_SUPPORT
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
			use_avx512 = true;
#endif

		if (dev->data->scattered_rx) {
			PMD_DRV_LOG(DEBUG,
				    "Using %sVector Scattered Rx (port %d).",
				    use_avx2 ? "avx2 " : "",
				    dev->data->port_id);
			if (vf->vf_res->vf_cap_flags &
				VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
					iavf_recv_scattered_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512)
					dev->rx_pkt_burst =
						iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
#endif
			} else {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_scattered_pkts_vec_avx2 :
					iavf_recv_scattered_pkts_vec;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512)
					dev->rx_pkt_burst =
						iavf_recv_scattered_pkts_vec_avx512;
#endif
			}
		} else {
			PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
				    use_avx2 ? "avx2 " : "",
				    dev->data->port_id);
			if (vf->vf_res->vf_cap_flags &
				VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_pkts_vec_avx2_flex_rxd :
					iavf_recv_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512)
					dev->rx_pkt_burst =
						iavf_recv_pkts_vec_avx512_flex_rxd;
#endif
			} else {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_pkts_vec_avx2 :
					iavf_recv_pkts_vec;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512)
					dev->rx_pkt_burst =
						iavf_recv_pkts_vec_avx512;
#endif
			}
		}

		return;
	}
#endif

	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
			    dev->data->port_id);
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
		else
			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
	} else if (adapter->rx_bulk_alloc_allowed) {
		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
			    dev->data->port_id);
		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
	} else {
		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
			    dev->data->port_id);
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
		else
			dev->rx_pkt_burst = iavf_recv_pkts;
	}
}
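/* Rx selection order is vector first (AVX512, then AVX2, then the SSE
 * baseline) whenever the queues pass iavf_rx_vec_dev_check(), falling back
 * to the scattered, bulk-allocation or basic scalar callbacks. As an
 * illustration (a deployment assumption, not something this code does):
 * launching with the EAL option --force-max-simd-bitwidth=64 keeps
 * rte_vect_get_max_simd_bitwidth() below RTE_VECT_SIMD_128 and therefore
 * pins the port to the scalar callbacks.
 */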

/* choose tx function */
void
iavf_set_tx_function(struct rte_eth_dev *dev)
{
#ifdef RTE_ARCH_X86
	struct iavf_tx_queue *txq;
	int i;
	bool use_avx2 = false;
#ifdef CC_AVX512_SUPPORT
	bool use_avx512 = false;
#endif

	if (!iavf_tx_vec_dev_check(dev) &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
		     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			use_avx2 = true;
#ifdef CC_AVX512_SUPPORT
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
			use_avx512 = true;
#endif

		PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
			    use_avx2 ? "avx2 " : "",
			    dev->data->port_id);
		dev->tx_pkt_burst = use_avx2 ?
				    iavf_xmit_pkts_vec_avx2 :
				    iavf_xmit_pkts_vec;
#ifdef CC_AVX512_SUPPORT
		if (use_avx512)
			dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
#endif
		dev->tx_pkt_prepare = NULL;

		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			if (!txq)
				continue;
#ifdef CC_AVX512_SUPPORT
			if (use_avx512)
				iavf_txq_vec_setup_avx512(txq);
			else
				iavf_txq_vec_setup(txq);
#else
			iavf_txq_vec_setup(txq);
#endif
		}

		return;
	}
#endif

	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
		    dev->data->port_id);
	dev->tx_pkt_burst = iavf_xmit_pkts;
	dev->tx_pkt_prepare = iavf_prep_pkts;
}
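/* The vector Tx path deliberately clears tx_pkt_prepare: configurations
 * that would need the checks in iavf_prep_pkts() are steered away from it
 * by iavf_tx_vec_dev_check(), while the scalar path keeps the prepare hook.
 */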

static int
iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
			  uint32_t free_cnt)
{
	struct iavf_tx_entry *swr_ring = txq->sw_ring;
	uint16_t i, tx_last, tx_id;
	uint16_t nb_tx_free_last;
	uint16_t nb_tx_to_clean;
	uint32_t pkt_cnt;

	/* Start free mbuf from the next of tx_tail */
	tx_last = txq->tx_tail;
	tx_id = swr_ring[tx_last].next_id;

	if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
		return 0;

	nb_tx_to_clean = txq->nb_free;
	nb_tx_free_last = txq->nb_free;
	if (!free_cnt)
		free_cnt = txq->nb_tx_desc;

	/* Loop through swr_ring to count the amount of
	 * freeable mbufs and packets.
	 */
	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
		for (i = 0; i < nb_tx_to_clean &&
			pkt_cnt < free_cnt &&
			tx_id != tx_last; i++) {
			if (swr_ring[tx_id].mbuf != NULL) {
				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
				swr_ring[tx_id].mbuf = NULL;

				/*
				 * last segment in the packet,
				 * increment packet count
				 */
				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
			}

			tx_id = swr_ring[tx_id].next_id;
		}

		if (txq->rs_thresh > txq->nb_tx_desc -
			txq->nb_free || tx_id == tx_last)
			break;

		if (pkt_cnt < free_cnt) {
			if (iavf_xmit_cleanup(txq))
				break;

			nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
			nb_tx_free_last = txq->nb_free;
		}
	}

	return (int)pkt_cnt;
}
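/* The walk above alternates between freeing mbufs the hardware has already
 * completed (bounded by nb_free) and calling iavf_xmit_cleanup() to reclaim
 * more, stopping once free_cnt packets are freed, the scan laps back to
 * tx_tail, or no further progress is possible.
 */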
int
iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;

	return iavf_tx_done_cleanup_full(q, free_cnt);
}
void
iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct iavf_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mp;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = true;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}
void
iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct iavf_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_free_thresh = txq->free_thresh;
	qinfo->conf.tx_rs_thresh = txq->rs_thresh;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

/* Get the number of used descriptors of a rx queue */
uint32_t
iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
{
#define IAVF_RXQ_SCAN_INTERVAL 4
	volatile union iavf_rx_desc *rxdp;
	struct iavf_rx_queue *rxq;
	uint16_t desc = 0;

	rxq = dev->data->rx_queues[queue_id];
	rxdp = &rxq->rx_ring[rxq->rx_tail];

	while ((desc < rxq->nb_rx_desc) &&
	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
		 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
	       (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
		/* Check the DD bit of a rx descriptor of each 4 in a group,
		 * to avoid checking too frequently and downgrading performance
		 * too much.
		 */
		desc += IAVF_RXQ_SCAN_INTERVAL;
		rxdp += IAVF_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}
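/* Probing only every IAVF_RXQ_SCAN_INTERVAL-th descriptor makes the count a
 * multiple of 4, which is accurate enough for rte_eth_rx_queue_count()
 * while reading far fewer descriptors than a per-slot scan would.
 */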
int
iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
{
	struct iavf_rx_queue *rxq = rx_queue;
	volatile uint64_t *status;
	uint64_t mask;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
	mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
		<< IAVF_RXD_QW1_STATUS_SHIFT);
	if (*status & mask)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}
int
iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
{
	struct iavf_tx_queue *txq = tx_queue;
	volatile uint64_t *status;
	uint64_t mask, expect;
	uint32_t desc;

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	desc = txq->tx_tail + offset;
	/* go to next desc that has the RS bit */
	desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
		txq->rs_thresh;
	if (desc >= txq->nb_tx_desc) {
		desc -= txq->nb_tx_desc;
		if (desc >= txq->nb_tx_desc)
			desc -= txq->nb_tx_desc;
	}

	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
	expect = rte_cpu_to_le_64(
		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
	if ((*status & mask) == expect)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
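/* Completion status is only written back on descriptors that carried the RS
 * bit, so the probe above rounds the queried slot up to the next rs_thresh
 * boundary. Worked example with assumed values rs_thresh = 32, tx_tail = 10,
 * offset = 5: desc = 15 rounds up to 32, and the DONE/FULL answer reflects
 * the whole batch containing slot 15.
 */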

const uint32_t *
iavf_get_default_ptype_table(void)
{
	static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
		__rte_cache_aligned = {
		/* L2 types */
		/* [0] reserved */
		[1] = RTE_PTYPE_L2_ETHER,
		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
		/* [3] - [5] reserved */
		[6] = RTE_PTYPE_L2_ETHER_LLDP,
		/* [7] - [10] reserved */
		[11] = RTE_PTYPE_L2_ETHER_ARP,
		/* [12] - [21] reserved */

		/* Non tunneled IPv4 */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		/* [25] reserved */
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,

		/* IPv4 --> IPv4 */
		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [32] reserved */
		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> IPv6 */
		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [39] reserved */
		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN */
		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT,

		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [47] reserved */
		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [54] reserved */
		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [62] reserved */
		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [69] reserved */
		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,
		/* [73] - [87] reserved */

		/* Non tunneled IPv6 */
		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		/* [91] reserved */
		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,

		/* IPv6 --> IPv4 */
		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [98] reserved */
		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> IPv6 */
		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [105] reserved */
		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN */
		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT,

		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [113] reserved */
		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [120] reserved */
		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [128] reserved */
		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [135] reserved */
		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,
		/* [139] - [299] reserved */

		/* PPPoE */
		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
		[301] = RTE_PTYPE_L2_ETHER_PPPOE,

		/* PPPoE --> IPv4 */
		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,

		/* PPPoE --> IPv6 */
		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,
		/* [314] - [324] reserved */

		/* IPv4/IPv6 --> GTPC/GTPU */
		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU,
		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU,

		/* IPv4 --> GTPU --> IPv4 */
		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GTPU --> IPv4 */
		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GTPU --> IPv6 */
		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GTPU --> IPv6 */
		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,
		/* [351] - [371] reserved */

		/* IPv4 --> UDP ECPRI */
		[372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,

		/* IPV6 --> UDP ECPRI */
		[382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		/* All others reserved */
	};

	return ptype_tbl;
}
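/* The Rx burst functions index this table with the hardware-reported ptype
 * (8 bits in the legacy descriptor; wider in the flex descriptor layouts)
 * and store the result directly in mbuf->packet_type. Slots not initialized
 * above are zero, i.e. RTE_PTYPE_UNKNOWN.
 */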