1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
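/* Map a requested protocol extraction type to the RXDID programmed for the
 * queue; unknown types fall back to the OVS flex descriptor format
 * (IAVF_RXDID_COMMS_OVS_1).
 */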
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
63 /* The following constraints must be satisfied:
64 * thresh < rxq->nb_rx_desc
66 if (thresh >= nb_desc) {
67 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76 uint16_t tx_free_thresh)
78 /* TX descriptors will have their RS bit set after tx_rs_thresh
79 * descriptors have been used. The TX descriptor ring will be cleaned
80 * after tx_free_thresh descriptors are used or if the number of
81 * descriptors required to transmit a packet is greater than the
82 * number of free TX descriptors.
84 * The following constraints must be satisfied:
85 * - tx_rs_thresh must be less than the size of the ring minus 2.
86 * - tx_free_thresh must be less than the size of the ring minus 3.
87 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
88 * - tx_rs_thresh must be a divisor of the ring size.
90 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91 * race condition, hence the maximum threshold constraints. When set
92 * to zero, use default values.
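 *
 * For example, nb_desc = 512 with tx_rs_thresh = 32 and tx_free_thresh = 64
 * satisfies all of the above: 32 < 510, 64 < 509, 32 <= 64 and 512 % 32 == 0.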
94 if (tx_rs_thresh >= (nb_desc - 2)) {
95 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96 "number of TX descriptors (%u) minus 2",
97 tx_rs_thresh, nb_desc);
100 if (tx_free_thresh >= (nb_desc - 3)) {
101 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102 "number of TX descriptors (%u) minus 3.",
103 tx_free_thresh, nb_desc);
106 if (tx_rs_thresh > tx_free_thresh) {
107 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108 "equal to tx_free_thresh (%u).",
109 tx_rs_thresh, tx_free_thresh);
112 if ((nb_desc % tx_rs_thresh) != 0) {
113 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114 "number of TX descriptors (%u).",
115 tx_rs_thresh, nb_desc);
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
125 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
131 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
138 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
144 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
153 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155 "rxq->rx_free_thresh=%d, "
156 "IAVF_RX_MAX_BURST=%d",
157 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
159 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161 "rxq->nb_rx_desc=%d, "
162 "rxq->rx_free_thresh=%d",
163 rxq->nb_rx_desc, rxq->rx_free_thresh);
170 reset_rx_queue(struct iavf_rx_queue *rxq)
178 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
180 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181 ((volatile char *)rxq->rx_ring)[i] = 0;
183 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
185 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
189 rxq->rx_nb_avail = 0;
190 rxq->rx_next_avail = 0;
191 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
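/* rx_free_trigger is the ring index past which the bulk-allocation Rx path
 * refills rx_free_thresh mbufs at a time (see iavf_rx_alloc_bufs()).
 */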
195 rxq->pkt_first_seg = NULL;
196 rxq->pkt_last_seg = NULL;
200 reset_tx_queue(struct iavf_tx_queue *txq)
202 struct iavf_tx_entry *txe;
207 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
212 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
213 for (i = 0; i < size; i++)
214 ((volatile char *)txq->tx_ring)[i] = 0;
216 prev = (uint16_t)(txq->nb_tx_desc - 1);
217 for (i = 0; i < txq->nb_tx_desc; i++) {
218 txq->tx_ring[i].cmd_type_offset_bsz =
219 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
222 txe[prev].next_id = i;
229 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
230 txq->nb_free = txq->nb_tx_desc - 1;
232 txq->next_dd = txq->rs_thresh - 1;
233 txq->next_rs = txq->rs_thresh - 1;
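/* Allocate one mbuf per Rx descriptor and program its DMA address into the
 * corresponding hardware descriptor so the queue can be started.
 */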
237 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
239 volatile union iavf_rx_desc *rxd;
240 struct rte_mbuf *mbuf = NULL;
244 for (i = 0; i < rxq->nb_rx_desc; i++) {
245 mbuf = rte_mbuf_raw_alloc(rxq->mp);
246 if (unlikely(!mbuf)) {
247 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
251 rte_mbuf_refcnt_set(mbuf, 1);
253 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
255 mbuf->port = rxq->port_id;
258 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
260 rxd = &rxq->rx_ring[i];
261 rxd->read.pkt_addr = dma_addr;
262 rxd->read.hdr_addr = 0;
263 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
268 rxq->sw_ring[i] = mbuf;
275 release_rxq_mbufs(struct iavf_rx_queue *rxq)
282 for (i = 0; i < rxq->nb_rx_desc; i++) {
283 if (rxq->sw_ring[i]) {
284 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
285 rxq->sw_ring[i] = NULL;
290 if (rxq->rx_nb_avail == 0)
292 for (i = 0; i < rxq->rx_nb_avail; i++) {
293 struct rte_mbuf *mbuf;
295 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
296 rte_pktmbuf_free_seg(mbuf);
298 rxq->rx_nb_avail = 0;
302 release_txq_mbufs(struct iavf_tx_queue *txq)
306 if (!txq || !txq->sw_ring) {
307 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
311 for (i = 0; i < txq->nb_tx_desc; i++) {
312 if (txq->sw_ring[i].mbuf) {
313 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
314 txq->sw_ring[i].mbuf = NULL;
319 static const struct iavf_rxq_ops def_rxq_ops = {
320 .release_mbufs = release_rxq_mbufs,
323 static const struct iavf_txq_ops def_txq_ops = {
324 .release_mbufs = release_txq_mbufs,
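/* The rxd_to_pkt_fields handlers below copy the RSS hash, flow director ID
 * and (for the AUX formats) protocol extraction metadata from a 32B flex Rx
 * descriptor into the mbuf. The variant used by a queue is selected in
 * iavf_select_rxd_to_pkt_fields_handler() based on the negotiated RXDID.
 */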
328 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
330 volatile union iavf_rx_flex_desc *rxdp)
332 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
333 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
334 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
338 if (desc->flow_id != 0xFFFFFFFF) {
339 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
340 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
343 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
344 stat_err = rte_le_to_cpu_16(desc->status_error0);
345 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
346 mb->ol_flags |= PKT_RX_RSS_HASH;
347 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
353 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
355 volatile union iavf_rx_flex_desc *rxdp)
357 volatile struct iavf_32b_rx_flex_desc_comms *desc =
358 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
361 stat_err = rte_le_to_cpu_16(desc->status_error0);
362 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
363 mb->ol_flags |= PKT_RX_RSS_HASH;
364 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
367 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
368 if (desc->flow_id != 0xFFFFFFFF) {
369 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
370 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
373 if (rxq->xtr_ol_flag) {
374 uint32_t metadata = 0;
376 stat_err = rte_le_to_cpu_16(desc->status_error1);
378 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
379 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
381 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
383 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
386 mb->ol_flags |= rxq->xtr_ol_flag;
388 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
395 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
397 volatile union iavf_rx_flex_desc *rxdp)
399 volatile struct iavf_32b_rx_flex_desc_comms *desc =
400 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
403 stat_err = rte_le_to_cpu_16(desc->status_error0);
404 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
405 mb->ol_flags |= PKT_RX_RSS_HASH;
406 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
409 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
410 if (desc->flow_id != 0xFFFFFFFF) {
411 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
412 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
415 if (rxq->xtr_ol_flag) {
416 uint32_t metadata = 0;
418 if (desc->flex_ts.flex.aux0 != 0xFFFF)
419 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
420 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
421 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
424 mb->ol_flags |= rxq->xtr_ol_flag;
426 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
433 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
436 case IAVF_RXDID_COMMS_AUX_VLAN:
437 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
438 rxq->rxd_to_pkt_fields =
439 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
441 case IAVF_RXDID_COMMS_AUX_IPV4:
442 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
443 rxq->rxd_to_pkt_fields =
444 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
446 case IAVF_RXDID_COMMS_AUX_IPV6:
447 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
448 rxq->rxd_to_pkt_fields =
449 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
451 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
453 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
454 rxq->rxd_to_pkt_fields =
455 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
457 case IAVF_RXDID_COMMS_AUX_TCP:
458 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
459 rxq->rxd_to_pkt_fields =
460 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
462 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
464 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
465 rxq->rxd_to_pkt_fields =
466 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
468 case IAVF_RXDID_COMMS_OVS_1:
469 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
472 /* update this according to the RXDID for FLEX_DESC_NONE */
473 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
477 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
478 rxq->xtr_ol_flag = 0;
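/* Illustrative usage only: an application that registered the protocol
 * extraction dynfield/dynflags (exported via rte_pmd_iavf.h) would typically
 * test a flag such as rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask in
 * mbuf->ol_flags and then read the raw metadata with
 * RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mbuf).
 */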
482 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
483 uint16_t nb_desc, unsigned int socket_id,
484 const struct rte_eth_rxconf *rx_conf,
485 struct rte_mempool *mp)
487 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
488 struct iavf_adapter *ad =
489 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
490 struct iavf_info *vf =
491 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
492 struct iavf_vsi *vsi = &vf->vsi;
493 struct iavf_rx_queue *rxq;
494 const struct rte_memzone *mz;
498 uint16_t rx_free_thresh;
500 PMD_INIT_FUNC_TRACE();
502 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
503 nb_desc > IAVF_MAX_RING_DESC ||
504 nb_desc < IAVF_MIN_RING_DESC) {
505 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
510 /* Check free threshold */
511 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
512 IAVF_DEFAULT_RX_FREE_THRESH :
513 rx_conf->rx_free_thresh;
514 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
517 /* Free memory if needed */
518 if (dev->data->rx_queues[queue_idx]) {
519 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
520 dev->data->rx_queues[queue_idx] = NULL;
523 /* Allocate the rx queue data structure */
524 rxq = rte_zmalloc_socket("iavf rxq",
525 sizeof(struct iavf_rx_queue),
529 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
530 "rx queue data structure");
534 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
535 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
537 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
538 rxq->proto_xtr = proto_xtr;
540 rxq->rxdid = IAVF_RXDID_LEGACY_1;
541 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
544 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
547 rxq->nb_rx_desc = nb_desc;
548 rxq->rx_free_thresh = rx_free_thresh;
549 rxq->queue_id = queue_idx;
550 rxq->port_id = dev->data->port_id;
551 rxq->crc_len = 0; /* crc stripping by default */
552 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
556 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
557 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
559 /* Allocate the software ring. */
560 len = nb_desc + IAVF_RX_MAX_BURST;
562 rte_zmalloc_socket("iavf rx sw ring",
563 sizeof(struct rte_mbuf *) * len,
567 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
572 /* Allocate the maximum number of RX ring hardware descriptors with
573 * a little more to support bulk allocation.
575 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
576 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
578 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
579 ring_size, IAVF_RING_BASE_ALIGN,
582 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
583 rte_free(rxq->sw_ring);
587 /* Zero all the descriptors in the ring. */
588 memset(mz->addr, 0, ring_size);
589 rxq->rx_ring_phys_addr = mz->iova;
590 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
595 dev->data->rx_queues[queue_idx] = rxq;
596 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
597 rxq->ops = &def_rxq_ops;
599 if (check_rx_bulk_allow(rxq) == true) {
600 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
601 "satisfied. Rx Burst Bulk Alloc function will be "
602 "used on port=%d, queue=%d.",
603 rxq->port_id, rxq->queue_id);
605 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
606 "not satisfied, Scattered Rx is requested "
607 "on port=%d, queue=%d.",
608 rxq->port_id, rxq->queue_id);
609 ad->rx_bulk_alloc_allowed = false;
612 if (check_rx_vec_allow(rxq) == false)
613 ad->rx_vec_allowed = false;
619 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
622 unsigned int socket_id,
623 const struct rte_eth_txconf *tx_conf)
625 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
626 struct iavf_tx_queue *txq;
627 const struct rte_memzone *mz;
629 uint16_t tx_rs_thresh, tx_free_thresh;
632 PMD_INIT_FUNC_TRACE();
634 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
636 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
637 nb_desc > IAVF_MAX_RING_DESC ||
638 nb_desc < IAVF_MIN_RING_DESC) {
639 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
644 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
645 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
646 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
647 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
648 check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
650 /* Free memory if needed. */
651 if (dev->data->tx_queues[queue_idx]) {
652 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
653 dev->data->tx_queues[queue_idx] = NULL;
656 /* Allocate the TX queue data structure. */
657 txq = rte_zmalloc_socket("iavf txq",
658 sizeof(struct iavf_tx_queue),
662 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
663 "tx queue structure");
667 txq->nb_tx_desc = nb_desc;
668 txq->rs_thresh = tx_rs_thresh;
669 txq->free_thresh = tx_free_thresh;
670 txq->queue_id = queue_idx;
671 txq->port_id = dev->data->port_id;
672 txq->offloads = offloads;
673 txq->tx_deferred_start = tx_conf->tx_deferred_start;
675 /* Allocate software ring */
677 rte_zmalloc_socket("iavf tx sw ring",
678 sizeof(struct iavf_tx_entry) * nb_desc,
682 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
687 /* Allocate TX hardware ring descriptors. */
688 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
689 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
690 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
691 ring_size, IAVF_RING_BASE_ALIGN,
694 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
695 rte_free(txq->sw_ring);
699 txq->tx_ring_phys_addr = mz->iova;
700 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
705 dev->data->tx_queues[queue_idx] = txq;
706 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
707 txq->ops = &def_txq_ops;
709 if (check_tx_vec_allow(txq) == false) {
710 struct iavf_adapter *ad =
711 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
712 ad->tx_vec_allowed = false;
719 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
721 struct iavf_adapter *adapter =
722 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
723 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
724 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
725 struct iavf_rx_queue *rxq;
728 PMD_DRV_FUNC_TRACE();
730 if (rx_queue_id >= dev->data->nb_rx_queues)
733 rxq = dev->data->rx_queues[rx_queue_id];
735 err = alloc_rxq_mbufs(rxq);
737 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
743 /* Init the RX tail register. */
744 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
745 IAVF_WRITE_FLUSH(hw);
747 /* Ready to switch the queue on */
749 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
751 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
754 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
757 dev->data->rx_queue_state[rx_queue_id] =
758 RTE_ETH_QUEUE_STATE_STARTED;
764 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
766 struct iavf_adapter *adapter =
767 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
768 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
769 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
770 struct iavf_tx_queue *txq;
773 PMD_DRV_FUNC_TRACE();
775 if (tx_queue_id >= dev->data->nb_tx_queues)
778 txq = dev->data->tx_queues[tx_queue_id];
780 /* Init the TX tail register. */
781 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
782 IAVF_WRITE_FLUSH(hw);
784 /* Ready to switch the queue on */
786 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
788 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
791 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
794 dev->data->tx_queue_state[tx_queue_id] =
795 RTE_ETH_QUEUE_STATE_STARTED;
801 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
803 struct iavf_adapter *adapter =
804 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
805 struct iavf_rx_queue *rxq;
808 PMD_DRV_FUNC_TRACE();
810 if (rx_queue_id >= dev->data->nb_rx_queues)
813 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
815 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
820 rxq = dev->data->rx_queues[rx_queue_id];
821 rxq->ops->release_mbufs(rxq);
823 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
829 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
831 struct iavf_adapter *adapter =
832 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
833 struct iavf_tx_queue *txq;
836 PMD_DRV_FUNC_TRACE();
838 if (tx_queue_id >= dev->data->nb_tx_queues)
841 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
843 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
848 txq = dev->data->tx_queues[tx_queue_id];
849 txq->ops->release_mbufs(txq);
851 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
857 iavf_dev_rx_queue_release(void *rxq)
859 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
864 q->ops->release_mbufs(q);
865 rte_free(q->sw_ring);
866 rte_memzone_free(q->mz);
871 iavf_dev_tx_queue_release(void *txq)
873 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
878 q->ops->release_mbufs(q);
879 rte_free(q->sw_ring);
880 rte_memzone_free(q->mz);
885 iavf_stop_queues(struct rte_eth_dev *dev)
887 struct iavf_adapter *adapter =
888 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
889 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
890 struct iavf_rx_queue *rxq;
891 struct iavf_tx_queue *txq;
894 /* Stop all queues */
895 if (!vf->lv_enabled) {
896 ret = iavf_disable_queues(adapter);
898 PMD_DRV_LOG(WARNING, "Failed to stop queues");
900 ret = iavf_disable_queues_lv(adapter);
902 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
906 PMD_DRV_LOG(WARNING, "Failed to stop queues");
908 for (i = 0; i < dev->data->nb_tx_queues; i++) {
909 txq = dev->data->tx_queues[i];
912 txq->ops->release_mbufs(txq);
914 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
916 for (i = 0; i < dev->data->nb_rx_queues; i++) {
917 rxq = dev->data->rx_queues[i];
920 rxq->ops->release_mbufs(rxq);
922 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
935 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
937 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
938 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
939 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
941 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
948 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
949 volatile union iavf_rx_flex_desc *rxdp)
951 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
952 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
953 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
955 rte_le_to_cpu_16(rxdp->wb.l2tag1);
960 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
961 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
962 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
963 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
964 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
965 mb->vlan_tci_outer = mb->vlan_tci;
966 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
967 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
968 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
969 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
971 mb->vlan_tci_outer = 0;
976 /* Translate the rx descriptor status and error fields to pkt flags */
977 static inline uint64_t
978 iavf_rxd_to_pkt_flags(uint64_t qword)
981 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
983 #define IAVF_RX_ERR_BITS 0x3f
985 /* Check if RSS_HASH */
986 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
987 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
988 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
990 /* Check if FDIR Match */
991 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
994 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
995 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
999 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1000 flags |= PKT_RX_IP_CKSUM_BAD;
1002 flags |= PKT_RX_IP_CKSUM_GOOD;
1004 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1005 flags |= PKT_RX_L4_CKSUM_BAD;
1007 flags |= PKT_RX_L4_CKSUM_GOOD;
1009 /* TODO: Oversize error bit is not processed here */
1014 static inline uint64_t
1015 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1018 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1021 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1022 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1023 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1025 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1027 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1028 flags |= PKT_RX_FDIR_ID;
1032 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1033 flags |= PKT_RX_FDIR_ID;
1038 #define IAVF_RX_FLEX_ERR0_BITS \
1039 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1040 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1041 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1042 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1043 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1044 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1046 /* Rx L3/L4 checksum */
1047 static inline uint64_t
1048 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1052 /* Check if HW has decoded the packet and its checksum */
1053 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1056 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1057 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1061 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1062 flags |= PKT_RX_IP_CKSUM_BAD;
1064 flags |= PKT_RX_IP_CKSUM_GOOD;
1066 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1067 flags |= PKT_RX_L4_CKSUM_BAD;
1069 flags |= PKT_RX_L4_CKSUM_GOOD;
1071 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1072 flags |= PKT_RX_EIP_CKSUM_BAD;
1077 /* If the number of free RX descriptors is greater than the RX free
1078 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1079 * register. Update the RDT with the value of the last processed RX
1080 * descriptor minus 1, to guarantee that the RDT register is never
1081 * equal to the RDH register, which creates a "full" ring situation
1082 * from the hardware point of view.
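 *
 * For example, with rx_free_thresh = 32, once more than 32 descriptors have
 * been processed since the last update the tail is written as rx_id - 1
 * (wrapping to nb_rx_desc - 1 when rx_id is 0); otherwise the count is
 * carried over in rxq->nb_rx_hold for the next burst.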
1085 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1087 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1089 if (nb_hold > rxq->rx_free_thresh) {
1091 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1092 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1093 rx_id = (uint16_t)((rx_id == 0) ?
1094 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1095 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1098 rxq->nb_rx_hold = nb_hold;
1101 /* implement recv_pkts */
1103 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1105 volatile union iavf_rx_desc *rx_ring;
1106 volatile union iavf_rx_desc *rxdp;
1107 struct iavf_rx_queue *rxq;
1108 union iavf_rx_desc rxd;
1109 struct rte_mbuf *rxe;
1110 struct rte_eth_dev *dev;
1111 struct rte_mbuf *rxm;
1112 struct rte_mbuf *nmb;
1116 uint16_t rx_packet_len;
1117 uint16_t rx_id, nb_hold;
1120 const uint32_t *ptype_tbl;
1125 rx_id = rxq->rx_tail;
1126 rx_ring = rxq->rx_ring;
1127 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1129 while (nb_rx < nb_pkts) {
1130 rxdp = &rx_ring[rx_id];
1131 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1132 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1133 IAVF_RXD_QW1_STATUS_SHIFT;
1135 /* Check the DD bit first */
1136 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1138 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1140 nmb = rte_mbuf_raw_alloc(rxq->mp);
1141 if (unlikely(!nmb)) {
1142 dev = &rte_eth_devices[rxq->port_id];
1143 dev->data->rx_mbuf_alloc_failed++;
1144 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1145 "queue_id=%u", rxq->port_id, rxq->queue_id);
1151 rxe = rxq->sw_ring[rx_id];
1153 if (unlikely(rx_id == rxq->nb_rx_desc))
1156 /* Prefetch next mbuf */
1157 rte_prefetch0(rxq->sw_ring[rx_id]);
1159 /* When next RX descriptor is on a cache line boundary,
1160 * prefetch the next 4 RX descriptors and next 8 pointers
1163 if ((rx_id & 0x3) == 0) {
1164 rte_prefetch0(&rx_ring[rx_id]);
1165 rte_prefetch0(rxq->sw_ring[rx_id]);
1169 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1170 rxdp->read.hdr_addr = 0;
1171 rxdp->read.pkt_addr = dma_addr;
1173 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1174 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1176 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1177 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1180 rxm->pkt_len = rx_packet_len;
1181 rxm->data_len = rx_packet_len;
1182 rxm->port = rxq->port_id;
1184 iavf_rxd_to_vlan_tci(rxm, &rxd);
1185 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1187 ptype_tbl[(uint8_t)((qword1 &
1188 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1190 if (pkt_flags & PKT_RX_RSS_HASH)
1192 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1194 if (pkt_flags & PKT_RX_FDIR)
1195 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1197 rxm->ol_flags |= pkt_flags;
1199 rx_pkts[nb_rx++] = rxm;
1201 rxq->rx_tail = rx_id;
1203 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1208 /* implement recv_pkts for flexible Rx descriptor */
1210 iavf_recv_pkts_flex_rxd(void *rx_queue,
1211 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1213 volatile union iavf_rx_desc *rx_ring;
1214 volatile union iavf_rx_flex_desc *rxdp;
1215 struct iavf_rx_queue *rxq;
1216 union iavf_rx_flex_desc rxd;
1217 struct rte_mbuf *rxe;
1218 struct rte_eth_dev *dev;
1219 struct rte_mbuf *rxm;
1220 struct rte_mbuf *nmb;
1222 uint16_t rx_stat_err0;
1223 uint16_t rx_packet_len;
1224 uint16_t rx_id, nb_hold;
1227 const uint32_t *ptype_tbl;
1232 rx_id = rxq->rx_tail;
1233 rx_ring = rxq->rx_ring;
1234 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1236 while (nb_rx < nb_pkts) {
1237 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1238 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1240 /* Check the DD bit first */
1241 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1243 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1245 nmb = rte_mbuf_raw_alloc(rxq->mp);
1246 if (unlikely(!nmb)) {
1247 dev = &rte_eth_devices[rxq->port_id];
1248 dev->data->rx_mbuf_alloc_failed++;
1249 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1250 "queue_id=%u", rxq->port_id, rxq->queue_id);
1256 rxe = rxq->sw_ring[rx_id];
1258 if (unlikely(rx_id == rxq->nb_rx_desc))
1261 /* Prefetch next mbuf */
1262 rte_prefetch0(rxq->sw_ring[rx_id]);
1264 /* When next RX descriptor is on a cache line boundary,
1265 * prefetch the next 4 RX descriptors and next 8 pointers
1268 if ((rx_id & 0x3) == 0) {
1269 rte_prefetch0(&rx_ring[rx_id]);
1270 rte_prefetch0(rxq->sw_ring[rx_id]);
1274 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1275 rxdp->read.hdr_addr = 0;
1276 rxdp->read.pkt_addr = dma_addr;
1278 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1279 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1281 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1282 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1285 rxm->pkt_len = rx_packet_len;
1286 rxm->data_len = rx_packet_len;
1287 rxm->port = rxq->port_id;
1289 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1290 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1291 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1292 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1293 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1294 rxm->ol_flags |= pkt_flags;
1296 rx_pkts[nb_rx++] = rxm;
1298 rxq->rx_tail = rx_id;
1300 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1305 /* implement recv_scattered_pkts for flexible Rx descriptor */
1307 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1310 struct iavf_rx_queue *rxq = rx_queue;
1311 union iavf_rx_flex_desc rxd;
1312 struct rte_mbuf *rxe;
1313 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1314 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1315 struct rte_mbuf *nmb, *rxm;
1316 uint16_t rx_id = rxq->rx_tail;
1317 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1318 struct rte_eth_dev *dev;
1319 uint16_t rx_stat_err0;
1323 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1324 volatile union iavf_rx_flex_desc *rxdp;
1325 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1327 while (nb_rx < nb_pkts) {
1328 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1329 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1331 /* Check the DD bit */
1332 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1334 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1336 nmb = rte_mbuf_raw_alloc(rxq->mp);
1337 if (unlikely(!nmb)) {
1338 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1339 "queue_id=%u", rxq->port_id, rxq->queue_id);
1340 dev = &rte_eth_devices[rxq->port_id];
1341 dev->data->rx_mbuf_alloc_failed++;
1347 rxe = rxq->sw_ring[rx_id];
1349 if (rx_id == rxq->nb_rx_desc)
1352 /* Prefetch next mbuf */
1353 rte_prefetch0(rxq->sw_ring[rx_id]);
1355 /* When next RX descriptor is on a cache line boundary,
1356 * prefetch the next 4 RX descriptors and next 8 pointers
1359 if ((rx_id & 0x3) == 0) {
1360 rte_prefetch0(&rx_ring[rx_id]);
1361 rte_prefetch0(rxq->sw_ring[rx_id]);
1366 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1368 /* Set data buffer address and data length of the mbuf */
1369 rxdp->read.hdr_addr = 0;
1370 rxdp->read.pkt_addr = dma_addr;
1371 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1372 IAVF_RX_FLX_DESC_PKT_LEN_M;
1373 rxm->data_len = rx_packet_len;
1374 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1376 /* If this is the first buffer of the received packet, set the
1377 * pointer to the first mbuf of the packet and initialize its
1378 * context. Otherwise, update the total length and the number
1379 * of segments of the current scattered packet, and update the
1380 * pointer to the last mbuf of the current packet.
1384 first_seg->nb_segs = 1;
1385 first_seg->pkt_len = rx_packet_len;
1387 first_seg->pkt_len =
1388 (uint16_t)(first_seg->pkt_len +
1390 first_seg->nb_segs++;
1391 last_seg->next = rxm;
1394 /* If this is not the last buffer of the received packet,
1395 * update the pointer to the last mbuf of the current scattered
1396 * packet and continue to parse the RX ring.
1398 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1403 /* This is the last buffer of the received packet. If the CRC
1404 * is not stripped by the hardware:
1405 * - Subtract the CRC length from the total packet length.
1406 * - If the last buffer only contains the whole CRC or a part
1407 * of it, free the mbuf associated to the last buffer. If part
1408 * of the CRC is also contained in the previous mbuf, subtract
1409 * the length of that CRC part from the data length of the
1413 if (unlikely(rxq->crc_len > 0)) {
1414 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1415 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1416 rte_pktmbuf_free_seg(rxm);
1417 first_seg->nb_segs--;
1418 last_seg->data_len =
1419 (uint16_t)(last_seg->data_len -
1420 (RTE_ETHER_CRC_LEN - rx_packet_len));
1421 last_seg->next = NULL;
1423 rxm->data_len = (uint16_t)(rx_packet_len -
1428 first_seg->port = rxq->port_id;
1429 first_seg->ol_flags = 0;
1430 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1431 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1432 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1433 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1434 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1436 first_seg->ol_flags |= pkt_flags;
1438 /* Prefetch data of first segment, if configured to do so. */
1439 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1440 first_seg->data_off));
1441 rx_pkts[nb_rx++] = first_seg;
1445 /* Record index of the next RX descriptor to probe. */
1446 rxq->rx_tail = rx_id;
1447 rxq->pkt_first_seg = first_seg;
1448 rxq->pkt_last_seg = last_seg;
1450 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1455 /* implement recv_scattered_pkts */
1457 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1460 struct iavf_rx_queue *rxq = rx_queue;
1461 union iavf_rx_desc rxd;
1462 struct rte_mbuf *rxe;
1463 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1464 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1465 struct rte_mbuf *nmb, *rxm;
1466 uint16_t rx_id = rxq->rx_tail;
1467 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1468 struct rte_eth_dev *dev;
1474 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1475 volatile union iavf_rx_desc *rxdp;
1476 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1478 while (nb_rx < nb_pkts) {
1479 rxdp = &rx_ring[rx_id];
1480 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1481 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1482 IAVF_RXD_QW1_STATUS_SHIFT;
1484 /* Check the DD bit */
1485 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1487 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1489 nmb = rte_mbuf_raw_alloc(rxq->mp);
1490 if (unlikely(!nmb)) {
1491 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1492 "queue_id=%u", rxq->port_id, rxq->queue_id);
1493 dev = &rte_eth_devices[rxq->port_id];
1494 dev->data->rx_mbuf_alloc_failed++;
1500 rxe = rxq->sw_ring[rx_id];
1502 if (rx_id == rxq->nb_rx_desc)
1505 /* Prefetch next mbuf */
1506 rte_prefetch0(rxq->sw_ring[rx_id]);
1508 /* When next RX descriptor is on a cache line boundary,
1509 * prefetch the next 4 RX descriptors and next 8 pointers
1512 if ((rx_id & 0x3) == 0) {
1513 rte_prefetch0(&rx_ring[rx_id]);
1514 rte_prefetch0(rxq->sw_ring[rx_id]);
1519 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1521 /* Set data buffer address and data length of the mbuf */
1522 rxdp->read.hdr_addr = 0;
1523 rxdp->read.pkt_addr = dma_addr;
1524 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1525 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1526 rxm->data_len = rx_packet_len;
1527 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1529 /* If this is the first buffer of the received packet, set the
1530 * pointer to the first mbuf of the packet and initialize its
1531 * context. Otherwise, update the total length and the number
1532 * of segments of the current scattered packet, and update the
1533 * pointer to the last mbuf of the current packet.
1537 first_seg->nb_segs = 1;
1538 first_seg->pkt_len = rx_packet_len;
1540 first_seg->pkt_len =
1541 (uint16_t)(first_seg->pkt_len +
1543 first_seg->nb_segs++;
1544 last_seg->next = rxm;
1547 /* If this is not the last buffer of the received packet,
1548 * update the pointer to the last mbuf of the current scattered
1549 * packet and continue to parse the RX ring.
1551 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1556 /* This is the last buffer of the received packet. If the CRC
1557 * is not stripped by the hardware:
1558 * - Subtract the CRC length from the total packet length.
1559 * - If the last buffer only contains the whole CRC or a part
1560 * of it, free the mbuf associated to the last buffer. If part
1561 * of the CRC is also contained in the previous mbuf, subtract
1562 * the length of that CRC part from the data length of the
1566 if (unlikely(rxq->crc_len > 0)) {
1567 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1568 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1569 rte_pktmbuf_free_seg(rxm);
1570 first_seg->nb_segs--;
1571 last_seg->data_len =
1572 (uint16_t)(last_seg->data_len -
1573 (RTE_ETHER_CRC_LEN - rx_packet_len));
1574 last_seg->next = NULL;
1576 rxm->data_len = (uint16_t)(rx_packet_len -
1580 first_seg->port = rxq->port_id;
1581 first_seg->ol_flags = 0;
1582 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1583 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1584 first_seg->packet_type =
1585 ptype_tbl[(uint8_t)((qword1 &
1586 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1588 if (pkt_flags & PKT_RX_RSS_HASH)
1589 first_seg->hash.rss =
1590 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1592 if (pkt_flags & PKT_RX_FDIR)
1593 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1595 first_seg->ol_flags |= pkt_flags;
1597 /* Prefetch data of first segment, if configured to do so. */
1598 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1599 first_seg->data_off));
1600 rx_pkts[nb_rx++] = first_seg;
1604 /* Record index of the next RX descriptor to probe. */
1605 rxq->rx_tail = rx_id;
1606 rxq->pkt_first_seg = first_seg;
1607 rxq->pkt_last_seg = last_seg;
1609 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1614 #define IAVF_LOOK_AHEAD 8
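/* Scan the ring in groups of IAVF_LOOK_AHEAD descriptors, up to
 * IAVF_RX_MAX_BURST per call: statuses are read back-to-front, completed
 * descriptors are converted to mbufs staged in rxq->rx_stage, and scanning
 * stops at the first group whose DD bits are not all set.
 */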
1616 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1618 volatile union iavf_rx_flex_desc *rxdp;
1619 struct rte_mbuf **rxep;
1620 struct rte_mbuf *mb;
1623 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1624 int32_t i, j, nb_rx = 0;
1626 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1628 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1629 rxep = &rxq->sw_ring[rxq->rx_tail];
1631 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1633 /* Make sure there is at least 1 packet to receive */
1634 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1637 /* Scan LOOK_AHEAD descriptors at a time to determine which
1638 * descriptors reference packets that are ready to be received.
1640 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1641 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1642 /* Read desc statuses backwards to avoid race condition */
1643 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1644 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1648 /* Compute how many status bits were set */
1649 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1650 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1654 /* Translate descriptor info to mbuf parameters */
1655 for (j = 0; j < nb_dd; j++) {
1656 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1658 i * IAVF_LOOK_AHEAD + j);
1661 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1662 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1663 mb->data_len = pkt_len;
1664 mb->pkt_len = pkt_len;
1667 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1668 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1669 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1670 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1671 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1672 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1674 mb->ol_flags |= pkt_flags;
1677 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1678 rxq->rx_stage[i + j] = rxep[j];
1680 if (nb_dd != IAVF_LOOK_AHEAD)
1684 /* Clear software ring entries */
1685 for (i = 0; i < nb_rx; i++)
1686 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1692 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1694 volatile union iavf_rx_desc *rxdp;
1695 struct rte_mbuf **rxep;
1696 struct rte_mbuf *mb;
1700 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1701 int32_t i, j, nb_rx = 0;
1703 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1705 rxdp = &rxq->rx_ring[rxq->rx_tail];
1706 rxep = &rxq->sw_ring[rxq->rx_tail];
1708 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1709 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1710 IAVF_RXD_QW1_STATUS_SHIFT;
1712 /* Make sure there is at least 1 packet to receive */
1713 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1716 /* Scan LOOK_AHEAD descriptors at a time to determine which
1717 * descriptors reference packets that are ready to be received.
1719 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1720 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1721 /* Read desc statuses backwards to avoid race condition */
1722 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1723 qword1 = rte_le_to_cpu_64(
1724 rxdp[j].wb.qword1.status_error_len);
1725 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1726 IAVF_RXD_QW1_STATUS_SHIFT;
1731 /* Compute how many status bits were set */
1732 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1733 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1737 /* Translate descriptor info to mbuf parameters */
1738 for (j = 0; j < nb_dd; j++) {
1739 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1740 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1743 qword1 = rte_le_to_cpu_64
1744 (rxdp[j].wb.qword1.status_error_len);
1745 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1746 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1747 mb->data_len = pkt_len;
1748 mb->pkt_len = pkt_len;
1750 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1751 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1753 ptype_tbl[(uint8_t)((qword1 &
1754 IAVF_RXD_QW1_PTYPE_MASK) >>
1755 IAVF_RXD_QW1_PTYPE_SHIFT)];
1757 if (pkt_flags & PKT_RX_RSS_HASH)
1758 mb->hash.rss = rte_le_to_cpu_32(
1759 rxdp[j].wb.qword0.hi_dword.rss);
1761 if (pkt_flags & PKT_RX_FDIR)
1762 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1764 mb->ol_flags |= pkt_flags;
1767 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1768 rxq->rx_stage[i + j] = rxep[j];
1770 if (nb_dd != IAVF_LOOK_AHEAD)
1774 /* Clear software ring entries */
1775 for (i = 0; i < nb_rx; i++)
1776 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1781 static inline uint16_t
1782 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1783 struct rte_mbuf **rx_pkts,
1787 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1789 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1791 for (i = 0; i < nb_pkts; i++)
1792 rx_pkts[i] = stage[i];
1794 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1795 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1801 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1803 volatile union iavf_rx_desc *rxdp;
1804 struct rte_mbuf **rxep;
1805 struct rte_mbuf *mb;
1806 uint16_t alloc_idx, i;
1810 /* Allocate buffers in bulk */
1811 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1812 (rxq->rx_free_thresh - 1));
1813 rxep = &rxq->sw_ring[alloc_idx];
1814 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1815 rxq->rx_free_thresh);
1816 if (unlikely(diag != 0)) {
1817 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1821 rxdp = &rxq->rx_ring[alloc_idx];
1822 for (i = 0; i < rxq->rx_free_thresh; i++) {
1823 if (likely(i < (rxq->rx_free_thresh - 1)))
1824 /* Prefetch next mbuf */
1825 rte_prefetch0(rxep[i + 1]);
1828 rte_mbuf_refcnt_set(mb, 1);
1830 mb->data_off = RTE_PKTMBUF_HEADROOM;
1832 mb->port = rxq->port_id;
1833 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1834 rxdp[i].read.hdr_addr = 0;
1835 rxdp[i].read.pkt_addr = dma_addr;
1838 /* Update rx tail register */
1840 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1842 rxq->rx_free_trigger =
1843 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1844 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1845 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
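/* Bulk-allocation receive path: hand out mbufs still staged from a previous
 * scan if any, otherwise scan the ring (legacy or flex descriptor format),
 * refill buffers once rx_tail passes rx_free_trigger, and fill the caller's
 * array from the stage.
 */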
1850 static inline uint16_t
1851 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1853 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1859 if (rxq->rx_nb_avail)
1860 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1862 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1863 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1865 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1866 rxq->rx_next_avail = 0;
1867 rxq->rx_nb_avail = nb_rx;
1868 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1870 if (rxq->rx_tail > rxq->rx_free_trigger) {
1871 if (iavf_rx_alloc_bufs(rxq) != 0) {
1874 /* TODO: count rx_mbuf_alloc_failed here */
1876 rxq->rx_nb_avail = 0;
1877 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1878 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1879 rxq->sw_ring[j] = rxq->rx_stage[i];
1885 if (rxq->rx_tail >= rxq->nb_rx_desc)
1888 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1889 rxq->port_id, rxq->queue_id,
1890 rxq->rx_tail, nb_rx);
1892 if (rxq->rx_nb_avail)
1893 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1899 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1900 struct rte_mbuf **rx_pkts,
1903 uint16_t nb_rx = 0, n, count;
1905 if (unlikely(nb_pkts == 0))
1908 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1909 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1912 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1913 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1914 nb_rx = (uint16_t)(nb_rx + count);
1915 nb_pkts = (uint16_t)(nb_pkts - count);
1924 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1926 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1927 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1928 uint16_t nb_tx_desc = txq->nb_tx_desc;
1929 uint16_t desc_to_clean_to;
1930 uint16_t nb_tx_to_clean;
1932 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1934 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1935 if (desc_to_clean_to >= nb_tx_desc)
1936 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1938 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1939 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1940 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1941 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1942 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1943 "(port=%d queue=%d)", desc_to_clean_to,
1944 txq->port_id, txq->queue_id);
1948 if (last_desc_cleaned > desc_to_clean_to)
1949 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1952 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1955 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1957 txq->last_desc_cleaned = desc_to_clean_to;
1958 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1963 /* Check if the context descriptor is needed for TX offloading */
1964 static inline uint16_t
1965 iavf_calc_context_desc(uint64_t flags)
1967 static uint64_t mask = PKT_TX_TCP_SEG;
1969 return (flags & mask) ? 1 : 0;
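/* Fill the TX descriptor command and offset fields for checksum offload.
 * Offsets are encoded in the units the hardware expects: MACLEN in 2-byte
 * words (l2_len >> 1), IPLEN and L4LEN in 4-byte words (>> 2).
 */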
1973 iavf_txd_enable_checksum(uint64_t ol_flags,
1975 uint32_t *td_offset,
1976 union iavf_tx_offload tx_offload)
1979 *td_offset |= (tx_offload.l2_len >> 1) <<
1980 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1982 /* Enable L3 checksum offloads */
1983 if (ol_flags & PKT_TX_IP_CKSUM) {
1984 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1985 *td_offset |= (tx_offload.l3_len >> 2) <<
1986 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1987 } else if (ol_flags & PKT_TX_IPV4) {
1988 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1989 *td_offset |= (tx_offload.l3_len >> 2) <<
1990 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1991 } else if (ol_flags & PKT_TX_IPV6) {
1992 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1993 *td_offset |= (tx_offload.l3_len >> 2) <<
1994 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1997 if (ol_flags & PKT_TX_TCP_SEG) {
1998 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1999 *td_offset |= (tx_offload.l4_len >> 2) <<
2000 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2004 /* Enable L4 checksum offloads */
2005 switch (ol_flags & PKT_TX_L4_MASK) {
2006 case PKT_TX_TCP_CKSUM:
2007 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2008 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2009 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2011 case PKT_TX_SCTP_CKSUM:
2012 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2013 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2014 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2016 case PKT_TX_UDP_CKSUM:
2017 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2018 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2019 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2026 /* Set the TSO context descriptor.
2027 * Supports IP -> L4 and IP -> IP -> L4.
2029 static inline uint64_t
2030 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2032 uint64_t ctx_desc = 0;
2033 uint32_t cd_cmd, hdr_len, cd_tso_len;
2035 if (!tx_offload.l4_len) {
2036 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2040 hdr_len = tx_offload.l2_len +
2044 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2045 cd_tso_len = mbuf->pkt_len - hdr_len;
2046 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2047 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2048 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
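/* Rough example (hypothetical sizes): for a TSO packet with pkt_len = 9000,
 * 54 bytes of headers and tso_segsz = 1460, cd_tso_len is 8946 and the
 * hardware is expected to emit ceil(8946 / 1460) = 7 segments, each carrying
 * a copy of the headers.
 */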
2053 /* Construct the tx flags */
2054 static inline uint64_t
2055 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2058 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2059 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2060 ((uint64_t)td_offset <<
2061 IAVF_TXD_QW1_OFFSET_SHIFT) |
2063 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2064 ((uint64_t)td_tag <<
2065 IAVF_TXD_QW1_L2TAG1_SHIFT));
2070 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2072 volatile struct iavf_tx_desc *txd;
2073 volatile struct iavf_tx_desc *txr;
2074 struct iavf_tx_queue *txq;
2075 struct iavf_tx_entry *sw_ring;
2076 struct iavf_tx_entry *txe, *txn;
2077 struct rte_mbuf *tx_pkt;
2078 struct rte_mbuf *m_seg;
2089 uint64_t buf_dma_addr;
2090 union iavf_tx_offload tx_offload = {0};
2093 sw_ring = txq->sw_ring;
2095 tx_id = txq->tx_tail;
2096 txe = &sw_ring[tx_id];
2098 /* Check if the descriptor ring needs to be cleaned. */
2099 if (txq->nb_free < txq->free_thresh)
2100 (void)iavf_xmit_cleanup(txq);
2102 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2107 tx_pkt = *tx_pkts++;
2108 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2110 ol_flags = tx_pkt->ol_flags;
2111 tx_offload.l2_len = tx_pkt->l2_len;
2112 tx_offload.l3_len = tx_pkt->l3_len;
2113 tx_offload.l4_len = tx_pkt->l4_len;
2114 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2115 /* Calculate the number of context descriptors needed. */
2116 nb_ctx = iavf_calc_context_desc(ol_flags);
2118 /* The number of descriptors that must be allocated for
2119 * a packet equals the number of segments of that packet,
2120 * plus 1 context descriptor if needed.
2122 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2123 tx_last = (uint16_t)(tx_id + nb_used - 1);
2126 if (tx_last >= txq->nb_tx_desc)
2127 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2129 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2130 " tx_first=%u tx_last=%u",
2131 txq->port_id, txq->queue_id, tx_id, tx_last);
2133 if (nb_used > txq->nb_free) {
2134 if (iavf_xmit_cleanup(txq)) {
2139 if (unlikely(nb_used > txq->rs_thresh)) {
2140 while (nb_used > txq->nb_free) {
2141 if (iavf_xmit_cleanup(txq)) {
2150 /* Descriptor based VLAN insertion */
2151 if (ol_flags & PKT_TX_VLAN_PKT) {
2152 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2153 td_tag = tx_pkt->vlan_tci;
2156 /* According to datasheet, the bit2 is reserved and must be
2161 /* Enable checksum offloading */
2162 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2163 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2164 &td_offset, tx_offload);
2167 /* Setup TX context descriptor if required */
2168 uint64_t cd_type_cmd_tso_mss =
2169 IAVF_TX_DESC_DTYPE_CONTEXT;
2170 volatile struct iavf_tx_context_desc *ctx_txd =
2171 (volatile struct iavf_tx_context_desc *)
2174 txn = &sw_ring[txe->next_id];
2175 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2177 rte_pktmbuf_free_seg(txe->mbuf);
2182 if (ol_flags & PKT_TX_TCP_SEG)
2183 cd_type_cmd_tso_mss |=
2184 iavf_set_tso_ctx(tx_pkt, tx_offload);
2186 ctx_txd->type_cmd_tso_mss =
2187 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2189 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2190 txe->last_id = tx_last;
2191 tx_id = txe->next_id;
2198 txn = &sw_ring[txe->next_id];
2201 rte_pktmbuf_free_seg(txe->mbuf);
2204 /* Setup TX Descriptor */
2205 slen = m_seg->data_len;
2206 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2207 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2208 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2213 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2214 txe->last_id = tx_last;
2215 tx_id = txe->next_id;
2217 m_seg = m_seg->next;
2220 /* The last packet data descriptor needs End Of Packet (EOP) */
2221 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2222 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2223 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2225 if (txq->nb_used >= txq->rs_thresh) {
2226 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2227 "%4u (port=%d queue=%d)",
2228 tx_last, txq->port_id, txq->queue_id);
2230 td_cmd |= IAVF_TX_DESC_CMD_RS;
2232 /* Update txq RS bit counters */
2236 txd->cmd_type_offset_bsz |=
2237 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2238 IAVF_TXD_QW1_CMD_SHIFT);
2239 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2245 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2246 txq->port_id, txq->queue_id, tx_id, nb_tx);
2248 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2249 txq->tx_tail = tx_id;
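/* Call-path sketch (application side, hypothetical port/queue ids): mbufs
 * passed to rte_eth_tx_burst(port, queue, pkts, n) reach iavf_xmit_pkts()
 * above, while rte_eth_tx_prepare() maps to iavf_prep_pkts() below for the
 * offload sanity checks and checksum pseudo-header setup.
 */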
2254 /* TX prep functions */
2256 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2263 for (i = 0; i < nb_pkts; i++) {
2265 ol_flags = m->ol_flags;
2267 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2268 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2269 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2273 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2274 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2275 /* MSS outside the range is considered malicious */
2280 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2281 rte_errno = ENOTSUP;
2285 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2286 ret = rte_validate_tx_offload(m);
2292 ret = rte_net_intel_cksum_prepare(m);
2302 /* Choose Rx function */
2304 iavf_set_rx_function(struct rte_eth_dev *dev)
2306 struct iavf_adapter *adapter =
2307 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2308 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2311 struct iavf_rx_queue *rxq;
2313 bool use_avx2 = false;
2314 #ifdef CC_AVX512_SUPPORT
2315 bool use_avx512 = false;
2318 if (!iavf_rx_vec_dev_check(dev) &&
2319 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2320 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2321 rxq = dev->data->rx_queues[i];
2322 (void)iavf_rxq_vec_setup(rxq);
2325 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2326 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2327 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2329 #ifdef CC_AVX512_SUPPORT
2330 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2331 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2332 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2336 if (dev->data->scattered_rx) {
2338 "Using %sVector Scattered Rx (port %d).",
2339 use_avx2 ? "avx2 " : "",
2340 dev->data->port_id);
2341 if (vf->vf_res->vf_cap_flags &
2342 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2343 dev->rx_pkt_burst = use_avx2 ?
2344 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2345 iavf_recv_scattered_pkts_vec_flex_rxd;
2346 #ifdef CC_AVX512_SUPPORT
2349 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2352 dev->rx_pkt_burst = use_avx2 ?
2353 iavf_recv_scattered_pkts_vec_avx2 :
2354 iavf_recv_scattered_pkts_vec;
2355 #ifdef CC_AVX512_SUPPORT
2358 iavf_recv_scattered_pkts_vec_avx512;
2362 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2363 use_avx2 ? "avx2 " : "",
2364 dev->data->port_id);
2365 if (vf->vf_res->vf_cap_flags &
2366 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2367 dev->rx_pkt_burst = use_avx2 ?
2368 iavf_recv_pkts_vec_avx2_flex_rxd :
2369 iavf_recv_pkts_vec_flex_rxd;
2370 #ifdef CC_AVX512_SUPPORT
2373 iavf_recv_pkts_vec_avx512_flex_rxd;
2376 dev->rx_pkt_burst = use_avx2 ?
2377 iavf_recv_pkts_vec_avx2 :
2379 #ifdef CC_AVX512_SUPPORT
2382 iavf_recv_pkts_vec_avx512;
2391 if (dev->data->scattered_rx) {
2392 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2393 dev->data->port_id);
2394 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2395 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2397 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2398 } else if (adapter->rx_bulk_alloc_allowed) {
2399 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2400 dev->data->port_id);
2401 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2403 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2404 dev->data->port_id);
2405 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2406 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2408 dev->rx_pkt_burst = iavf_recv_pkts;
2412 /* Choose the Tx function */
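/* The vector Tx path requires iavf_tx_vec_dev_check() to accept the device
 * and at least a 128-bit SIMD width, with AVX2/AVX512 variants selected
 * from the CPU flags. The vector path clears tx_pkt_prepare, while the
 * basic path installs iavf_prep_pkts so that rte_eth_tx_prepare() performs
 * the offload checks above.
 */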
2414 iavf_set_tx_function(struct rte_eth_dev *dev)
2417 struct iavf_tx_queue *txq;
2419 bool use_avx2 = false;
2420 #ifdef CC_AVX512_SUPPORT
2421 bool use_avx512 = false;
2424 if (!iavf_tx_vec_dev_check(dev) &&
2425 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2426 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2427 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2428 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2430 #ifdef CC_AVX512_SUPPORT
2431 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2432 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2433 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2437 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2438 use_avx2 ? "avx2 " : "",
2439 dev->data->port_id);
2440 dev->tx_pkt_burst = use_avx2 ?
2441 iavf_xmit_pkts_vec_avx2 :
2443 #ifdef CC_AVX512_SUPPORT
2445 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2447 dev->tx_pkt_prepare = NULL;
2449 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2450 txq = dev->data->tx_queues[i];
2453 #ifdef CC_AVX512_SUPPORT
2455 iavf_txq_vec_setup_avx512(txq);
2457 iavf_txq_vec_setup(txq);
2459 iavf_txq_vec_setup(txq);
2467 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2468 dev->data->port_id);
2469 dev->tx_pkt_burst = iavf_xmit_pkts;
2470 dev->tx_pkt_prepare = iavf_prep_pkts;
2474 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2477 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2478 uint16_t i, tx_last, tx_id;
2479 uint16_t nb_tx_free_last;
2480 uint16_t nb_tx_to_clean;
2483 /* Start freeing mbufs from the descriptor following tx_tail */
2484 tx_last = txq->tx_tail;
2485 tx_id = swr_ring[tx_last].next_id;
2487 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2490 nb_tx_to_clean = txq->nb_free;
2491 nb_tx_free_last = txq->nb_free;
2493 free_cnt = txq->nb_tx_desc;
2495 /* Loop through swr_ring to count the number of
2496  * freeable mbufs and packets.
2498 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2499 for (i = 0; i < nb_tx_to_clean &&
2500 pkt_cnt < free_cnt &&
2501 tx_id != tx_last; i++) {
2502 if (swr_ring[tx_id].mbuf != NULL) {
2503 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2504 swr_ring[tx_id].mbuf = NULL;
2507  * If this is the last segment in the packet,
2508  * increment the packet count.
2510 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2513 tx_id = swr_ring[tx_id].next_id;
2516 if (txq->rs_thresh > txq->nb_tx_desc -
2517 txq->nb_free || tx_id == tx_last)
2520 if (pkt_cnt < free_cnt) {
2521 if (iavf_xmit_cleanup(txq))
2524 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2525 nb_tx_free_last = txq->nb_free;
2529 return (int)pkt_cnt;
2533 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2535 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2537 return iavf_tx_done_cleanup_full(q, free_cnt);
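/* A minimal usage sketch from the application side (port_id and queue_id
 * are caller-owned values): this is the backing implementation of
 * rte_eth_tx_done_cleanup(), e.g.
 *
 *   int n = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * where a free_cnt of 0 is treated above as "clean up to the whole ring".
 */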
2541 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2542 struct rte_eth_rxq_info *qinfo)
2544 struct iavf_rx_queue *rxq;
2546 rxq = dev->data->rx_queues[queue_id];
2548 qinfo->mp = rxq->mp;
2549 qinfo->scattered_rx = dev->data->scattered_rx;
2550 qinfo->nb_desc = rxq->nb_rx_desc;
2552 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2553 qinfo->conf.rx_drop_en = true;
2554 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2558 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2559 struct rte_eth_txq_info *qinfo)
2561 struct iavf_tx_queue *txq;
2563 txq = dev->data->tx_queues[queue_id];
2565 qinfo->nb_desc = txq->nb_tx_desc;
2567 qinfo->conf.tx_free_thresh = txq->free_thresh;
2568 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2569 qinfo->conf.offloads = txq->offloads;
2570 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
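/* These two callbacks back rte_eth_rx_queue_info_get() and
 * rte_eth_tx_queue_info_get(); the ethdev layer validates the port and
 * queue ids and zeroes *qinfo before calling in, so only the fields the
 * driver actually knows are filled here.
 */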
2573 /* Get the number of used descriptors of an Rx queue */
2575 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2577 #define IAVF_RXQ_SCAN_INTERVAL 4
2578 volatile union iavf_rx_desc *rxdp;
2579 struct iavf_rx_queue *rxq;
2582 rxq = dev->data->rx_queues[queue_id];
2583 rxdp = &rxq->rx_ring[rxq->rx_tail];
2585 while ((desc < rxq->nb_rx_desc) &&
2586 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2587 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2588 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2589 /* Check the DD bit of only one Rx descriptor in each group of 4,
2590  * to avoid checking too frequently and degrading performance
2593 desc += IAVF_RXQ_SCAN_INTERVAL;
2594 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2595 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2596 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2597 desc - rxq->nb_rx_desc]);
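/* Because only every IAVF_RXQ_SCAN_INTERVAL-th descriptor is sampled, the
 * value returned is a multiple of the interval and may understate the
 * exact number of used descriptors by up to IAVF_RXQ_SCAN_INTERVAL - 1;
 * the coarser scan keeps this query cheap.
 */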
2604 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2606 struct iavf_rx_queue *rxq = rx_queue;
2607 volatile uint64_t *status;
2611 if (unlikely(offset >= rxq->nb_rx_desc))
2614 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2615 return RTE_ETH_RX_DESC_UNAVAIL;
2617 desc = rxq->rx_tail + offset;
2618 if (desc >= rxq->nb_rx_desc)
2619 desc -= rxq->nb_rx_desc;
2621 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2622 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2623 << IAVF_RXD_QW1_STATUS_SHIFT);
2625 return RTE_ETH_RX_DESC_DONE;
2627 return RTE_ETH_RX_DESC_AVAIL;
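/* Offsets that fall within the rxq->nb_rx_hold window (descriptors already
 * processed but not yet handed back to the hardware) are reported as
 * UNAVAIL; otherwise the DD bit of the addressed descriptor decides
 * between DONE and AVAIL.
 */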
2631 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2633 struct iavf_tx_queue *txq = tx_queue;
2634 volatile uint64_t *status;
2635 uint64_t mask, expect;
2638 if (unlikely(offset >= txq->nb_tx_desc))
2641 desc = txq->tx_tail + offset;
2642 /* Go to the next descriptor that has the RS bit set */
2643 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
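/* Only descriptors that were given the RS bit have their DTYPE field
 * rewritten to DESC_DONE on completion, hence the round-up to the next
 * multiple of rs_thresh. For example, with an rs_thresh of 32 and
 * tx_tail + offset = 70: ((70 + 31) / 32) * 32 = 96, so descriptor 96 is
 * the one whose status answers the query.
 */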
2645 if (desc >= txq->nb_tx_desc) {
2646 desc -= txq->nb_tx_desc;
2647 if (desc >= txq->nb_tx_desc)
2648 desc -= txq->nb_tx_desc;
2651 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2652 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2653 expect = rte_cpu_to_le_64(
2654 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2655 if ((*status & mask) == expect)
2656 return RTE_ETH_TX_DESC_DONE;
2658 return RTE_ETH_TX_DESC_FULL;
2662 iavf_get_default_ptype_table(void)
2664 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2665 __rte_cache_aligned = {
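/* This table translates the packet-type index reported by the hardware in
 * the Rx descriptor into the RTE_PTYPE_* flags stored in
 * mbuf->packet_type; indexes the device never reports are left at 0,
 * i.e. RTE_PTYPE_UNKNOWN.
 */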
2668 [1] = RTE_PTYPE_L2_ETHER,
2669 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2670 /* [3] - [5] reserved */
2671 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2672 /* [7] - [10] reserved */
2673 [11] = RTE_PTYPE_L2_ETHER_ARP,
2674 /* [12] - [21] reserved */
2676 /* Non-tunneled IPv4 */
2677 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2679 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680 RTE_PTYPE_L4_NONFRAG,
2681 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2684 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2686 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2688 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2692 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2693 RTE_PTYPE_TUNNEL_IP |
2694 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2695 RTE_PTYPE_INNER_L4_FRAG,
2696 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2697 RTE_PTYPE_TUNNEL_IP |
2698 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2699 RTE_PTYPE_INNER_L4_NONFRAG,
2700 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2701 RTE_PTYPE_TUNNEL_IP |
2702 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2703 RTE_PTYPE_INNER_L4_UDP,
2705 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2706 RTE_PTYPE_TUNNEL_IP |
2707 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2708 RTE_PTYPE_INNER_L4_TCP,
2709 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2710 RTE_PTYPE_TUNNEL_IP |
2711 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2712 RTE_PTYPE_INNER_L4_SCTP,
2713 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2714 RTE_PTYPE_TUNNEL_IP |
2715 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2716 RTE_PTYPE_INNER_L4_ICMP,
2719 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2720 RTE_PTYPE_TUNNEL_IP |
2721 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2722 RTE_PTYPE_INNER_L4_FRAG,
2723 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2724 RTE_PTYPE_TUNNEL_IP |
2725 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2726 RTE_PTYPE_INNER_L4_NONFRAG,
2727 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2728 RTE_PTYPE_TUNNEL_IP |
2729 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2730 RTE_PTYPE_INNER_L4_UDP,
2732 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_IP |
2734 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2735 RTE_PTYPE_INNER_L4_TCP,
2736 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2737 RTE_PTYPE_TUNNEL_IP |
2738 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2739 RTE_PTYPE_INNER_L4_SCTP,
2740 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2741 RTE_PTYPE_TUNNEL_IP |
2742 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2743 RTE_PTYPE_INNER_L4_ICMP,
2745 /* IPv4 --> GRE/Teredo/VXLAN */
2746 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2747 RTE_PTYPE_TUNNEL_GRENAT,
2749 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2750 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2751 RTE_PTYPE_TUNNEL_GRENAT |
2752 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2753 RTE_PTYPE_INNER_L4_FRAG,
2754 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2755 RTE_PTYPE_TUNNEL_GRENAT |
2756 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2757 RTE_PTYPE_INNER_L4_NONFRAG,
2758 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2759 RTE_PTYPE_TUNNEL_GRENAT |
2760 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2761 RTE_PTYPE_INNER_L4_UDP,
2763 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2764 RTE_PTYPE_TUNNEL_GRENAT |
2765 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2766 RTE_PTYPE_INNER_L4_TCP,
2767 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2768 RTE_PTYPE_TUNNEL_GRENAT |
2769 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2770 RTE_PTYPE_INNER_L4_SCTP,
2771 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2772 RTE_PTYPE_TUNNEL_GRENAT |
2773 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2774 RTE_PTYPE_INNER_L4_ICMP,
2776 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2777 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2778 RTE_PTYPE_TUNNEL_GRENAT |
2779 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2780 RTE_PTYPE_INNER_L4_FRAG,
2781 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2782 RTE_PTYPE_TUNNEL_GRENAT |
2783 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2784 RTE_PTYPE_INNER_L4_NONFRAG,
2785 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2786 RTE_PTYPE_TUNNEL_GRENAT |
2787 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2788 RTE_PTYPE_INNER_L4_UDP,
2790 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2791 RTE_PTYPE_TUNNEL_GRENAT |
2792 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2793 RTE_PTYPE_INNER_L4_TCP,
2794 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2795 RTE_PTYPE_TUNNEL_GRENAT |
2796 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2797 RTE_PTYPE_INNER_L4_SCTP,
2798 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2799 RTE_PTYPE_TUNNEL_GRENAT |
2800 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2801 RTE_PTYPE_INNER_L4_ICMP,
2803 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2804 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2805 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2807 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2808 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2809 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2810 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2811 RTE_PTYPE_INNER_L4_FRAG,
2812 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2813 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2814 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2815 RTE_PTYPE_INNER_L4_NONFRAG,
2816 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2817 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2818 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2819 RTE_PTYPE_INNER_L4_UDP,
2821 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2822 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2823 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2824 RTE_PTYPE_INNER_L4_TCP,
2825 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2826 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2827 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2828 RTE_PTYPE_INNER_L4_SCTP,
2829 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2830 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2831 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2832 RTE_PTYPE_INNER_L4_ICMP,
2834 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2835 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2836 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2837 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2838 RTE_PTYPE_INNER_L4_FRAG,
2839 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2840 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2841 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2842 RTE_PTYPE_INNER_L4_NONFRAG,
2843 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2844 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2845 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2846 RTE_PTYPE_INNER_L4_UDP,
2848 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2849 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2850 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2851 RTE_PTYPE_INNER_L4_TCP,
2852 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2853 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2854 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2855 RTE_PTYPE_INNER_L4_SCTP,
2856 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2857 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2858 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2859 RTE_PTYPE_INNER_L4_ICMP,
2860 /* [73] - [87] reserved */
2862 /* Non-tunneled IPv6 */
2863 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2865 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2866 RTE_PTYPE_L4_NONFRAG,
2867 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2870 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2872 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2874 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2878 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2879 RTE_PTYPE_TUNNEL_IP |
2880 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2881 RTE_PTYPE_INNER_L4_FRAG,
2882 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2883 RTE_PTYPE_TUNNEL_IP |
2884 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2885 RTE_PTYPE_INNER_L4_NONFRAG,
2886 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2887 RTE_PTYPE_TUNNEL_IP |
2888 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2889 RTE_PTYPE_INNER_L4_UDP,
2891 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2892 RTE_PTYPE_TUNNEL_IP |
2893 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2894 RTE_PTYPE_INNER_L4_TCP,
2895 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2896 RTE_PTYPE_TUNNEL_IP |
2897 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2898 RTE_PTYPE_INNER_L4_SCTP,
2899 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2900 RTE_PTYPE_TUNNEL_IP |
2901 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2902 RTE_PTYPE_INNER_L4_ICMP,
2905 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2906 RTE_PTYPE_TUNNEL_IP |
2907 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2908 RTE_PTYPE_INNER_L4_FRAG,
2909 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2910 RTE_PTYPE_TUNNEL_IP |
2911 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2912 RTE_PTYPE_INNER_L4_NONFRAG,
2913 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2914 RTE_PTYPE_TUNNEL_IP |
2915 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2916 RTE_PTYPE_INNER_L4_UDP,
2917 /* [105] reserved */
2918 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2919 RTE_PTYPE_TUNNEL_IP |
2920 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2921 RTE_PTYPE_INNER_L4_TCP,
2922 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_TUNNEL_IP |
2924 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2925 RTE_PTYPE_INNER_L4_SCTP,
2926 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2927 RTE_PTYPE_TUNNEL_IP |
2928 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2929 RTE_PTYPE_INNER_L4_ICMP,
2931 /* IPv6 --> GRE/Teredo/VXLAN */
2932 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2933 RTE_PTYPE_TUNNEL_GRENAT,
2935 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2936 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2937 RTE_PTYPE_TUNNEL_GRENAT |
2938 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2939 RTE_PTYPE_INNER_L4_FRAG,
2940 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2941 RTE_PTYPE_TUNNEL_GRENAT |
2942 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2943 RTE_PTYPE_INNER_L4_NONFRAG,
2944 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2945 RTE_PTYPE_TUNNEL_GRENAT |
2946 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2947 RTE_PTYPE_INNER_L4_UDP,
2948 /* [113] reserved */
2949 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2950 RTE_PTYPE_TUNNEL_GRENAT |
2951 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2952 RTE_PTYPE_INNER_L4_TCP,
2953 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2954 RTE_PTYPE_TUNNEL_GRENAT |
2955 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2956 RTE_PTYPE_INNER_L4_SCTP,
2957 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2958 RTE_PTYPE_TUNNEL_GRENAT |
2959 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2960 RTE_PTYPE_INNER_L4_ICMP,
2962 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2963 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2964 RTE_PTYPE_TUNNEL_GRENAT |
2965 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2966 RTE_PTYPE_INNER_L4_FRAG,
2967 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2968 RTE_PTYPE_TUNNEL_GRENAT |
2969 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2970 RTE_PTYPE_INNER_L4_NONFRAG,
2971 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2972 RTE_PTYPE_TUNNEL_GRENAT |
2973 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2974 RTE_PTYPE_INNER_L4_UDP,
2975 /* [120] reserved */
2976 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2977 RTE_PTYPE_TUNNEL_GRENAT |
2978 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2979 RTE_PTYPE_INNER_L4_TCP,
2980 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2981 RTE_PTYPE_TUNNEL_GRENAT |
2982 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2983 RTE_PTYPE_INNER_L4_SCTP,
2984 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2985 RTE_PTYPE_TUNNEL_GRENAT |
2986 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2987 RTE_PTYPE_INNER_L4_ICMP,
2989 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2990 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2991 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2993 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2994 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2995 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2996 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2997 RTE_PTYPE_INNER_L4_FRAG,
2998 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2999 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3000 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3001 RTE_PTYPE_INNER_L4_NONFRAG,
3002 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3003 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3004 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3005 RTE_PTYPE_INNER_L4_UDP,
3006 /* [128] reserved */
3007 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3008 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3009 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3010 RTE_PTYPE_INNER_L4_TCP,
3011 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3012 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3013 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3014 RTE_PTYPE_INNER_L4_SCTP,
3015 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3016 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3017 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3018 RTE_PTYPE_INNER_L4_ICMP,
3020 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3021 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3022 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3023 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3024 RTE_PTYPE_INNER_L4_FRAG,
3025 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3026 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3027 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3028 RTE_PTYPE_INNER_L4_NONFRAG,
3029 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3030 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3031 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3032 RTE_PTYPE_INNER_L4_UDP,
3033 /* [135] reserved */
3034 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3035 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3036 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3037 RTE_PTYPE_INNER_L4_TCP,
3038 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3039 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3040 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3041 RTE_PTYPE_INNER_L4_SCTP,
3042 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3043 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3044 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3045 RTE_PTYPE_INNER_L4_ICMP,
3046 /* [139] - [299] reserved */
3049 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3050 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3052 /* PPPoE --> IPv4 */
3053 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3054 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3056 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3057 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3058 RTE_PTYPE_L4_NONFRAG,
3059 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3060 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3062 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3063 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3065 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3066 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3068 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3069 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3072 /* PPPoE --> IPv6 */
3073 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3074 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3076 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3077 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3078 RTE_PTYPE_L4_NONFRAG,
3079 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3080 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3082 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3083 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3085 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3086 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3088 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3089 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3091 /* [314] - [324] reserved */
3093 /* IPv4/IPv6 --> GTPC/GTPU */
3094 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3095 RTE_PTYPE_TUNNEL_GTPC,
3096 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3097 RTE_PTYPE_TUNNEL_GTPC,
3098 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3099 RTE_PTYPE_TUNNEL_GTPC,
3100 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3101 RTE_PTYPE_TUNNEL_GTPC,
3102 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3103 RTE_PTYPE_TUNNEL_GTPU,
3104 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3105 RTE_PTYPE_TUNNEL_GTPU,
3107 /* IPv4 --> GTPU --> IPv4 */
3108 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3109 RTE_PTYPE_TUNNEL_GTPU |
3110 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3111 RTE_PTYPE_INNER_L4_FRAG,
3112 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3113 RTE_PTYPE_TUNNEL_GTPU |
3114 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3115 RTE_PTYPE_INNER_L4_NONFRAG,
3116 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3117 RTE_PTYPE_TUNNEL_GTPU |
3118 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3119 RTE_PTYPE_INNER_L4_UDP,
3120 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3121 RTE_PTYPE_TUNNEL_GTPU |
3122 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3123 RTE_PTYPE_INNER_L4_TCP,
3124 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3125 RTE_PTYPE_TUNNEL_GTPU |
3126 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3127 RTE_PTYPE_INNER_L4_ICMP,
3129 /* IPv6 --> GTPU --> IPv4 */
3130 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3131 RTE_PTYPE_TUNNEL_GTPU |
3132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3133 RTE_PTYPE_INNER_L4_FRAG,
3134 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3135 RTE_PTYPE_TUNNEL_GTPU |
3136 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3137 RTE_PTYPE_INNER_L4_NONFRAG,
3138 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3139 RTE_PTYPE_TUNNEL_GTPU |
3140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3141 RTE_PTYPE_INNER_L4_UDP,
3142 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3143 RTE_PTYPE_TUNNEL_GTPU |
3144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3145 RTE_PTYPE_INNER_L4_TCP,
3146 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3147 RTE_PTYPE_TUNNEL_GTPU |
3148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3149 RTE_PTYPE_INNER_L4_ICMP,
3151 /* IPv4 --> GTPU --> IPv6 */
3152 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3153 RTE_PTYPE_TUNNEL_GTPU |
3154 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3155 RTE_PTYPE_INNER_L4_FRAG,
3156 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3157 RTE_PTYPE_TUNNEL_GTPU |
3158 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3159 RTE_PTYPE_INNER_L4_NONFRAG,
3160 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3161 RTE_PTYPE_TUNNEL_GTPU |
3162 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3163 RTE_PTYPE_INNER_L4_UDP,
3164 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3165 RTE_PTYPE_TUNNEL_GTPU |
3166 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3167 RTE_PTYPE_INNER_L4_TCP,
3168 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3169 RTE_PTYPE_TUNNEL_GTPU |
3170 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3171 RTE_PTYPE_INNER_L4_ICMP,
3173 /* IPv6 --> GTPU --> IPv6 */
3174 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3175 RTE_PTYPE_TUNNEL_GTPU |
3176 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3177 RTE_PTYPE_INNER_L4_FRAG,
3178 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3179 RTE_PTYPE_TUNNEL_GTPU |
3180 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3181 RTE_PTYPE_INNER_L4_NONFRAG,
3182 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3183 RTE_PTYPE_TUNNEL_GTPU |
3184 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3185 RTE_PTYPE_INNER_L4_UDP,
3186 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3187 RTE_PTYPE_TUNNEL_GTPU |
3188 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3189 RTE_PTYPE_INNER_L4_TCP,
3190 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3191 RTE_PTYPE_TUNNEL_GTPU |
3192 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3193 RTE_PTYPE_INNER_L4_ICMP,
3194 /* All others reserved */