1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
29 #include "iavf_rxtx.h"
32 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
34 /* The following constraints must be satisfied:
35 * thresh < rxq->nb_rx_desc
37 if (thresh >= nb_desc) {
38 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
46 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
47 uint16_t tx_free_thresh)
49 /* TX descriptors will have their RS bit set after tx_rs_thresh
50 * descriptors have been used. The TX descriptor ring will be cleaned
51 * after tx_free_thresh descriptors are used or if the number of
52 * descriptors required to transmit a packet is greater than the
53 * number of free TX descriptors.
55 * The following constraints must be satisfied:
56 * - tx_rs_thresh must be less than the size of the ring minus 2.
57 * - tx_free_thresh must be less than the size of the ring minus 3.
58 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
59 * - tx_rs_thresh must be a divisor of the ring size.
61 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
62 * race condition, hence the maximum threshold constraints. When set
63 * to zero, use default values.
65 if (tx_rs_thresh >= (nb_desc - 2)) {
66 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
67 "number of TX descriptors (%u) minus 2",
68 tx_rs_thresh, nb_desc);
71 if (tx_free_thresh >= (nb_desc - 3)) {
72 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
73 "number of TX descriptors (%u) minus 3.",
74 tx_free_thresh, nb_desc);
77 if (tx_rs_thresh > tx_free_thresh) {
78 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
79 "equal to tx_free_thresh (%u).",
80 tx_rs_thresh, tx_free_thresh);
83 if ((nb_desc % tx_rs_thresh) != 0) {
84 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
85 "number of TX descriptors (%u).",
86 tx_rs_thresh, nb_desc);
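/*
 * Illustrative sketch, not part of the driver: a 1024-entry ring with
 * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies every constraint
 * above (32 < 1022, 32 < 1021, 32 <= 32 and 1024 % 32 == 0). The helper
 * name below is hypothetical; it only restates those checks.
 */
static __rte_unused int
example_tx_thresh_ok(uint16_t nb_desc, uint16_t rs, uint16_t free_thresh)
{
	return rs != 0 &&
	       rs < (uint16_t)(nb_desc - 2) &&
	       free_thresh < (uint16_t)(nb_desc - 3) &&
	       rs <= free_thresh &&
	       (nb_desc % rs) == 0;
}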
94 check_rx_vec_allow(struct iavf_rx_queue *rxq)
96 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
97 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
98 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
102 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
107 check_tx_vec_allow(struct iavf_tx_queue *txq)
109 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
110 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
111 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
112 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
115 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
120 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
124 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
125 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
126 "rxq->rx_free_thresh=%d, "
127 "IAVF_RX_MAX_BURST=%d",
128 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
130 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
131 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
132 "rxq->nb_rx_desc=%d, "
133 "rxq->rx_free_thresh=%d",
134 rxq->nb_rx_desc, rxq->rx_free_thresh);
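/*
 * Illustrative numbers, assumed for the example (IAVF_RX_MAX_BURST is 32):
 * a 512-entry Rx ring with rx_free_thresh = 32 passes both checks above,
 * while rx_free_thresh = 24 fails the first one and disables the
 * bulk-allocation path for this queue. The helper name is hypothetical.
 */
static __rte_unused int
example_rx_bulk_alloc_ok(uint16_t nb_desc, uint16_t free_thresh)
{
	return free_thresh >= IAVF_RX_MAX_BURST &&
	       free_thresh != 0 &&
	       (nb_desc % free_thresh) == 0;
}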
141 reset_rx_queue(struct iavf_rx_queue *rxq)
149 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
151 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
152 ((volatile char *)rxq->rx_ring)[i] = 0;
154 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
156 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
157 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
160 rxq->rx_nb_avail = 0;
161 rxq->rx_next_avail = 0;
162 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
166 rxq->pkt_first_seg = NULL;
167 rxq->pkt_last_seg = NULL;
171 reset_tx_queue(struct iavf_tx_queue *txq)
173 struct iavf_tx_entry *txe;
178 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
183 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
184 for (i = 0; i < size; i++)
185 ((volatile char *)txq->tx_ring)[i] = 0;
187 prev = (uint16_t)(txq->nb_tx_desc - 1);
188 for (i = 0; i < txq->nb_tx_desc; i++) {
189 txq->tx_ring[i].cmd_type_offset_bsz =
190 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
193 txe[prev].next_id = i;
200 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
201 txq->nb_free = txq->nb_tx_desc - 1;
203 txq->next_dd = txq->rs_thresh - 1;
204 txq->next_rs = txq->rs_thresh - 1;
208 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
210 volatile union iavf_rx_desc *rxd;
211 struct rte_mbuf *mbuf = NULL;
215 for (i = 0; i < rxq->nb_rx_desc; i++) {
216 mbuf = rte_mbuf_raw_alloc(rxq->mp);
217 if (unlikely(!mbuf)) {
218 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
222 rte_mbuf_refcnt_set(mbuf, 1);
224 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
226 mbuf->port = rxq->port_id;
229 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
231 rxd = &rxq->rx_ring[i];
232 rxd->read.pkt_addr = dma_addr;
233 rxd->read.hdr_addr = 0;
234 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
239 rxq->sw_ring[i] = mbuf;
246 release_rxq_mbufs(struct iavf_rx_queue *rxq)
253 for (i = 0; i < rxq->nb_rx_desc; i++) {
254 if (rxq->sw_ring[i]) {
255 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
256 rxq->sw_ring[i] = NULL;
261 if (rxq->rx_nb_avail == 0)
263 for (i = 0; i < rxq->rx_nb_avail; i++) {
264 struct rte_mbuf *mbuf;
266 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
267 rte_pktmbuf_free_seg(mbuf);
269 rxq->rx_nb_avail = 0;
273 release_txq_mbufs(struct iavf_tx_queue *txq)
277 if (!txq || !txq->sw_ring) {
278 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
282 for (i = 0; i < txq->nb_tx_desc; i++) {
283 if (txq->sw_ring[i].mbuf) {
284 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
285 txq->sw_ring[i].mbuf = NULL;
290 static const struct iavf_rxq_ops def_rxq_ops = {
291 .release_mbufs = release_rxq_mbufs,
294 static const struct iavf_txq_ops def_txq_ops = {
295 .release_mbufs = release_txq_mbufs,
299 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
300 uint16_t nb_desc, unsigned int socket_id,
301 const struct rte_eth_rxconf *rx_conf,
302 struct rte_mempool *mp)
304 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
305 struct iavf_adapter *ad =
306 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
307 struct iavf_info *vf =
308 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
309 struct iavf_vsi *vsi = &vf->vsi;
310 struct iavf_rx_queue *rxq;
311 const struct rte_memzone *mz;
314 uint16_t rx_free_thresh;
316 PMD_INIT_FUNC_TRACE();
318 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
319 nb_desc > IAVF_MAX_RING_DESC ||
320 nb_desc < IAVF_MIN_RING_DESC) {
321 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
326 /* Check free threshold */
327 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
328 IAVF_DEFAULT_RX_FREE_THRESH :
329 rx_conf->rx_free_thresh;
330 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
333 /* Free memory if needed */
334 if (dev->data->rx_queues[queue_idx]) {
335 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
336 dev->data->rx_queues[queue_idx] = NULL;
339 /* Allocate the rx queue data structure */
340 rxq = rte_zmalloc_socket("iavf rxq",
341 sizeof(struct iavf_rx_queue),
345 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
346 "rx queue data structure");
350 if (vf->vf_res->vf_cap_flags &
351 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
352 vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
353 rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
355 rxq->rxdid = IAVF_RXDID_LEGACY_1;
359 rxq->nb_rx_desc = nb_desc;
360 rxq->rx_free_thresh = rx_free_thresh;
361 rxq->queue_id = queue_idx;
362 rxq->port_id = dev->data->port_id;
363 rxq->crc_len = 0; /* crc stripping by default */
364 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
368 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
369 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
371 /* Allocate the software ring. */
372 len = nb_desc + IAVF_RX_MAX_BURST;
374 rte_zmalloc_socket("iavf rx sw ring",
375 sizeof(struct rte_mbuf *) * len,
379 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
384 /* Allocate the maximum number of RX ring hardware descriptors with
385 * a little more to support bulk allocation.
387 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
388 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
390 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
391 ring_size, IAVF_RING_BASE_ALIGN,
394 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
395 rte_free(rxq->sw_ring);
399 /* Zero all the descriptors in the ring. */
400 memset(mz->addr, 0, ring_size);
401 rxq->rx_ring_phys_addr = mz->iova;
402 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
407 dev->data->rx_queues[queue_idx] = rxq;
408 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
409 rxq->ops = &def_rxq_ops;
411 if (check_rx_bulk_allow(rxq) == true) {
412 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
413 "satisfied. Rx Burst Bulk Alloc function will be "
414 "used on port=%d, queue=%d.",
415 rxq->port_id, rxq->queue_id);
417 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
418 "not satisfied, Scattered Rx is requested "
419 "on port=%d, queue=%d.",
420 rxq->port_id, rxq->queue_id);
421 ad->rx_bulk_alloc_allowed = false;
424 if (check_rx_vec_allow(rxq) == false)
425 ad->rx_vec_allowed = false;
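/*
 * Illustrative application-side sketch, not part of the PMD: setting up one
 * Rx queue so that the bulk-allocation and vector paths above can be
 * selected. The pool name, pool size, ring size and helper name are
 * assumptions, not driver defaults.
 */
static __rte_unused int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	/* Assumed mempool parameters; a real application tunes these. */
	struct rte_mempool *mp = rte_pktmbuf_pool_create("example_pool",
			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			(int)rte_socket_id());
	struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = 32,	/* divides the 512-entry ring below */
	};

	if (mp == NULL)
		return -1;

	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_socket_id(), &rxconf, mp);
}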
431 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
434 unsigned int socket_id,
435 const struct rte_eth_txconf *tx_conf)
437 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
438 struct iavf_tx_queue *txq;
439 const struct rte_memzone *mz;
441 uint16_t tx_rs_thresh, tx_free_thresh;
444 PMD_INIT_FUNC_TRACE();
446 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
448 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
449 nb_desc > IAVF_MAX_RING_DESC ||
450 nb_desc < IAVF_MIN_RING_DESC) {
451 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
456 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
457 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
458 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
459 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
460 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
	return -EINVAL;
462 /* Free memory if needed. */
463 if (dev->data->tx_queues[queue_idx]) {
464 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
465 dev->data->tx_queues[queue_idx] = NULL;
468 /* Allocate the TX queue data structure. */
469 txq = rte_zmalloc_socket("iavf txq",
470 sizeof(struct iavf_tx_queue),
474 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
475 "tx queue structure");
479 txq->nb_tx_desc = nb_desc;
480 txq->rs_thresh = tx_rs_thresh;
481 txq->free_thresh = tx_free_thresh;
482 txq->queue_id = queue_idx;
483 txq->port_id = dev->data->port_id;
484 txq->offloads = offloads;
485 txq->tx_deferred_start = tx_conf->tx_deferred_start;
487 /* Allocate software ring */
489 rte_zmalloc_socket("iavf tx sw ring",
490 sizeof(struct iavf_tx_entry) * nb_desc,
494 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
499 /* Allocate TX hardware ring descriptors. */
500 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
501 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
502 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
503 ring_size, IAVF_RING_BASE_ALIGN,
506 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
507 rte_free(txq->sw_ring);
511 txq->tx_ring_phys_addr = mz->iova;
512 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
517 dev->data->tx_queues[queue_idx] = txq;
518 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
519 txq->ops = &def_txq_ops;
521 if (check_tx_vec_allow(txq) == false) {
522 struct iavf_adapter *ad =
523 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
524 ad->tx_vec_allowed = false;
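/*
 * Illustrative application-side counterpart, not part of the PMD: the
 * tx_rs_thresh/tx_free_thresh knobs validated above come from this
 * structure. The values and the helper name are assumptions.
 */
static __rte_unused int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_rs_thresh = 32,	/* must divide the ring size */
		.tx_free_thresh = 32,	/* >= tx_rs_thresh, < ring size - 3 */
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
				      rte_socket_id(), &txconf);
}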
531 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
533 struct iavf_adapter *adapter =
534 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
535 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
536 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
537 struct iavf_rx_queue *rxq;
540 PMD_DRV_FUNC_TRACE();
542 if (rx_queue_id >= dev->data->nb_rx_queues)
545 rxq = dev->data->rx_queues[rx_queue_id];
547 err = alloc_rxq_mbufs(rxq);
549 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
555 /* Init the RX tail register. */
556 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
557 IAVF_WRITE_FLUSH(hw);
559 /* Ready to switch the queue on */
561 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
563 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
566 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
569 dev->data->rx_queue_state[rx_queue_id] =
570 RTE_ETH_QUEUE_STATE_STARTED;
576 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
578 struct iavf_adapter *adapter =
579 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
580 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
581 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
582 struct iavf_tx_queue *txq;
585 PMD_DRV_FUNC_TRACE();
587 if (tx_queue_id >= dev->data->nb_tx_queues)
590 txq = dev->data->tx_queues[tx_queue_id];
592 /* Init the TX tail register. */
593 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
594 IAVF_WRITE_FLUSH(hw);
596 /* Ready to switch the queue on */
598 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
600 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
603 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
606 dev->data->tx_queue_state[tx_queue_id] =
607 RTE_ETH_QUEUE_STATE_STARTED;
613 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
615 struct iavf_adapter *adapter =
616 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
617 struct iavf_rx_queue *rxq;
620 PMD_DRV_FUNC_TRACE();
622 if (rx_queue_id >= dev->data->nb_rx_queues)
625 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
627 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
632 rxq = dev->data->rx_queues[rx_queue_id];
633 rxq->ops->release_mbufs(rxq);
635 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
641 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
643 struct iavf_adapter *adapter =
644 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
645 struct iavf_tx_queue *txq;
648 PMD_DRV_FUNC_TRACE();
650 if (tx_queue_id >= dev->data->nb_tx_queues)
653 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
655 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
660 txq = dev->data->tx_queues[tx_queue_id];
661 txq->ops->release_mbufs(txq);
663 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
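/*
 * Illustrative sketch of the application-side flow, assumed for the example:
 * a queue configured with rx/tx_deferred_start is not started by
 * rte_eth_dev_start(), so the start/stop handlers above are reached through
 * explicit calls like these. The helper name is hypothetical.
 */
static __rte_unused int
example_restart_queue_pair(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}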
669 iavf_dev_rx_queue_release(void *rxq)
671 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
676 q->ops->release_mbufs(q);
677 rte_free(q->sw_ring);
678 rte_memzone_free(q->mz);
683 iavf_dev_tx_queue_release(void *txq)
685 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
690 q->ops->release_mbufs(q);
691 rte_free(q->sw_ring);
692 rte_memzone_free(q->mz);
697 iavf_stop_queues(struct rte_eth_dev *dev)
699 struct iavf_adapter *adapter =
700 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
701 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
702 struct iavf_rx_queue *rxq;
703 struct iavf_tx_queue *txq;
706 /* Stop all queues */
707 if (!vf->lv_enabled) {
708 ret = iavf_disable_queues(adapter);
710 PMD_DRV_LOG(WARNING, "Failed to stop queues");
712 ret = iavf_disable_queues_lv(adapter);
714 PMD_DRV_LOG(WARNING, "Failed to stop queues for large VF");
718 PMD_DRV_LOG(WARNING, "Failed to stop queues");
720 for (i = 0; i < dev->data->nb_tx_queues; i++) {
721 txq = dev->data->tx_queues[i];
724 txq->ops->release_mbufs(txq);
726 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
728 for (i = 0; i < dev->data->nb_rx_queues; i++) {
729 rxq = dev->data->rx_queues[i];
732 rxq->ops->release_mbufs(rxq);
734 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
739 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
741 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
742 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
743 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
745 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
752 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
753 volatile union iavf_rx_flex_desc *rxdp)
755 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
756 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
757 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
759 rte_le_to_cpu_16(rxdp->wb.l2tag1);
765 /* Translate the rx descriptor status and error fields to pkt flags */
766 static inline uint64_t
767 iavf_rxd_to_pkt_flags(uint64_t qword)
770 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
772 #define IAVF_RX_ERR_BITS 0x3f
774 /* Check if RSS_HASH */
775 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
776 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
777 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
779 /* Check if FDIR Match */
780 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
783 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
784 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
788 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
789 flags |= PKT_RX_IP_CKSUM_BAD;
791 flags |= PKT_RX_IP_CKSUM_GOOD;
793 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
794 flags |= PKT_RX_L4_CKSUM_BAD;
796 flags |= PKT_RX_L4_CKSUM_GOOD;
798 /* TODO: Oversize error bit is not processed here */
803 static inline uint64_t
804 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
807 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
810 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
811 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
812 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
814 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
816 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
817 flags |= PKT_RX_FDIR_ID;
821 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
822 flags |= PKT_RX_FDIR_ID;
828 /* Translate the rx flex descriptor status to pkt flags */
830 iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
831 volatile union iavf_rx_flex_desc *rxdp)
833 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
834 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
835 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
838 stat_err = rte_le_to_cpu_16(desc->status_error0);
839 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
840 mb->ol_flags |= PKT_RX_RSS_HASH;
841 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
845 if (desc->flow_id != 0xFFFFFFFF) {
846 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
847 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
851 #define IAVF_RX_FLEX_ERR0_BITS \
852 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
853 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
854 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
855 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
856 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
857 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
859 /* Rx L3/L4 checksum */
860 static inline uint64_t
861 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
865 /* check if HW has decoded the packet and checksum */
866 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
869 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
870 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
874 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
875 flags |= PKT_RX_IP_CKSUM_BAD;
877 flags |= PKT_RX_IP_CKSUM_GOOD;
879 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
880 flags |= PKT_RX_L4_CKSUM_BAD;
882 flags |= PKT_RX_L4_CKSUM_GOOD;
884 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
885 flags |= PKT_RX_EIP_CKSUM_BAD;
890 /* If the number of free RX descriptors is greater than the RX free
891 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
892 * register. Update the RDT with the value of the last processed RX
893 * descriptor minus 1, to guarantee that the RDT register is never
894 * equal to the RDH register, which creates a "full" ring situation
895 * from the hardware point of view.
898 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
900 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
902 if (nb_hold > rxq->rx_free_thresh) {
904 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
905 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
906 rx_id = (uint16_t)((rx_id == 0) ?
907 (rxq->nb_rx_desc - 1) : (rx_id - 1));
908 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
911 rxq->nb_rx_hold = nb_hold;
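/*
 * Worked example of the wrap-around above (values assumed): with a
 * 512-entry ring and rx_free_thresh = 32, once nb_hold exceeds 32 the tail
 * register is written with rx_id - 1; if rx_id is 0 the register gets
 * 511 rather than an underflowed value, so RDT never equals RDH.
 * The helper name is hypothetical.
 */
static __rte_unused uint16_t
example_prev_rx_index(uint16_t rx_id, uint16_t nb_rx_desc)
{
	return (uint16_t)((rx_id == 0) ? (nb_rx_desc - 1) : (rx_id - 1));
}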
914 /* implement recv_pkts */
916 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
918 volatile union iavf_rx_desc *rx_ring;
919 volatile union iavf_rx_desc *rxdp;
920 struct iavf_rx_queue *rxq;
921 union iavf_rx_desc rxd;
922 struct rte_mbuf *rxe;
923 struct rte_eth_dev *dev;
924 struct rte_mbuf *rxm;
925 struct rte_mbuf *nmb;
929 uint16_t rx_packet_len;
930 uint16_t rx_id, nb_hold;
933 const uint32_t *ptype_tbl;
938 rx_id = rxq->rx_tail;
939 rx_ring = rxq->rx_ring;
940 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
942 while (nb_rx < nb_pkts) {
943 rxdp = &rx_ring[rx_id];
944 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
945 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
946 IAVF_RXD_QW1_STATUS_SHIFT;
948 /* Check the DD bit first */
949 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
951 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
953 nmb = rte_mbuf_raw_alloc(rxq->mp);
954 if (unlikely(!nmb)) {
955 dev = &rte_eth_devices[rxq->port_id];
956 dev->data->rx_mbuf_alloc_failed++;
957 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
958 "queue_id=%u", rxq->port_id, rxq->queue_id);
964 rxe = rxq->sw_ring[rx_id];
966 if (unlikely(rx_id == rxq->nb_rx_desc))
969 /* Prefetch next mbuf */
970 rte_prefetch0(rxq->sw_ring[rx_id]);
972 /* When next RX descriptor is on a cache line boundary,
973 * prefetch the next 4 RX descriptors and next 8 pointers
976 if ((rx_id & 0x3) == 0) {
977 rte_prefetch0(&rx_ring[rx_id]);
978 rte_prefetch0(rxq->sw_ring[rx_id]);
982 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
983 rxdp->read.hdr_addr = 0;
984 rxdp->read.pkt_addr = dma_addr;
986 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
987 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
989 rxm->data_off = RTE_PKTMBUF_HEADROOM;
990 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
993 rxm->pkt_len = rx_packet_len;
994 rxm->data_len = rx_packet_len;
995 rxm->port = rxq->port_id;
997 iavf_rxd_to_vlan_tci(rxm, &rxd);
998 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1000 ptype_tbl[(uint8_t)((qword1 &
1001 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1003 if (pkt_flags & PKT_RX_RSS_HASH)
1005 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1007 if (pkt_flags & PKT_RX_FDIR)
1008 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1010 rxm->ol_flags |= pkt_flags;
1012 rx_pkts[nb_rx++] = rxm;
1014 rxq->rx_tail = rx_id;
1016 iavf_update_rx_tail(rxq, nb_hold, rx_id);
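/*
 * Illustrative application-side poll loop, not part of the PMD: a call to
 * rte_eth_rx_burst() ends up in iavf_recv_pkts() or one of the variants
 * selected by iavf_set_rx_function(). The burst size of 32 and the helper
 * name are assumptions.
 */
static __rte_unused void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb; i++) {
		/* ... consume pkts[i] here ... */
		rte_pktmbuf_free(pkts[i]);
	}
}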
1021 /* implement recv_pkts for flexible Rx descriptor */
1023 iavf_recv_pkts_flex_rxd(void *rx_queue,
1024 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1026 volatile union iavf_rx_desc *rx_ring;
1027 volatile union iavf_rx_flex_desc *rxdp;
1028 struct iavf_rx_queue *rxq;
1029 union iavf_rx_flex_desc rxd;
1030 struct rte_mbuf *rxe;
1031 struct rte_eth_dev *dev;
1032 struct rte_mbuf *rxm;
1033 struct rte_mbuf *nmb;
1035 uint16_t rx_stat_err0;
1036 uint16_t rx_packet_len;
1037 uint16_t rx_id, nb_hold;
1040 const uint32_t *ptype_tbl;
1045 rx_id = rxq->rx_tail;
1046 rx_ring = rxq->rx_ring;
1047 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1049 while (nb_rx < nb_pkts) {
1050 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1051 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1053 /* Check the DD bit first */
1054 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1056 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1058 nmb = rte_mbuf_raw_alloc(rxq->mp);
1059 if (unlikely(!nmb)) {
1060 dev = &rte_eth_devices[rxq->port_id];
1061 dev->data->rx_mbuf_alloc_failed++;
1062 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1063 "queue_id=%u", rxq->port_id, rxq->queue_id);
1069 rxe = rxq->sw_ring[rx_id];
1071 if (unlikely(rx_id == rxq->nb_rx_desc))
1074 /* Prefetch next mbuf */
1075 rte_prefetch0(rxq->sw_ring[rx_id]);
1077 /* When next RX descriptor is on a cache line boundary,
1078 * prefetch the next 4 RX descriptors and next 8 pointers
1081 if ((rx_id & 0x3) == 0) {
1082 rte_prefetch0(&rx_ring[rx_id]);
1083 rte_prefetch0(rxq->sw_ring[rx_id]);
1087 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1088 rxdp->read.hdr_addr = 0;
1089 rxdp->read.pkt_addr = dma_addr;
1091 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1092 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1094 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1095 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1098 rxm->pkt_len = rx_packet_len;
1099 rxm->data_len = rx_packet_len;
1100 rxm->port = rxq->port_id;
1102 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1103 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1104 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1105 iavf_rxd_to_pkt_fields(rxm, &rxd);
1106 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1107 rxm->ol_flags |= pkt_flags;
1109 rx_pkts[nb_rx++] = rxm;
1111 rxq->rx_tail = rx_id;
1113 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1118 /* implement recv_scattered_pkts for flexible Rx descriptor */
1120 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1123 struct iavf_rx_queue *rxq = rx_queue;
1124 union iavf_rx_flex_desc rxd;
1125 struct rte_mbuf *rxe;
1126 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1127 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1128 struct rte_mbuf *nmb, *rxm;
1129 uint16_t rx_id = rxq->rx_tail;
1130 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1131 struct rte_eth_dev *dev;
1132 uint16_t rx_stat_err0;
1136 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1137 volatile union iavf_rx_flex_desc *rxdp;
1138 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1140 while (nb_rx < nb_pkts) {
1141 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1142 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1144 /* Check the DD bit */
1145 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1147 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1149 nmb = rte_mbuf_raw_alloc(rxq->mp);
1150 if (unlikely(!nmb)) {
1151 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1152 "queue_id=%u", rxq->port_id, rxq->queue_id);
1153 dev = &rte_eth_devices[rxq->port_id];
1154 dev->data->rx_mbuf_alloc_failed++;
1160 rxe = rxq->sw_ring[rx_id];
1162 if (rx_id == rxq->nb_rx_desc)
1165 /* Prefetch next mbuf */
1166 rte_prefetch0(rxq->sw_ring[rx_id]);
1168 /* When next RX descriptor is on a cache line boundary,
1169 * prefetch the next 4 RX descriptors and next 8 pointers
1172 if ((rx_id & 0x3) == 0) {
1173 rte_prefetch0(&rx_ring[rx_id]);
1174 rte_prefetch0(rxq->sw_ring[rx_id]);
1179 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1181 /* Set data buffer address and data length of the mbuf */
1182 rxdp->read.hdr_addr = 0;
1183 rxdp->read.pkt_addr = dma_addr;
1184 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1185 IAVF_RX_FLX_DESC_PKT_LEN_M;
1186 rxm->data_len = rx_packet_len;
1187 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1189 /* If this is the first buffer of the received packet, set the
1190 * pointer to the first mbuf of the packet and initialize its
1191 * context. Otherwise, update the total length and the number
1192 * of segments of the current scattered packet, and update the
1193 * pointer to the last mbuf of the current packet.
1197 first_seg->nb_segs = 1;
1198 first_seg->pkt_len = rx_packet_len;
1200 first_seg->pkt_len =
1201 (uint16_t)(first_seg->pkt_len +
1203 first_seg->nb_segs++;
1204 last_seg->next = rxm;
1207 /* If this is not the last buffer of the received packet,
1208 * update the pointer to the last mbuf of the current scattered
1209 * packet and continue to parse the RX ring.
1211 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1216 /* This is the last buffer of the received packet. If the CRC
1217 * is not stripped by the hardware:
1218 * - Subtract the CRC length from the total packet length.
1219 * - If the last buffer only contains the whole CRC or a part
1220 * of it, free the mbuf associated to the last buffer. If part
1221 * of the CRC is also contained in the previous mbuf, subtract
1222 * the length of that CRC part from the data length of the
1226 if (unlikely(rxq->crc_len > 0)) {
1227 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1228 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1229 rte_pktmbuf_free_seg(rxm);
1230 first_seg->nb_segs--;
1231 last_seg->data_len =
1232 (uint16_t)(last_seg->data_len -
1233 (RTE_ETHER_CRC_LEN - rx_packet_len));
1234 last_seg->next = NULL;
1236 rxm->data_len = (uint16_t)(rx_packet_len -
1241 first_seg->port = rxq->port_id;
1242 first_seg->ol_flags = 0;
1243 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1244 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1245 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1246 iavf_rxd_to_pkt_fields(first_seg, &rxd);
1247 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1249 first_seg->ol_flags |= pkt_flags;
1251 /* Prefetch data of first segment, if configured to do so. */
1252 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1253 first_seg->data_off));
1254 rx_pkts[nb_rx++] = first_seg;
1258 /* Record index of the next RX descriptor to probe. */
1259 rxq->rx_tail = rx_id;
1260 rxq->pkt_first_seg = first_seg;
1261 rxq->pkt_last_seg = last_seg;
1263 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1268 /* implement recv_scattered_pkts */
1270 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1273 struct iavf_rx_queue *rxq = rx_queue;
1274 union iavf_rx_desc rxd;
1275 struct rte_mbuf *rxe;
1276 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1277 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1278 struct rte_mbuf *nmb, *rxm;
1279 uint16_t rx_id = rxq->rx_tail;
1280 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1281 struct rte_eth_dev *dev;
1287 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1288 volatile union iavf_rx_desc *rxdp;
1289 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1291 while (nb_rx < nb_pkts) {
1292 rxdp = &rx_ring[rx_id];
1293 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1294 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1295 IAVF_RXD_QW1_STATUS_SHIFT;
1297 /* Check the DD bit */
1298 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1300 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1302 nmb = rte_mbuf_raw_alloc(rxq->mp);
1303 if (unlikely(!nmb)) {
1304 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1305 "queue_id=%u", rxq->port_id, rxq->queue_id);
1306 dev = &rte_eth_devices[rxq->port_id];
1307 dev->data->rx_mbuf_alloc_failed++;
1313 rxe = rxq->sw_ring[rx_id];
1315 if (rx_id == rxq->nb_rx_desc)
1318 /* Prefetch next mbuf */
1319 rte_prefetch0(rxq->sw_ring[rx_id]);
1321 /* When next RX descriptor is on a cache line boundary,
1322 * prefetch the next 4 RX descriptors and next 8 pointers
1325 if ((rx_id & 0x3) == 0) {
1326 rte_prefetch0(&rx_ring[rx_id]);
1327 rte_prefetch0(rxq->sw_ring[rx_id]);
1332 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1334 /* Set data buffer address and data length of the mbuf */
1335 rxdp->read.hdr_addr = 0;
1336 rxdp->read.pkt_addr = dma_addr;
1337 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1338 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1339 rxm->data_len = rx_packet_len;
1340 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1342 /* If this is the first buffer of the received packet, set the
1343 * pointer to the first mbuf of the packet and initialize its
1344 * context. Otherwise, update the total length and the number
1345 * of segments of the current scattered packet, and update the
1346 * pointer to the last mbuf of the current packet.
1350 first_seg->nb_segs = 1;
1351 first_seg->pkt_len = rx_packet_len;
1353 first_seg->pkt_len =
1354 (uint16_t)(first_seg->pkt_len +
1356 first_seg->nb_segs++;
1357 last_seg->next = rxm;
1360 /* If this is not the last buffer of the received packet,
1361 * update the pointer to the last mbuf of the current scattered
1362 * packet and continue to parse the RX ring.
1364 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1369 /* This is the last buffer of the received packet. If the CRC
1370 * is not stripped by the hardware:
1371 * - Subtract the CRC length from the total packet length.
1372 * - If the last buffer only contains the whole CRC or a part
1373 * of it, free the mbuf associated to the last buffer. If part
1374 * of the CRC is also contained in the previous mbuf, subtract
1375 * the length of that CRC part from the data length of the
1379 if (unlikely(rxq->crc_len > 0)) {
1380 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1381 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1382 rte_pktmbuf_free_seg(rxm);
1383 first_seg->nb_segs--;
1384 last_seg->data_len =
1385 (uint16_t)(last_seg->data_len -
1386 (RTE_ETHER_CRC_LEN - rx_packet_len));
1387 last_seg->next = NULL;
1389 rxm->data_len = (uint16_t)(rx_packet_len -
1393 first_seg->port = rxq->port_id;
1394 first_seg->ol_flags = 0;
1395 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1396 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1397 first_seg->packet_type =
1398 ptype_tbl[(uint8_t)((qword1 &
1399 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1401 if (pkt_flags & PKT_RX_RSS_HASH)
1402 first_seg->hash.rss =
1403 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1405 if (pkt_flags & PKT_RX_FDIR)
1406 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1408 first_seg->ol_flags |= pkt_flags;
1410 /* Prefetch data of first segment, if configured to do so. */
1411 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1412 first_seg->data_off));
1413 rx_pkts[nb_rx++] = first_seg;
1417 /* Record index of the next RX descriptor to probe. */
1418 rxq->rx_tail = rx_id;
1419 rxq->pkt_first_seg = first_seg;
1420 rxq->pkt_last_seg = last_seg;
1422 iavf_update_rx_tail(rxq, nb_hold, rx_id);
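/*
 * Illustrative sketch (hypothetical helper, not part of the driver): a
 * packet returned by the scattered paths above is an mbuf chain, and the
 * pkt_len kept on the first segment equals the sum of the per-segment
 * data_len values maintained in the loop, as this walk would confirm.
 */
static __rte_unused uint32_t
example_sum_seg_len(const struct rte_mbuf *first_seg)
{
	const struct rte_mbuf *seg;
	uint32_t total = 0;

	for (seg = first_seg; seg != NULL; seg = seg->next)
		total += seg->data_len;

	return total;
}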
1427 #define IAVF_LOOK_AHEAD 8
1429 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1431 volatile union iavf_rx_flex_desc *rxdp;
1432 struct rte_mbuf **rxep;
1433 struct rte_mbuf *mb;
1436 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1437 int32_t i, j, nb_rx = 0;
1439 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1441 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1442 rxep = &rxq->sw_ring[rxq->rx_tail];
1444 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1446 /* Make sure there is at least 1 packet to receive */
1447 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1450 /* Scan LOOK_AHEAD descriptors at a time to determine which
1451 * descriptors reference packets that are ready to be received.
1453 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1454 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1455 /* Read desc statuses backwards to avoid race condition */
1456 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1457 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1461 /* Compute how many status bits were set */
1462 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1463 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1467 /* Translate descriptor info to mbuf parameters */
1468 for (j = 0; j < nb_dd; j++) {
1469 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1471 i * IAVF_LOOK_AHEAD + j);
1474 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1475 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1476 mb->data_len = pkt_len;
1477 mb->pkt_len = pkt_len;
1480 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1481 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1482 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1483 iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
1484 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1485 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1487 mb->ol_flags |= pkt_flags;
1490 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1491 rxq->rx_stage[i + j] = rxep[j];
1493 if (nb_dd != IAVF_LOOK_AHEAD)
1497 /* Clear software ring entries */
1498 for (i = 0; i < nb_rx; i++)
1499 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1505 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1507 volatile union iavf_rx_desc *rxdp;
1508 struct rte_mbuf **rxep;
1509 struct rte_mbuf *mb;
1513 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1514 int32_t i, j, nb_rx = 0;
1516 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1518 rxdp = &rxq->rx_ring[rxq->rx_tail];
1519 rxep = &rxq->sw_ring[rxq->rx_tail];
1521 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1522 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1523 IAVF_RXD_QW1_STATUS_SHIFT;
1525 /* Make sure there is at least 1 packet to receive */
1526 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1529 /* Scan LOOK_AHEAD descriptors at a time to determine which
1530 * descriptors reference packets that are ready to be received.
1532 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1533 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1534 /* Read desc statuses backwards to avoid race condition */
1535 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1536 qword1 = rte_le_to_cpu_64(
1537 rxdp[j].wb.qword1.status_error_len);
1538 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1539 IAVF_RXD_QW1_STATUS_SHIFT;
1544 /* Compute how many status bits were set */
1545 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1546 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1550 /* Translate descriptor info to mbuf parameters */
1551 for (j = 0; j < nb_dd; j++) {
1552 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1553 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1556 qword1 = rte_le_to_cpu_64
1557 (rxdp[j].wb.qword1.status_error_len);
1558 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1559 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1560 mb->data_len = pkt_len;
1561 mb->pkt_len = pkt_len;
1563 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1564 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1566 ptype_tbl[(uint8_t)((qword1 &
1567 IAVF_RXD_QW1_PTYPE_MASK) >>
1568 IAVF_RXD_QW1_PTYPE_SHIFT)];
1570 if (pkt_flags & PKT_RX_RSS_HASH)
1571 mb->hash.rss = rte_le_to_cpu_32(
1572 rxdp[j].wb.qword0.hi_dword.rss);
1574 if (pkt_flags & PKT_RX_FDIR)
1575 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1577 mb->ol_flags |= pkt_flags;
1580 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1581 rxq->rx_stage[i + j] = rxep[j];
1583 if (nb_dd != IAVF_LOOK_AHEAD)
1587 /* Clear software ring entries */
1588 for (i = 0; i < nb_rx; i++)
1589 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1594 static inline uint16_t
1595 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1596 struct rte_mbuf **rx_pkts,
1600 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1602 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1604 for (i = 0; i < nb_pkts; i++)
1605 rx_pkts[i] = stage[i];
1607 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1608 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1614 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1616 volatile union iavf_rx_desc *rxdp;
1617 struct rte_mbuf **rxep;
1618 struct rte_mbuf *mb;
1619 uint16_t alloc_idx, i;
1623 /* Allocate buffers in bulk */
1624 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1625 (rxq->rx_free_thresh - 1));
1626 rxep = &rxq->sw_ring[alloc_idx];
1627 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1628 rxq->rx_free_thresh);
1629 if (unlikely(diag != 0)) {
1630 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1634 rxdp = &rxq->rx_ring[alloc_idx];
1635 for (i = 0; i < rxq->rx_free_thresh; i++) {
1636 if (likely(i < (rxq->rx_free_thresh - 1)))
1637 /* Prefetch next mbuf */
1638 rte_prefetch0(rxep[i + 1]);
1641 rte_mbuf_refcnt_set(mb, 1);
1643 mb->data_off = RTE_PKTMBUF_HEADROOM;
1645 mb->port = rxq->port_id;
1646 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1647 rxdp[i].read.hdr_addr = 0;
1648 rxdp[i].read.pkt_addr = dma_addr;
1651 /* Update rx tail register */
1653 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1655 rxq->rx_free_trigger =
1656 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1657 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1658 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1663 static inline uint16_t
1664 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1666 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1672 if (rxq->rx_nb_avail)
1673 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1675 if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
1676 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1678 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1679 rxq->rx_next_avail = 0;
1680 rxq->rx_nb_avail = nb_rx;
1681 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1683 if (rxq->rx_tail > rxq->rx_free_trigger) {
1684 if (iavf_rx_alloc_bufs(rxq) != 0) {
1687 /* TODO: count rx_mbuf_alloc_failed here */
1689 rxq->rx_nb_avail = 0;
1690 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1691 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1692 rxq->sw_ring[j] = rxq->rx_stage[i];
1698 if (rxq->rx_tail >= rxq->nb_rx_desc)
1701 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1702 rxq->port_id, rxq->queue_id,
1703 rxq->rx_tail, nb_rx);
1705 if (rxq->rx_nb_avail)
1706 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1712 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1713 struct rte_mbuf **rx_pkts,
1716 uint16_t nb_rx = 0, n, count;
1718 if (unlikely(nb_pkts == 0))
1721 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1722 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1725 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1726 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1727 nb_rx = (uint16_t)(nb_rx + count);
1728 nb_pkts = (uint16_t)(nb_pkts - count);
1737 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1739 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1740 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1741 uint16_t nb_tx_desc = txq->nb_tx_desc;
1742 uint16_t desc_to_clean_to;
1743 uint16_t nb_tx_to_clean;
1745 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1747 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1748 if (desc_to_clean_to >= nb_tx_desc)
1749 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1751 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1752 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1753 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1754 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1755 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1756 "(port=%d queue=%d)", desc_to_clean_to,
1757 txq->port_id, txq->queue_id);
1761 if (last_desc_cleaned > desc_to_clean_to)
1762 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1765 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1768 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1770 txq->last_desc_cleaned = desc_to_clean_to;
1771 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1776 /* Check if the context descriptor is needed for TX offloading */
1777 static inline uint16_t
1778 iavf_calc_context_desc(uint64_t flags)
1780 static uint64_t mask = PKT_TX_TCP_SEG;
1782 return (flags & mask) ? 1 : 0;
1786 iavf_txd_enable_checksum(uint64_t ol_flags,
1788 uint32_t *td_offset,
1789 union iavf_tx_offload tx_offload)
1792 *td_offset |= (tx_offload.l2_len >> 1) <<
1793 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1795 /* Enable L3 checksum offloads */
1796 if (ol_flags & PKT_TX_IP_CKSUM) {
1797 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1798 *td_offset |= (tx_offload.l3_len >> 2) <<
1799 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1800 } else if (ol_flags & PKT_TX_IPV4) {
1801 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1802 *td_offset |= (tx_offload.l3_len >> 2) <<
1803 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1804 } else if (ol_flags & PKT_TX_IPV6) {
1805 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1806 *td_offset |= (tx_offload.l3_len >> 2) <<
1807 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1810 if (ol_flags & PKT_TX_TCP_SEG) {
1811 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1812 *td_offset |= (tx_offload.l4_len >> 2) <<
1813 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1817 /* Enable L4 checksum offloads */
1818 switch (ol_flags & PKT_TX_L4_MASK) {
1819 case PKT_TX_TCP_CKSUM:
1820 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1821 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1822 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1824 case PKT_TX_SCTP_CKSUM:
1825 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1826 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1827 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1829 case PKT_TX_UDP_CKSUM:
1830 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1831 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1832 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1839 /* Set the TSO context descriptor;
1840 * supports IP -> L4 and IP -> IP -> L4
1842 static inline uint64_t
1843 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
1845 uint64_t ctx_desc = 0;
1846 uint32_t cd_cmd, hdr_len, cd_tso_len;
1848 if (!tx_offload.l4_len) {
1849 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1853 hdr_len = tx_offload.l2_len +
1857 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1858 cd_tso_len = mbuf->pkt_len - hdr_len;
1859 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1860 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1861 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
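/*
 * Worked example for the context descriptor above (numbers assumed): for a
 * TSO packet with l2_len = 14, l3_len = 20, l4_len = 20, pkt_len = 9014 and
 * tso_segsz = 1460, hdr_len is 54 and the TSO payload length is 8960; the
 * command, TSO length and MSS are then packed into the quad word as shown.
 * The helper name is hypothetical.
 */
static __rte_unused uint32_t
example_tso_payload_len(const struct rte_mbuf *m)
{
	uint32_t hdr_len = m->l2_len + m->l3_len + m->l4_len;

	return m->pkt_len - hdr_len;
}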
1866 /* Construct the tx flags */
1867 static inline uint64_t
1868 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1871 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
1872 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
1873 ((uint64_t)td_offset <<
1874 IAVF_TXD_QW1_OFFSET_SHIFT) |
1876 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1877 ((uint64_t)td_tag <<
1878 IAVF_TXD_QW1_L2TAG1_SHIFT));
1883 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1885 volatile struct iavf_tx_desc *txd;
1886 volatile struct iavf_tx_desc *txr;
1887 struct iavf_tx_queue *txq;
1888 struct iavf_tx_entry *sw_ring;
1889 struct iavf_tx_entry *txe, *txn;
1890 struct rte_mbuf *tx_pkt;
1891 struct rte_mbuf *m_seg;
1902 uint64_t buf_dma_addr;
1903 union iavf_tx_offload tx_offload = {0};
1906 sw_ring = txq->sw_ring;
1908 tx_id = txq->tx_tail;
1909 txe = &sw_ring[tx_id];
1911 /* Check if the descriptor ring needs to be cleaned. */
1912 if (txq->nb_free < txq->free_thresh)
1913 (void)iavf_xmit_cleanup(txq);
1915 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1920 tx_pkt = *tx_pkts++;
1921 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1923 ol_flags = tx_pkt->ol_flags;
1924 tx_offload.l2_len = tx_pkt->l2_len;
1925 tx_offload.l3_len = tx_pkt->l3_len;
1926 tx_offload.l4_len = tx_pkt->l4_len;
1927 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1928 /* Calculate the number of context descriptors needed. */
1929 nb_ctx = iavf_calc_context_desc(ol_flags);
1931 /* The number of descriptors that must be allocated for
1932 * a packet equals the number of segments of that
1933 * packet, plus one context descriptor if needed.
1935 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1936 tx_last = (uint16_t)(tx_id + nb_used - 1);
1939 if (tx_last >= txq->nb_tx_desc)
1940 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1942 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1943 " tx_first=%u tx_last=%u",
1944 txq->port_id, txq->queue_id, tx_id, tx_last);
1946 if (nb_used > txq->nb_free) {
1947 if (iavf_xmit_cleanup(txq)) {
1952 if (unlikely(nb_used > txq->rs_thresh)) {
1953 while (nb_used > txq->nb_free) {
1954 if (iavf_xmit_cleanup(txq)) {
1963 /* Descriptor based VLAN insertion */
1964 if (ol_flags & PKT_TX_VLAN_PKT) {
1965 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
1966 td_tag = tx_pkt->vlan_tci;
1969 /* According to the datasheet, bit 2 is reserved and must be
1974 /* Enable checksum offloading */
1975 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
1976 iavf_txd_enable_checksum(ol_flags, &td_cmd,
1977 &td_offset, tx_offload);
1980 /* Setup TX context descriptor if required */
1981 uint64_t cd_type_cmd_tso_mss =
1982 IAVF_TX_DESC_DTYPE_CONTEXT;
1983 volatile struct iavf_tx_context_desc *ctx_txd =
1984 (volatile struct iavf_tx_context_desc *)
1987 txn = &sw_ring[txe->next_id];
1988 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1990 rte_pktmbuf_free_seg(txe->mbuf);
1995 if (ol_flags & PKT_TX_TCP_SEG)
1996 cd_type_cmd_tso_mss |=
1997 iavf_set_tso_ctx(tx_pkt, tx_offload);
1999 ctx_txd->type_cmd_tso_mss =
2000 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2002 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2003 txe->last_id = tx_last;
2004 tx_id = txe->next_id;
2011 txn = &sw_ring[txe->next_id];
2014 rte_pktmbuf_free_seg(txe->mbuf);
2017 /* Setup TX Descriptor */
2018 slen = m_seg->data_len;
2019 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2020 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2021 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2026 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2027 txe->last_id = tx_last;
2028 tx_id = txe->next_id;
2030 m_seg = m_seg->next;
2033 /* The last packet data descriptor needs End Of Packet (EOP) */
2034 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2035 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2036 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2038 if (txq->nb_used >= txq->rs_thresh) {
2039 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2040 "%4u (port=%d queue=%d)",
2041 tx_last, txq->port_id, txq->queue_id);
2043 td_cmd |= IAVF_TX_DESC_CMD_RS;
2045 /* Update txq RS bit counters */
2049 txd->cmd_type_offset_bsz |=
2050 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2051 IAVF_TXD_QW1_CMD_SHIFT);
2052 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2058 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2059 txq->port_id, txq->queue_id, tx_id, nb_tx);
2061 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2062 txq->tx_tail = tx_id;
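/*
 * Illustrative application-side transmit sketch, not part of the PMD: when
 * the scalar path is selected, rte_eth_tx_prepare() reaches iavf_prep_pkts()
 * below and rte_eth_tx_burst() reaches iavf_xmit_pkts() above. Sending only
 * the prepared prefix is an assumption made for brevity.
 */
static __rte_unused uint16_t
example_tx_send(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}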
2067 /* TX prep functions */
2069 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2076 for (i = 0; i < nb_pkts; i++) {
2078 ol_flags = m->ol_flags;
2080 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2081 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2082 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2086 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2087 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2088 /* An MSS outside this range is considered malicious */
2093 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2094 rte_errno = ENOTSUP;
2098 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2099 ret = rte_validate_tx_offload(m);
2105 ret = rte_net_intel_cksum_prepare(m);
2115 /* Choose the Rx function */
2117 iavf_set_rx_function(struct rte_eth_dev *dev)
2119 struct iavf_adapter *adapter =
2120 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2121 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2123 struct iavf_rx_queue *rxq;
2125 bool use_avx2 = false;
2126 #ifdef CC_AVX512_SUPPORT
2127 bool use_avx512 = false;
2130 if (!iavf_rx_vec_dev_check(dev) &&
2131 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2132 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2133 rxq = dev->data->rx_queues[i];
2134 (void)iavf_rxq_vec_setup(rxq);
2137 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2138 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2139 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2141 #ifdef CC_AVX512_SUPPORT
2142 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2143 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2144 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2148 if (dev->data->scattered_rx) {
2150 "Using %sVector Scattered Rx (port %d).",
2151 use_avx2 ? "avx2 " : "",
2152 dev->data->port_id);
2153 if (vf->vf_res->vf_cap_flags &
2154 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2155 dev->rx_pkt_burst = use_avx2 ?
2156 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2157 iavf_recv_scattered_pkts_vec_flex_rxd;
2158 #ifdef CC_AVX512_SUPPORT
2161 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2164 dev->rx_pkt_burst = use_avx2 ?
2165 iavf_recv_scattered_pkts_vec_avx2 :
2166 iavf_recv_scattered_pkts_vec;
2167 #ifdef CC_AVX512_SUPPORT
2170 iavf_recv_scattered_pkts_vec_avx512;
2174 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2175 use_avx2 ? "avx2 " : "",
2176 dev->data->port_id);
2177 if (vf->vf_res->vf_cap_flags &
2178 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2179 dev->rx_pkt_burst = use_avx2 ?
2180 iavf_recv_pkts_vec_avx2_flex_rxd :
2181 iavf_recv_pkts_vec_flex_rxd;
2182 #ifdef CC_AVX512_SUPPORT
2185 iavf_recv_pkts_vec_avx512_flex_rxd;
2188 dev->rx_pkt_burst = use_avx2 ?
2189 iavf_recv_pkts_vec_avx2 :
2191 #ifdef CC_AVX512_SUPPORT
2194 iavf_recv_pkts_vec_avx512;
2203 if (dev->data->scattered_rx) {
2204 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2205 dev->data->port_id);
2206 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2207 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2209 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2210 } else if (adapter->rx_bulk_alloc_allowed) {
2211 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2212 dev->data->port_id);
2213 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2215 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2216 dev->data->port_id);
2217 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2218 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2220 dev->rx_pkt_burst = iavf_recv_pkts;
2224 /* Choose the Tx function */
2226 iavf_set_tx_function(struct rte_eth_dev *dev)
2229 struct iavf_tx_queue *txq;
2231 bool use_avx2 = false;
2233 if (!iavf_tx_vec_dev_check(dev) &&
2234 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2235 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2236 txq = dev->data->tx_queues[i];
2239 iavf_txq_vec_setup(txq);
2242 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2243 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2244 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2247 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2248 use_avx2 ? "avx2 " : "",
2249 dev->data->port_id);
2250 dev->tx_pkt_burst = use_avx2 ?
2251 iavf_xmit_pkts_vec_avx2 :
2253 dev->tx_pkt_prepare = NULL;
2259 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2260 dev->data->port_id);
2261 dev->tx_pkt_burst = iavf_xmit_pkts;
2262 dev->tx_pkt_prepare = iavf_prep_pkts;
2266 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2269 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2270 uint16_t i, tx_last, tx_id;
2271 uint16_t nb_tx_free_last;
2272 uint16_t nb_tx_to_clean;
2275 /* Start freeing mbufs from the descriptor following tx_tail */
2276 tx_last = txq->tx_tail;
2277 tx_id = swr_ring[tx_last].next_id;
2279 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2282 nb_tx_to_clean = txq->nb_free;
2283 nb_tx_free_last = txq->nb_free;
2285 free_cnt = txq->nb_tx_desc;
2287 /* Loop through swr_ring to count the number of
2288 * freeable mbufs and packets.
2290 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2291 for (i = 0; i < nb_tx_to_clean &&
2292 pkt_cnt < free_cnt &&
2293 tx_id != tx_last; i++) {
2294 if (swr_ring[tx_id].mbuf != NULL) {
2295 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2296 swr_ring[tx_id].mbuf = NULL;
2299 * last segment in the packet,
2300 * increment packet count
2302 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2305 tx_id = swr_ring[tx_id].next_id;
2308 if (txq->rs_thresh > txq->nb_tx_desc -
2309 txq->nb_free || tx_id == tx_last)
2312 if (pkt_cnt < free_cnt) {
2313 if (iavf_xmit_cleanup(txq))
2316 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2317 nb_tx_free_last = txq->nb_free;
2321 return (int)pkt_cnt;
2325 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2327 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2329 return iavf_tx_done_cleanup_full(q, free_cnt);
2333 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2334 struct rte_eth_rxq_info *qinfo)
2336 struct iavf_rx_queue *rxq;
2338 rxq = dev->data->rx_queues[queue_id];
2340 qinfo->mp = rxq->mp;
2341 qinfo->scattered_rx = dev->data->scattered_rx;
2342 qinfo->nb_desc = rxq->nb_rx_desc;
2344 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2345 qinfo->conf.rx_drop_en = true;
2346 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2350 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2351 struct rte_eth_txq_info *qinfo)
2353 struct iavf_tx_queue *txq;
2355 txq = dev->data->tx_queues[queue_id];
2357 qinfo->nb_desc = txq->nb_tx_desc;
2359 qinfo->conf.tx_free_thresh = txq->free_thresh;
2360 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2361 qinfo->conf.offloads = txq->offloads;
2362 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2365 /* Get the number of used descriptors of an Rx queue */
2367 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2369 #define IAVF_RXQ_SCAN_INTERVAL 4
2370 volatile union iavf_rx_desc *rxdp;
2371 struct iavf_rx_queue *rxq;
2372 uint16_t desc = 0;
2374 rxq = dev->data->rx_queues[queue_id];
2375 rxdp = &rxq->rx_ring[rxq->rx_tail];
2377 while ((desc < rxq->nb_rx_desc) &&
2378 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2379 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2380 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2381 /* Check the DD bit on every 4th Rx descriptor only,
2382 * to avoid checking too frequently and degrading performance
2383 * too much.
2384 */
2385 desc += IAVF_RXQ_SCAN_INTERVAL;
2386 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2387 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2388 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2389 desc - rxq->nb_rx_desc]);
2390 }
2392 return desc;
2393 }
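/* Illustrative usage note (not part of the driver): a coarse Rx backlog
 * estimate, e.g. to drive adaptive polling, can be obtained through the
 * generic API that ends up in the function above. nb_rxd, burst_size and
 * MAX_PKT_BURST are the application's own values:
 *
 *	int used = rte_eth_rx_queue_count(port_id, queue_id);
 *
 *	if (used >= 0 && (uint16_t)used > nb_rxd / 2)
 *		burst_size = RTE_MIN(burst_size * 2, MAX_PKT_BURST);
 */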
2395 int
2396 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2397 {
2398 struct iavf_rx_queue *rxq = rx_queue;
2399 volatile uint64_t *status;
2400 uint64_t mask;
2401 uint32_t desc;
2403 if (unlikely(offset >= rxq->nb_rx_desc))
2404 return -EINVAL;
2406 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2407 return RTE_ETH_RX_DESC_UNAVAIL;
2409 desc = rxq->rx_tail + offset;
2410 if (desc >= rxq->nb_rx_desc)
2411 desc -= rxq->nb_rx_desc;
2413 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2414 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2415 << IAVF_RXD_QW1_STATUS_SHIFT);
2416 if (*status & mask)
2417 return RTE_ETH_RX_DESC_DONE;
2419 return RTE_ETH_RX_DESC_AVAIL;
2420 }
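/* Illustrative usage note (not part of the driver): an application can probe
 * a specific Rx slot through the generic descriptor-status API, e.g. to check
 * whether enough packets have landed to justify another rx_burst call. pkts
 * is the application's mbuf array; RTE_ETH_RX_DESC_UNAVAIL is returned for
 * offsets that fall into descriptors the driver is still holding back, as
 * handled above:
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, 32) ==
 *			RTE_ETH_RX_DESC_DONE)
 *		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 64);
 */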
2422 int
2423 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2424 {
2425 struct iavf_tx_queue *txq = tx_queue;
2426 volatile uint64_t *status;
2427 uint64_t mask, expect;
2428 uint32_t desc;
2430 if (unlikely(offset >= txq->nb_tx_desc))
2431 return -EINVAL;
2433 desc = txq->tx_tail + offset;
2434 /* Go to the next descriptor that has the RS bit set */
2435 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2436 txq->rs_thresh;
2437 if (desc >= txq->nb_tx_desc) {
2438 desc -= txq->nb_tx_desc;
2439 if (desc >= txq->nb_tx_desc)
2440 desc -= txq->nb_tx_desc;
2441 }
2443 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2444 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2445 expect = rte_cpu_to_le_64(
2446 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2447 if ((*status & mask) == expect)
2448 return RTE_ETH_TX_DESC_DONE;
2450 return RTE_ETH_TX_DESC_FULL;
2451 }
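/* Illustrative usage note (not part of the driver): because the hardware only
 * writes back completion status on descriptors carrying the RS bit, the query
 * above is rounded up to the next rs_thresh boundary. For example, with
 * tx_tail == 0 and tx_rs_thresh == 32, asking about offset 40 actually checks
 * descriptor 64. port_id/queue_id are placeholders:
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, queue_id, 40);
 *
 *	if (st == RTE_ETH_TX_DESC_DONE)
 *		printf("queried descriptor has been processed\n");
 */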
2453 const uint32_t *
2454 iavf_get_default_ptype_table(void)
2455 {
2456 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2457 __rte_cache_aligned = {
2458 /* L2 types */
2459 /* [0] reserved */
2460 [1] = RTE_PTYPE_L2_ETHER,
2461 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2462 /* [3] - [5] reserved */
2463 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2464 /* [7] - [10] reserved */
2465 [11] = RTE_PTYPE_L2_ETHER_ARP,
2466 /* [12] - [21] reserved */
2468 /* Non tunneled IPv4 */
2469 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2470 RTE_PTYPE_L4_FRAG,
2471 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2472 RTE_PTYPE_L4_NONFRAG,
2473 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2474 RTE_PTYPE_L4_UDP,
2475 /* [25] reserved */
2476 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2477 RTE_PTYPE_L4_TCP,
2478 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2479 RTE_PTYPE_L4_SCTP,
2480 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2481 RTE_PTYPE_L4_ICMP,
2483 /* IPv4 --> IPv4 */
2484 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2485 RTE_PTYPE_TUNNEL_IP |
2486 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2487 RTE_PTYPE_INNER_L4_FRAG,
2488 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2489 RTE_PTYPE_TUNNEL_IP |
2490 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2491 RTE_PTYPE_INNER_L4_NONFRAG,
2492 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2493 RTE_PTYPE_TUNNEL_IP |
2494 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2495 RTE_PTYPE_INNER_L4_UDP,
2497 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2498 RTE_PTYPE_TUNNEL_IP |
2499 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2500 RTE_PTYPE_INNER_L4_TCP,
2501 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2502 RTE_PTYPE_TUNNEL_IP |
2503 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2504 RTE_PTYPE_INNER_L4_SCTP,
2505 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2506 RTE_PTYPE_TUNNEL_IP |
2507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2508 RTE_PTYPE_INNER_L4_ICMP,
2510 /* IPv4 --> IPv6 */
2511 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2512 RTE_PTYPE_TUNNEL_IP |
2513 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2514 RTE_PTYPE_INNER_L4_FRAG,
2515 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2516 RTE_PTYPE_TUNNEL_IP |
2517 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2518 RTE_PTYPE_INNER_L4_NONFRAG,
2519 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2520 RTE_PTYPE_TUNNEL_IP |
2521 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2522 RTE_PTYPE_INNER_L4_UDP,
2524 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2525 RTE_PTYPE_TUNNEL_IP |
2526 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2527 RTE_PTYPE_INNER_L4_TCP,
2528 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2529 RTE_PTYPE_TUNNEL_IP |
2530 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2531 RTE_PTYPE_INNER_L4_SCTP,
2532 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2533 RTE_PTYPE_TUNNEL_IP |
2534 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2535 RTE_PTYPE_INNER_L4_ICMP,
2537 /* IPv4 --> GRE/Teredo/VXLAN */
2538 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2539 RTE_PTYPE_TUNNEL_GRENAT,
2541 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2542 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2543 RTE_PTYPE_TUNNEL_GRENAT |
2544 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2545 RTE_PTYPE_INNER_L4_FRAG,
2546 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2547 RTE_PTYPE_TUNNEL_GRENAT |
2548 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2549 RTE_PTYPE_INNER_L4_NONFRAG,
2550 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2551 RTE_PTYPE_TUNNEL_GRENAT |
2552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2553 RTE_PTYPE_INNER_L4_UDP,
2555 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2556 RTE_PTYPE_TUNNEL_GRENAT |
2557 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2558 RTE_PTYPE_INNER_L4_TCP,
2559 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2560 RTE_PTYPE_TUNNEL_GRENAT |
2561 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2562 RTE_PTYPE_INNER_L4_SCTP,
2563 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2564 RTE_PTYPE_TUNNEL_GRENAT |
2565 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2566 RTE_PTYPE_INNER_L4_ICMP,
2568 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2569 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2570 RTE_PTYPE_TUNNEL_GRENAT |
2571 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2572 RTE_PTYPE_INNER_L4_FRAG,
2573 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2574 RTE_PTYPE_TUNNEL_GRENAT |
2575 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2576 RTE_PTYPE_INNER_L4_NONFRAG,
2577 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2578 RTE_PTYPE_TUNNEL_GRENAT |
2579 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2580 RTE_PTYPE_INNER_L4_UDP,
2582 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2583 RTE_PTYPE_TUNNEL_GRENAT |
2584 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2585 RTE_PTYPE_INNER_L4_TCP,
2586 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2587 RTE_PTYPE_TUNNEL_GRENAT |
2588 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2589 RTE_PTYPE_INNER_L4_SCTP,
2590 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2591 RTE_PTYPE_TUNNEL_GRENAT |
2592 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2593 RTE_PTYPE_INNER_L4_ICMP,
2595 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2596 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2597 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2599 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2600 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2601 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2602 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2603 RTE_PTYPE_INNER_L4_FRAG,
2604 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2605 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2607 RTE_PTYPE_INNER_L4_NONFRAG,
2608 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2609 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2611 RTE_PTYPE_INNER_L4_UDP,
2613 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2614 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2615 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2616 RTE_PTYPE_INNER_L4_TCP,
2617 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2618 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2620 RTE_PTYPE_INNER_L4_SCTP,
2621 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2622 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2624 RTE_PTYPE_INNER_L4_ICMP,
2626 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2627 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2628 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2629 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2630 RTE_PTYPE_INNER_L4_FRAG,
2631 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2632 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2634 RTE_PTYPE_INNER_L4_NONFRAG,
2635 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2636 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2638 RTE_PTYPE_INNER_L4_UDP,
2640 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2642 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2643 RTE_PTYPE_INNER_L4_TCP,
2644 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2645 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2647 RTE_PTYPE_INNER_L4_SCTP,
2648 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2649 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2651 RTE_PTYPE_INNER_L4_ICMP,
2652 /* [73] - [87] reserved */
2654 /* Non tunneled IPv6 */
2655 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2656 RTE_PTYPE_L4_FRAG,
2657 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2658 RTE_PTYPE_L4_NONFRAG,
2659 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2660 RTE_PTYPE_L4_UDP,
2661 /* [91] reserved */
2662 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2663 RTE_PTYPE_L4_TCP,
2664 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2665 RTE_PTYPE_L4_SCTP,
2666 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2667 RTE_PTYPE_L4_ICMP,
2669 /* IPv6 --> IPv4 */
2670 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2671 RTE_PTYPE_TUNNEL_IP |
2672 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2673 RTE_PTYPE_INNER_L4_FRAG,
2674 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2675 RTE_PTYPE_TUNNEL_IP |
2676 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2677 RTE_PTYPE_INNER_L4_NONFRAG,
2678 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2679 RTE_PTYPE_TUNNEL_IP |
2680 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2681 RTE_PTYPE_INNER_L4_UDP,
2683 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2684 RTE_PTYPE_TUNNEL_IP |
2685 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2686 RTE_PTYPE_INNER_L4_TCP,
2687 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2688 RTE_PTYPE_TUNNEL_IP |
2689 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2690 RTE_PTYPE_INNER_L4_SCTP,
2691 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2692 RTE_PTYPE_TUNNEL_IP |
2693 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2694 RTE_PTYPE_INNER_L4_ICMP,
2696 /* IPv6 --> IPv6 */
2697 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2698 RTE_PTYPE_TUNNEL_IP |
2699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2700 RTE_PTYPE_INNER_L4_FRAG,
2701 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2702 RTE_PTYPE_TUNNEL_IP |
2703 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2704 RTE_PTYPE_INNER_L4_NONFRAG,
2705 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2706 RTE_PTYPE_TUNNEL_IP |
2707 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2708 RTE_PTYPE_INNER_L4_UDP,
2709 /* [105] reserved */
2710 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2711 RTE_PTYPE_TUNNEL_IP |
2712 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2713 RTE_PTYPE_INNER_L4_TCP,
2714 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2715 RTE_PTYPE_TUNNEL_IP |
2716 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2717 RTE_PTYPE_INNER_L4_SCTP,
2718 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2719 RTE_PTYPE_TUNNEL_IP |
2720 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2721 RTE_PTYPE_INNER_L4_ICMP,
2723 /* IPv6 --> GRE/Teredo/VXLAN */
2724 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2725 RTE_PTYPE_TUNNEL_GRENAT,
2727 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2728 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2729 RTE_PTYPE_TUNNEL_GRENAT |
2730 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2731 RTE_PTYPE_INNER_L4_FRAG,
2732 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_GRENAT |
2734 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2735 RTE_PTYPE_INNER_L4_NONFRAG,
2736 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2737 RTE_PTYPE_TUNNEL_GRENAT |
2738 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2739 RTE_PTYPE_INNER_L4_UDP,
2740 /* [113] reserved */
2741 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2742 RTE_PTYPE_TUNNEL_GRENAT |
2743 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2744 RTE_PTYPE_INNER_L4_TCP,
2745 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2746 RTE_PTYPE_TUNNEL_GRENAT |
2747 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2748 RTE_PTYPE_INNER_L4_SCTP,
2749 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2750 RTE_PTYPE_TUNNEL_GRENAT |
2751 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2752 RTE_PTYPE_INNER_L4_ICMP,
2754 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2755 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2756 RTE_PTYPE_TUNNEL_GRENAT |
2757 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2758 RTE_PTYPE_INNER_L4_FRAG,
2759 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2760 RTE_PTYPE_TUNNEL_GRENAT |
2761 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2762 RTE_PTYPE_INNER_L4_NONFRAG,
2763 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2764 RTE_PTYPE_TUNNEL_GRENAT |
2765 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2766 RTE_PTYPE_INNER_L4_UDP,
2767 /* [120] reserved */
2768 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2769 RTE_PTYPE_TUNNEL_GRENAT |
2770 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2771 RTE_PTYPE_INNER_L4_TCP,
2772 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2773 RTE_PTYPE_TUNNEL_GRENAT |
2774 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2775 RTE_PTYPE_INNER_L4_SCTP,
2776 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2777 RTE_PTYPE_TUNNEL_GRENAT |
2778 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2779 RTE_PTYPE_INNER_L4_ICMP,
2781 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2782 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2783 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2785 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2786 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2787 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2788 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2789 RTE_PTYPE_INNER_L4_FRAG,
2790 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2791 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2792 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2793 RTE_PTYPE_INNER_L4_NONFRAG,
2794 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2795 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2796 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2797 RTE_PTYPE_INNER_L4_UDP,
2798 /* [128] reserved */
2799 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2800 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2801 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2802 RTE_PTYPE_INNER_L4_TCP,
2803 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2804 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2805 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2806 RTE_PTYPE_INNER_L4_SCTP,
2807 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2808 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2809 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2810 RTE_PTYPE_INNER_L4_ICMP,
2812 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2813 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2814 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2815 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2816 RTE_PTYPE_INNER_L4_FRAG,
2817 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2818 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2819 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2820 RTE_PTYPE_INNER_L4_NONFRAG,
2821 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2822 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2823 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2824 RTE_PTYPE_INNER_L4_UDP,
2825 /* [135] reserved */
2826 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2827 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2828 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2829 RTE_PTYPE_INNER_L4_TCP,
2830 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2831 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2832 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2833 RTE_PTYPE_INNER_L4_SCTP,
2834 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2835 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2836 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2837 RTE_PTYPE_INNER_L4_ICMP,
2838 /* [139] - [299] reserved */
2840 /* PPPoE */
2841 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
2842 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
2844 /* PPPoE --> IPv4 */
2845 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
2846 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2847 RTE_PTYPE_L4_FRAG,
2848 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
2849 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2850 RTE_PTYPE_L4_NONFRAG,
2851 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
2852 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2853 RTE_PTYPE_L4_UDP,
2854 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
2855 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2856 RTE_PTYPE_L4_TCP,
2857 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
2858 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2859 RTE_PTYPE_L4_SCTP,
2860 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
2861 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2862 RTE_PTYPE_L4_ICMP,
2864 /* PPPoE --> IPv6 */
2865 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
2866 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2867 RTE_PTYPE_L4_FRAG,
2868 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
2869 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2870 RTE_PTYPE_L4_NONFRAG,
2871 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
2872 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2873 RTE_PTYPE_L4_UDP,
2874 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
2875 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2876 RTE_PTYPE_L4_TCP,
2877 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
2878 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2879 RTE_PTYPE_L4_SCTP,
2880 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
2881 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2882 RTE_PTYPE_L4_ICMP,
2883 /* [314] - [324] reserved */
2885 /* IPv4/IPv6 --> GTPC/GTPU */
2886 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2887 RTE_PTYPE_TUNNEL_GTPC,
2888 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2889 RTE_PTYPE_TUNNEL_GTPC,
2890 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2891 RTE_PTYPE_TUNNEL_GTPC,
2892 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2893 RTE_PTYPE_TUNNEL_GTPC,
2894 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2895 RTE_PTYPE_TUNNEL_GTPU,
2896 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2897 RTE_PTYPE_TUNNEL_GTPU,
2899 /* IPv4 --> GTPU --> IPv4 */
2900 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2901 RTE_PTYPE_TUNNEL_GTPU |
2902 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2903 RTE_PTYPE_INNER_L4_FRAG,
2904 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2905 RTE_PTYPE_TUNNEL_GTPU |
2906 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2907 RTE_PTYPE_INNER_L4_NONFRAG,
2908 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2909 RTE_PTYPE_TUNNEL_GTPU |
2910 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2911 RTE_PTYPE_INNER_L4_UDP,
2912 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2913 RTE_PTYPE_TUNNEL_GTPU |
2914 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2915 RTE_PTYPE_INNER_L4_TCP,
2916 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2917 RTE_PTYPE_TUNNEL_GTPU |
2918 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2919 RTE_PTYPE_INNER_L4_ICMP,
2921 /* IPv6 --> GTPU --> IPv4 */
2922 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_TUNNEL_GTPU |
2924 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2925 RTE_PTYPE_INNER_L4_FRAG,
2926 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2927 RTE_PTYPE_TUNNEL_GTPU |
2928 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2929 RTE_PTYPE_INNER_L4_NONFRAG,
2930 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2931 RTE_PTYPE_TUNNEL_GTPU |
2932 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2933 RTE_PTYPE_INNER_L4_UDP,
2934 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2935 RTE_PTYPE_TUNNEL_GTPU |
2936 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2937 RTE_PTYPE_INNER_L4_TCP,
2938 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2939 RTE_PTYPE_TUNNEL_GTPU |
2940 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2941 RTE_PTYPE_INNER_L4_ICMP,
2943 /* IPv4 --> GTPU --> IPv6 */
2944 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2945 RTE_PTYPE_TUNNEL_GTPU |
2946 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2947 RTE_PTYPE_INNER_L4_FRAG,
2948 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2949 RTE_PTYPE_TUNNEL_GTPU |
2950 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2951 RTE_PTYPE_INNER_L4_NONFRAG,
2952 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2953 RTE_PTYPE_TUNNEL_GTPU |
2954 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2955 RTE_PTYPE_INNER_L4_UDP,
2956 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2957 RTE_PTYPE_TUNNEL_GTPU |
2958 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2959 RTE_PTYPE_INNER_L4_TCP,
2960 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2961 RTE_PTYPE_TUNNEL_GTPU |
2962 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2963 RTE_PTYPE_INNER_L4_ICMP,
2965 /* IPv6 --> GTPU --> IPv6 */
2966 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2967 RTE_PTYPE_TUNNEL_GTPU |
2968 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2969 RTE_PTYPE_INNER_L4_FRAG,
2970 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2971 RTE_PTYPE_TUNNEL_GTPU |
2972 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2973 RTE_PTYPE_INNER_L4_NONFRAG,
2974 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2975 RTE_PTYPE_TUNNEL_GTPU |
2976 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2977 RTE_PTYPE_INNER_L4_UDP,
2978 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2979 RTE_PTYPE_TUNNEL_GTPU |
2980 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2981 RTE_PTYPE_INNER_L4_TCP,
2982 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2983 RTE_PTYPE_TUNNEL_GTPU |
2984 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2985 RTE_PTYPE_INNER_L4_ICMP,
2986 /* All others reserved */