1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
28 #include "iavf_rxtx.h"
31 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
33 /* The following constraints must be satisfied:
34 * thresh < rxq->nb_rx_desc
36 if (thresh >= nb_desc) {
37 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
45 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
46 uint16_t tx_free_thresh)
48 /* TX descriptors will have their RS bit set after tx_rs_thresh
49 * descriptors have been used. The TX descriptor ring will be cleaned
50 * after tx_free_thresh descriptors are used or if the number of
51 * descriptors required to transmit a packet is greater than the
52 * number of free TX descriptors.
54 * The following constraints must be satisfied:
55 * - tx_rs_thresh must be less than the size of the ring minus 2.
56 * - tx_free_thresh must be less than the size of the ring minus 3.
57 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
58 * - tx_rs_thresh must be a divisor of the ring size.
60 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
61 * race condition, hence the maximum threshold constraints. When set
62 * to zero use default values.
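*
* Illustrative example (not taken from the datasheet): with nb_desc = 512,
* tx_rs_thresh = 32 and tx_free_thresh = 64, all of the above constraints
* hold, since 32 < 510, 64 < 509, 32 <= 64 and 512 % 32 == 0.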
64 if (tx_rs_thresh >= (nb_desc - 2)) {
65 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
66 "number of TX descriptors (%u) minus 2",
67 tx_rs_thresh, nb_desc);
70 if (tx_free_thresh >= (nb_desc - 3)) {
71 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
72 "number of TX descriptors (%u) minus 3.",
73 tx_free_thresh, nb_desc);
76 if (tx_rs_thresh > tx_free_thresh) {
77 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
78 "equal to tx_free_thresh (%u).",
79 tx_rs_thresh, tx_free_thresh);
82 if ((nb_desc % tx_rs_thresh) != 0) {
83 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
84 "number of TX descriptors (%u).",
85 tx_rs_thresh, nb_desc);
93 check_rx_vec_allow(struct iavf_rx_queue *rxq)
95 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
96 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
97 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
101 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
106 check_tx_vec_allow(struct iavf_tx_queue *txq)
108 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
109 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
110 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
111 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
114 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
119 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
123 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
124 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
125 "rxq->rx_free_thresh=%d, "
126 "IAVF_RX_MAX_BURST=%d",
127 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
129 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
130 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
131 "rxq->nb_rx_desc=%d, "
132 "rxq->rx_free_thresh=%d",
133 rxq->nb_rx_desc, rxq->rx_free_thresh);
140 reset_rx_queue(struct iavf_rx_queue *rxq)
148 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
150 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
151 ((volatile char *)rxq->rx_ring)[i] = 0;
153 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
155 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
156 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
159 rxq->rx_nb_avail = 0;
160 rxq->rx_next_avail = 0;
161 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
165 rxq->pkt_first_seg = NULL;
166 rxq->pkt_last_seg = NULL;
170 reset_tx_queue(struct iavf_tx_queue *txq)
172 struct iavf_tx_entry *txe;
177 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
182 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
183 for (i = 0; i < size; i++)
184 ((volatile char *)txq->tx_ring)[i] = 0;
186 prev = (uint16_t)(txq->nb_tx_desc - 1);
187 for (i = 0; i < txq->nb_tx_desc; i++) {
188 txq->tx_ring[i].cmd_type_offset_bsz =
189 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
192 txe[prev].next_id = i;
199 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
200 txq->nb_free = txq->nb_tx_desc - 1;
202 txq->next_dd = txq->rs_thresh - 1;
203 txq->next_rs = txq->rs_thresh - 1;
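/* Both trackers start at rs_thresh - 1: assuming the vector Tx path sets
 * the RS bit every rs_thresh descriptors, index rs_thresh - 1 (e.g. 31 for
 * a threshold of 32) is the first descriptor expected to report completion.
 */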
207 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
209 volatile union iavf_rx_desc *rxd;
210 struct rte_mbuf *mbuf = NULL;
214 for (i = 0; i < rxq->nb_rx_desc; i++) {
215 mbuf = rte_mbuf_raw_alloc(rxq->mp);
216 if (unlikely(!mbuf)) {
217 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
221 rte_mbuf_refcnt_set(mbuf, 1);
223 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
225 mbuf->port = rxq->port_id;
228 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
230 rxd = &rxq->rx_ring[i];
231 rxd->read.pkt_addr = dma_addr;
232 rxd->read.hdr_addr = 0;
233 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
238 rxq->sw_ring[i] = mbuf;
245 release_rxq_mbufs(struct iavf_rx_queue *rxq)
252 for (i = 0; i < rxq->nb_rx_desc; i++) {
253 if (rxq->sw_ring[i]) {
254 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
255 rxq->sw_ring[i] = NULL;
260 if (rxq->rx_nb_avail == 0)
262 for (i = 0; i < rxq->rx_nb_avail; i++) {
263 struct rte_mbuf *mbuf;
265 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
266 rte_pktmbuf_free_seg(mbuf);
268 rxq->rx_nb_avail = 0;
272 release_txq_mbufs(struct iavf_tx_queue *txq)
276 if (!txq || !txq->sw_ring) {
277 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
281 for (i = 0; i < txq->nb_tx_desc; i++) {
282 if (txq->sw_ring[i].mbuf) {
283 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
284 txq->sw_ring[i].mbuf = NULL;
289 static const struct iavf_rxq_ops def_rxq_ops = {
290 .release_mbufs = release_rxq_mbufs,
293 static const struct iavf_txq_ops def_txq_ops = {
294 .release_mbufs = release_txq_mbufs,
298 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
299 uint16_t nb_desc, unsigned int socket_id,
300 const struct rte_eth_rxconf *rx_conf,
301 struct rte_mempool *mp)
303 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
304 struct iavf_adapter *ad =
305 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
306 struct iavf_info *vf =
307 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
308 struct iavf_vsi *vsi = &vf->vsi;
309 struct iavf_rx_queue *rxq;
310 const struct rte_memzone *mz;
313 uint16_t rx_free_thresh;
315 PMD_INIT_FUNC_TRACE();
317 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
318 nb_desc > IAVF_MAX_RING_DESC ||
319 nb_desc < IAVF_MIN_RING_DESC) {
320 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
325 /* Check free threshold */
326 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
327 IAVF_DEFAULT_RX_FREE_THRESH :
328 rx_conf->rx_free_thresh;
329 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
332 /* Free memory if needed */
333 if (dev->data->rx_queues[queue_idx]) {
334 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
335 dev->data->rx_queues[queue_idx] = NULL;
338 /* Allocate the rx queue data structure */
339 rxq = rte_zmalloc_socket("iavf rxq",
340 sizeof(struct iavf_rx_queue),
344 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
345 "rx queue data structure");
349 if (vf->vf_res->vf_cap_flags &
350 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
351 vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
352 rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
354 rxq->rxdid = IAVF_RXDID_LEGACY_1;
358 rxq->nb_rx_desc = nb_desc;
359 rxq->rx_free_thresh = rx_free_thresh;
360 rxq->queue_id = queue_idx;
361 rxq->port_id = dev->data->port_id;
362 rxq->crc_len = 0; /* crc stripping by default */
363 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
367 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
368 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
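/* Illustrative example: with a 2176-byte mbuf data room and the default
 * 128-byte headroom, len is 2048; RTE_ALIGN() rounds it up to the assumed
 * hardware buffer granularity of 1 << IAVF_RXQ_CTX_DBUFF_SHIFT (128 bytes),
 * leaving 2048 here.
 */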
370 /* Allocate the software ring. */
371 len = nb_desc + IAVF_RX_MAX_BURST;
373 rte_zmalloc_socket("iavf rx sw ring",
374 sizeof(struct rte_mbuf *) * len,
378 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
383 /* Allocate the maximum number of RX ring hardware descriptors with
384 * a little extra to support bulk allocation.
386 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
387 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
389 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
390 ring_size, IAVF_RING_BASE_ALIGN,
393 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
394 rte_free(rxq->sw_ring);
398 /* Zero all the descriptors in the ring. */
399 memset(mz->addr, 0, ring_size);
400 rxq->rx_ring_phys_addr = mz->iova;
401 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
406 dev->data->rx_queues[queue_idx] = rxq;
407 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
408 rxq->ops = &def_rxq_ops;
410 if (check_rx_bulk_allow(rxq) == true) {
411 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
412 "satisfied. Rx Burst Bulk Alloc function will be "
413 "used on port=%d, queue=%d.",
414 rxq->port_id, rxq->queue_id);
416 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
417 "not satisfied, or Scattered Rx is requested, "
418 "on port=%d, queue=%d.",
419 rxq->port_id, rxq->queue_id);
420 ad->rx_bulk_alloc_allowed = false;
423 if (check_rx_vec_allow(rxq) == false)
424 ad->rx_vec_allowed = false;
430 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
433 unsigned int socket_id,
434 const struct rte_eth_txconf *tx_conf)
436 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
437 struct iavf_tx_queue *txq;
438 const struct rte_memzone *mz;
440 uint16_t tx_rs_thresh, tx_free_thresh;
443 PMD_INIT_FUNC_TRACE();
445 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
447 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
448 nb_desc > IAVF_MAX_RING_DESC ||
449 nb_desc < IAVF_MIN_RING_DESC) {
450 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
455 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
456 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
457 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
458 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
459 check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
461 /* Free memory if needed. */
462 if (dev->data->tx_queues[queue_idx]) {
463 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
464 dev->data->tx_queues[queue_idx] = NULL;
467 /* Allocate the TX queue data structure. */
468 txq = rte_zmalloc_socket("iavf txq",
469 sizeof(struct iavf_tx_queue),
473 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
474 "tx queue structure");
478 txq->nb_tx_desc = nb_desc;
479 txq->rs_thresh = tx_rs_thresh;
480 txq->free_thresh = tx_free_thresh;
481 txq->queue_id = queue_idx;
482 txq->port_id = dev->data->port_id;
483 txq->offloads = offloads;
484 txq->tx_deferred_start = tx_conf->tx_deferred_start;
486 /* Allocate software ring */
488 rte_zmalloc_socket("iavf tx sw ring",
489 sizeof(struct iavf_tx_entry) * nb_desc,
493 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
498 /* Allocate TX hardware ring descriptors. */
499 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
500 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
501 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
502 ring_size, IAVF_RING_BASE_ALIGN,
505 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
506 rte_free(txq->sw_ring);
510 txq->tx_ring_phys_addr = mz->iova;
511 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
516 dev->data->tx_queues[queue_idx] = txq;
517 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
518 txq->ops = &def_txq_ops;
520 if (check_tx_vec_allow(txq) == false) {
521 struct iavf_adapter *ad =
522 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
523 ad->tx_vec_allowed = false;
530 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
532 struct iavf_adapter *adapter =
533 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
534 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
535 struct iavf_rx_queue *rxq;
538 PMD_DRV_FUNC_TRACE();
540 if (rx_queue_id >= dev->data->nb_rx_queues)
543 rxq = dev->data->rx_queues[rx_queue_id];
545 err = alloc_rxq_mbufs(rxq);
547 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
553 /* Init the RX tail register. */
554 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
555 IAVF_WRITE_FLUSH(hw);
557 /* Ready to switch the queue on */
558 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
560 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
563 dev->data->rx_queue_state[rx_queue_id] =
564 RTE_ETH_QUEUE_STATE_STARTED;
570 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
572 struct iavf_adapter *adapter =
573 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
574 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
575 struct iavf_tx_queue *txq;
578 PMD_DRV_FUNC_TRACE();
580 if (tx_queue_id >= dev->data->nb_tx_queues)
583 txq = dev->data->tx_queues[tx_queue_id];
585 /* Init the TX tail register. */
586 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
587 IAVF_WRITE_FLUSH(hw);
589 /* Ready to switch the queue on */
590 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
593 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
596 dev->data->tx_queue_state[tx_queue_id] =
597 RTE_ETH_QUEUE_STATE_STARTED;
603 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
605 struct iavf_adapter *adapter =
606 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
607 struct iavf_rx_queue *rxq;
610 PMD_DRV_FUNC_TRACE();
612 if (rx_queue_id >= dev->data->nb_rx_queues)
615 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
617 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
622 rxq = dev->data->rx_queues[rx_queue_id];
623 rxq->ops->release_mbufs(rxq);
625 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
631 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
633 struct iavf_adapter *adapter =
634 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
635 struct iavf_tx_queue *txq;
638 PMD_DRV_FUNC_TRACE();
640 if (tx_queue_id >= dev->data->nb_tx_queues)
643 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
645 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
650 txq = dev->data->tx_queues[tx_queue_id];
651 txq->ops->release_mbufs(txq);
653 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
659 iavf_dev_rx_queue_release(void *rxq)
661 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
666 q->ops->release_mbufs(q);
667 rte_free(q->sw_ring);
668 rte_memzone_free(q->mz);
673 iavf_dev_tx_queue_release(void *txq)
675 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
680 q->ops->release_mbufs(q);
681 rte_free(q->sw_ring);
682 rte_memzone_free(q->mz);
687 iavf_stop_queues(struct rte_eth_dev *dev)
689 struct iavf_adapter *adapter =
690 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
691 struct iavf_rx_queue *rxq;
692 struct iavf_tx_queue *txq;
695 /* Stop all queues */
696 ret = iavf_disable_queues(adapter);
698 PMD_DRV_LOG(WARNING, "Failed to stop queues");
700 for (i = 0; i < dev->data->nb_tx_queues; i++) {
701 txq = dev->data->tx_queues[i];
704 txq->ops->release_mbufs(txq);
706 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
708 for (i = 0; i < dev->data->nb_rx_queues; i++) {
709 rxq = dev->data->rx_queues[i];
712 rxq->ops->release_mbufs(rxq);
714 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
719 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
721 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
722 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
723 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
725 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
732 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
733 volatile union iavf_rx_flex_desc *rxdp)
735 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
736 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
737 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
739 rte_le_to_cpu_16(rxdp->wb.l2tag1);
745 /* Translate the rx descriptor status and error fields to pkt flags */
746 static inline uint64_t
747 iavf_rxd_to_pkt_flags(uint64_t qword)
750 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
752 #define IAVF_RX_ERR_BITS 0x3f
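/* 0x3f masks the low six bits of the descriptor error field; when none of
 * them is set, the fast path below reports both IP and L4 checksums good
 * without inspecting the individual error bits.
 */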
754 /* Check if the RSS hash is present */
755 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
756 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
757 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
759 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
760 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
764 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
765 flags |= PKT_RX_IP_CKSUM_BAD;
767 flags |= PKT_RX_IP_CKSUM_GOOD;
769 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
770 flags |= PKT_RX_L4_CKSUM_BAD;
772 flags |= PKT_RX_L4_CKSUM_GOOD;
774 /* TODO: Oversize error bit is not processed here */
779 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
780 /* Translate the Rx flex descriptor fields (e.g. the RSS hash) to mbuf fields */
782 iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
783 volatile union iavf_rx_flex_desc *rxdp)
785 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
786 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
789 stat_err = rte_le_to_cpu_16(desc->status_error0);
790 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
791 mb->ol_flags |= PKT_RX_RSS_HASH;
792 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
797 #define IAVF_RX_FLEX_ERR0_BITS \
798 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
799 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
800 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
801 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
802 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
803 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
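/* Any of the status_error0 bits above indicates an error (L3/L4/outer
 * checksum, header buffer overflow or a generic Rx error); when none is set,
 * the fast path below marks both IP and L4 checksums good in one step.
 */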
805 /* Rx L3/L4 checksum */
806 static inline uint64_t
807 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
811 /* check if HW has decoded the packet and checksum */
812 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
815 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
816 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
820 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
821 flags |= PKT_RX_IP_CKSUM_BAD;
823 flags |= PKT_RX_IP_CKSUM_GOOD;
825 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
826 flags |= PKT_RX_L4_CKSUM_BAD;
828 flags |= PKT_RX_L4_CKSUM_GOOD;
830 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
831 flags |= PKT_RX_EIP_CKSUM_BAD;
836 /* If the number of free RX descriptors is greater than the RX free
837 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
838 * register. Update the RDT with the value of the last processed RX
839 * descriptor minus 1, to guarantee that the RDT register is never
840 * equal to the RDH register, which creates a "full" ring situation
841 * from the hardware point of view.
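*
* Illustrative example: with rx_free_thresh = 32 the tail is only written
* back once more than 32 descriptors are being held, and if the last
* processed index was 0, nb_rx_desc - 1 is written instead so that RDT
* never catches up with RDH.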
844 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
846 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
848 if (nb_hold > rxq->rx_free_thresh) {
850 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
851 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
852 rx_id = (uint16_t)((rx_id == 0) ?
853 (rxq->nb_rx_desc - 1) : (rx_id - 1));
854 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
857 rxq->nb_rx_hold = nb_hold;
860 /* implement recv_pkts */
862 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
864 volatile union iavf_rx_desc *rx_ring;
865 volatile union iavf_rx_desc *rxdp;
866 struct iavf_rx_queue *rxq;
867 union iavf_rx_desc rxd;
868 struct rte_mbuf *rxe;
869 struct rte_eth_dev *dev;
870 struct rte_mbuf *rxm;
871 struct rte_mbuf *nmb;
875 uint16_t rx_packet_len;
876 uint16_t rx_id, nb_hold;
879 const uint32_t *ptype_tbl;
884 rx_id = rxq->rx_tail;
885 rx_ring = rxq->rx_ring;
886 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
888 while (nb_rx < nb_pkts) {
889 rxdp = &rx_ring[rx_id];
890 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
891 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
892 IAVF_RXD_QW1_STATUS_SHIFT;
894 /* Check the DD bit first */
895 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
897 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
899 nmb = rte_mbuf_raw_alloc(rxq->mp);
900 if (unlikely(!nmb)) {
901 dev = &rte_eth_devices[rxq->port_id];
902 dev->data->rx_mbuf_alloc_failed++;
903 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
904 "queue_id=%u", rxq->port_id, rxq->queue_id);
910 rxe = rxq->sw_ring[rx_id];
912 if (unlikely(rx_id == rxq->nb_rx_desc))
915 /* Prefetch next mbuf */
916 rte_prefetch0(rxq->sw_ring[rx_id]);
918 /* When next RX descriptor is on a cache line boundary,
919 * prefetch the next 4 RX descriptors and next 8 pointers
922 if ((rx_id & 0x3) == 0) {
923 rte_prefetch0(&rx_ring[rx_id]);
924 rte_prefetch0(rxq->sw_ring[rx_id]);
929 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
930 rxdp->read.hdr_addr = 0;
931 rxdp->read.pkt_addr = dma_addr;
933 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
934 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
936 rxm->data_off = RTE_PKTMBUF_HEADROOM;
937 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
940 rxm->pkt_len = rx_packet_len;
941 rxm->data_len = rx_packet_len;
942 rxm->port = rxq->port_id;
944 iavf_rxd_to_vlan_tci(rxm, &rxd);
945 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
947 ptype_tbl[(uint8_t)((qword1 &
948 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
950 if (pkt_flags & PKT_RX_RSS_HASH)
952 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
954 rxm->ol_flags |= pkt_flags;
956 rx_pkts[nb_rx++] = rxm;
958 rxq->rx_tail = rx_id;
960 iavf_update_rx_tail(rxq, nb_hold, rx_id);
965 /* implement recv_pkts for flexible Rx descriptor */
967 iavf_recv_pkts_flex_rxd(void *rx_queue,
968 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
970 volatile union iavf_rx_desc *rx_ring;
971 volatile union iavf_rx_flex_desc *rxdp;
972 struct iavf_rx_queue *rxq;
973 union iavf_rx_flex_desc rxd;
974 struct rte_mbuf *rxe;
975 struct rte_eth_dev *dev;
976 struct rte_mbuf *rxm;
977 struct rte_mbuf *nmb;
979 uint16_t rx_stat_err0;
980 uint16_t rx_packet_len;
981 uint16_t rx_id, nb_hold;
984 const uint32_t *ptype_tbl;
989 rx_id = rxq->rx_tail;
990 rx_ring = rxq->rx_ring;
991 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
993 while (nb_rx < nb_pkts) {
994 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
995 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
997 /* Check the DD bit first */
998 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1000 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1002 nmb = rte_mbuf_raw_alloc(rxq->mp);
1003 if (unlikely(!nmb)) {
1004 dev = &rte_eth_devices[rxq->port_id];
1005 dev->data->rx_mbuf_alloc_failed++;
1006 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1007 "queue_id=%u", rxq->port_id, rxq->queue_id);
1013 rxe = rxq->sw_ring[rx_id];
1015 if (unlikely(rx_id == rxq->nb_rx_desc))
1018 /* Prefetch next mbuf */
1019 rte_prefetch0(rxq->sw_ring[rx_id]);
1021 /* When next RX descriptor is on a cache line boundary,
1022 * prefetch the next 4 RX descriptors and next 8 pointers
1025 if ((rx_id & 0x3) == 0) {
1026 rte_prefetch0(&rx_ring[rx_id]);
1027 rte_prefetch0(rxq->sw_ring[rx_id]);
1032 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1033 rxdp->read.hdr_addr = 0;
1034 rxdp->read.pkt_addr = dma_addr;
1036 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1037 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1039 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1040 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1043 rxm->pkt_len = rx_packet_len;
1044 rxm->data_len = rx_packet_len;
1045 rxm->port = rxq->port_id;
1047 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1048 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1049 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1050 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1051 iavf_rxd_to_pkt_fields(rxm, &rxd);
1053 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1054 rxm->ol_flags |= pkt_flags;
1056 rx_pkts[nb_rx++] = rxm;
1058 rxq->rx_tail = rx_id;
1060 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1065 /* implement recv_scattered_pkts for flexible Rx descriptor */
1067 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1070 struct iavf_rx_queue *rxq = rx_queue;
1071 union iavf_rx_flex_desc rxd;
1072 struct rte_mbuf *rxe;
1073 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1074 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1075 struct rte_mbuf *nmb, *rxm;
1076 uint16_t rx_id = rxq->rx_tail;
1077 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1078 struct rte_eth_dev *dev;
1079 uint16_t rx_stat_err0;
1083 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1084 volatile union iavf_rx_flex_desc *rxdp;
1085 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1087 while (nb_rx < nb_pkts) {
1088 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1089 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1091 /* Check the DD bit */
1092 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1094 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1096 nmb = rte_mbuf_raw_alloc(rxq->mp);
1097 if (unlikely(!nmb)) {
1098 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1099 "queue_id=%u", rxq->port_id, rxq->queue_id);
1100 dev = &rte_eth_devices[rxq->port_id];
1101 dev->data->rx_mbuf_alloc_failed++;
1107 rxe = rxq->sw_ring[rx_id];
1109 if (rx_id == rxq->nb_rx_desc)
1112 /* Prefetch next mbuf */
1113 rte_prefetch0(rxq->sw_ring[rx_id]);
1115 /* When next RX descriptor is on a cache line boundary,
1116 * prefetch the next 4 RX descriptors and next 8 pointers
1119 if ((rx_id & 0x3) == 0) {
1120 rte_prefetch0(&rx_ring[rx_id]);
1121 rte_prefetch0(rxq->sw_ring[rx_id]);
1127 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1129 /* Set data buffer address and data length of the mbuf */
1130 rxdp->read.hdr_addr = 0;
1131 rxdp->read.pkt_addr = dma_addr;
1132 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1133 IAVF_RX_FLX_DESC_PKT_LEN_M;
1134 rxm->data_len = rx_packet_len;
1135 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1137 /* If this is the first buffer of the received packet, set the
1138 * pointer to the first mbuf of the packet and initialize its
1139 * context. Otherwise, update the total length and the number
1140 * of segments of the current scattered packet, and update the
1141 * pointer to the last mbuf of the current packet.
1145 first_seg->nb_segs = 1;
1146 first_seg->pkt_len = rx_packet_len;
1148 first_seg->pkt_len =
1149 (uint16_t)(first_seg->pkt_len +
1151 first_seg->nb_segs++;
1152 last_seg->next = rxm;
1155 /* If this is not the last buffer of the received packet,
1156 * update the pointer to the last mbuf of the current scattered
1157 * packet and continue to parse the RX ring.
1159 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1164 /* This is the last buffer of the received packet. If the CRC
1165 * is not stripped by the hardware:
1166 * - Subtract the CRC length from the total packet length.
1167 * - If the last buffer only contains the whole CRC or a part
1168 * of it, free the mbuf associated to the last buffer. If part
1169 * of the CRC is also contained in the previous mbuf, subtract
1170 * the length of that CRC part from the data length of the
1174 if (unlikely(rxq->crc_len > 0)) {
1175 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1176 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1177 rte_pktmbuf_free_seg(rxm);
1178 first_seg->nb_segs--;
1179 last_seg->data_len =
1180 (uint16_t)(last_seg->data_len -
1181 (RTE_ETHER_CRC_LEN - rx_packet_len));
1182 last_seg->next = NULL;
1184 rxm->data_len = (uint16_t)(rx_packet_len -
1189 first_seg->port = rxq->port_id;
1190 first_seg->ol_flags = 0;
1191 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1192 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1193 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1194 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1195 iavf_rxd_to_pkt_fields(first_seg, &rxd);
1197 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1199 first_seg->ol_flags |= pkt_flags;
1201 /* Prefetch data of first segment, if configured to do so. */
1202 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1203 first_seg->data_off));
1204 rx_pkts[nb_rx++] = first_seg;
1208 /* Record index of the next RX descriptor to probe. */
1209 rxq->rx_tail = rx_id;
1210 rxq->pkt_first_seg = first_seg;
1211 rxq->pkt_last_seg = last_seg;
1213 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1218 /* implement recv_scattered_pkts */
1220 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1223 struct iavf_rx_queue *rxq = rx_queue;
1224 union iavf_rx_desc rxd;
1225 struct rte_mbuf *rxe;
1226 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1227 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1228 struct rte_mbuf *nmb, *rxm;
1229 uint16_t rx_id = rxq->rx_tail;
1230 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1231 struct rte_eth_dev *dev;
1237 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1238 volatile union iavf_rx_desc *rxdp;
1239 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1241 while (nb_rx < nb_pkts) {
1242 rxdp = &rx_ring[rx_id];
1243 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1244 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1245 IAVF_RXD_QW1_STATUS_SHIFT;
1247 /* Check the DD bit */
1248 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1250 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1252 nmb = rte_mbuf_raw_alloc(rxq->mp);
1253 if (unlikely(!nmb)) {
1254 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1255 "queue_id=%u", rxq->port_id, rxq->queue_id);
1256 dev = &rte_eth_devices[rxq->port_id];
1257 dev->data->rx_mbuf_alloc_failed++;
1263 rxe = rxq->sw_ring[rx_id];
1265 if (rx_id == rxq->nb_rx_desc)
1268 /* Prefetch next mbuf */
1269 rte_prefetch0(rxq->sw_ring[rx_id]);
1271 /* When next RX descriptor is on a cache line boundary,
1272 * prefetch the next 4 RX descriptors and next 8 pointers
1275 if ((rx_id & 0x3) == 0) {
1276 rte_prefetch0(&rx_ring[rx_id]);
1277 rte_prefetch0(rxq->sw_ring[rx_id]);
1283 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1285 /* Set data buffer address and data length of the mbuf */
1286 rxdp->read.hdr_addr = 0;
1287 rxdp->read.pkt_addr = dma_addr;
1288 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1289 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1290 rxm->data_len = rx_packet_len;
1291 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1293 /* If this is the first buffer of the received packet, set the
1294 * pointer to the first mbuf of the packet and initialize its
1295 * context. Otherwise, update the total length and the number
1296 * of segments of the current scattered packet, and update the
1297 * pointer to the last mbuf of the current packet.
1301 first_seg->nb_segs = 1;
1302 first_seg->pkt_len = rx_packet_len;
1304 first_seg->pkt_len =
1305 (uint16_t)(first_seg->pkt_len +
1307 first_seg->nb_segs++;
1308 last_seg->next = rxm;
1311 /* If this is not the last buffer of the received packet,
1312 * update the pointer to the last mbuf of the current scattered
1313 * packet and continue to parse the RX ring.
1315 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1320 /* This is the last buffer of the received packet. If the CRC
1321 * is not stripped by the hardware:
1322 * - Subtract the CRC length from the total packet length.
1323 * - If the last buffer only contains the whole CRC or a part
1324 * of it, free the mbuf associated to the last buffer. If part
1325 * of the CRC is also contained in the previous mbuf, subtract
1326 * the length of that CRC part from the data length of the
1330 if (unlikely(rxq->crc_len > 0)) {
1331 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1332 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1333 rte_pktmbuf_free_seg(rxm);
1334 first_seg->nb_segs--;
1335 last_seg->data_len =
1336 (uint16_t)(last_seg->data_len -
1337 (RTE_ETHER_CRC_LEN - rx_packet_len));
1338 last_seg->next = NULL;
1340 rxm->data_len = (uint16_t)(rx_packet_len -
1344 first_seg->port = rxq->port_id;
1345 first_seg->ol_flags = 0;
1346 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1347 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1348 first_seg->packet_type =
1349 ptype_tbl[(uint8_t)((qword1 &
1350 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1352 if (pkt_flags & PKT_RX_RSS_HASH)
1353 first_seg->hash.rss =
1354 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1356 first_seg->ol_flags |= pkt_flags;
1358 /* Prefetch data of first segment, if configured to do so. */
1359 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1360 first_seg->data_off));
1361 rx_pkts[nb_rx++] = first_seg;
1365 /* Record index of the next RX descriptor to probe. */
1366 rxq->rx_tail = rx_id;
1367 rxq->pkt_first_seg = first_seg;
1368 rxq->pkt_last_seg = last_seg;
1370 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1375 #define IAVF_LOOK_AHEAD 8
1377 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1379 volatile union iavf_rx_flex_desc *rxdp;
1380 struct rte_mbuf **rxep;
1381 struct rte_mbuf *mb;
1384 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1385 int32_t i, j, nb_rx = 0;
1387 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1389 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1390 rxep = &rxq->sw_ring[rxq->rx_tail];
1392 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1394 /* Make sure there is at least 1 packet to receive */
1395 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1398 /* Scan LOOK_AHEAD descriptors at a time to determine which
1399 * descriptors reference packets that are ready to be received.
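*
* Illustrative: descriptors are checked in groups of IAVF_LOOK_AHEAD (8);
* if, say, only the first 5 of a group have the DD bit set, those 5 are
* harvested and the scan stops after that group.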
1401 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1402 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1403 /* Read desc statuses backwards to avoid race condition */
1404 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1405 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1409 /* Compute how many status bits were set */
1410 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1411 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1415 /* Translate descriptor info to mbuf parameters */
1416 for (j = 0; j < nb_dd; j++) {
1417 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1419 i * IAVF_LOOK_AHEAD + j);
1422 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1423 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1424 mb->data_len = pkt_len;
1425 mb->pkt_len = pkt_len;
1428 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1429 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1430 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1431 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1432 iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
1434 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1435 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1437 mb->ol_flags |= pkt_flags;
1440 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1441 rxq->rx_stage[i + j] = rxep[j];
1443 if (nb_dd != IAVF_LOOK_AHEAD)
1447 /* Clear software ring entries */
1448 for (i = 0; i < nb_rx; i++)
1449 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1455 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1457 volatile union iavf_rx_desc *rxdp;
1458 struct rte_mbuf **rxep;
1459 struct rte_mbuf *mb;
1463 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1464 int32_t i, j, nb_rx = 0;
1466 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1468 rxdp = &rxq->rx_ring[rxq->rx_tail];
1469 rxep = &rxq->sw_ring[rxq->rx_tail];
1471 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1472 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1473 IAVF_RXD_QW1_STATUS_SHIFT;
1475 /* Make sure there is at least 1 packet to receive */
1476 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1479 /* Scan LOOK_AHEAD descriptors at a time to determine which
1480 * descriptors reference packets that are ready to be received.
1482 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1483 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1484 /* Read desc statuses backwards to avoid race condition */
1485 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1486 qword1 = rte_le_to_cpu_64(
1487 rxdp[j].wb.qword1.status_error_len);
1488 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1489 IAVF_RXD_QW1_STATUS_SHIFT;
1494 /* Compute how many status bits were set */
1495 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1496 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1500 /* Translate descriptor info to mbuf parameters */
1501 for (j = 0; j < nb_dd; j++) {
1502 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1503 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1506 qword1 = rte_le_to_cpu_64
1507 (rxdp[j].wb.qword1.status_error_len);
1508 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1509 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1510 mb->data_len = pkt_len;
1511 mb->pkt_len = pkt_len;
1513 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1514 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1516 ptype_tbl[(uint8_t)((qword1 &
1517 IAVF_RXD_QW1_PTYPE_MASK) >>
1518 IAVF_RXD_QW1_PTYPE_SHIFT)];
1520 if (pkt_flags & PKT_RX_RSS_HASH)
1521 mb->hash.rss = rte_le_to_cpu_32(
1522 rxdp[j].wb.qword0.hi_dword.rss);
1524 mb->ol_flags |= pkt_flags;
1527 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1528 rxq->rx_stage[i + j] = rxep[j];
1530 if (nb_dd != IAVF_LOOK_AHEAD)
1534 /* Clear software ring entries */
1535 for (i = 0; i < nb_rx; i++)
1536 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1541 static inline uint16_t
1542 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1543 struct rte_mbuf **rx_pkts,
1547 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1549 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1551 for (i = 0; i < nb_pkts; i++)
1552 rx_pkts[i] = stage[i];
1554 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1555 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1561 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1563 volatile union iavf_rx_desc *rxdp;
1564 struct rte_mbuf **rxep;
1565 struct rte_mbuf *mb;
1566 uint16_t alloc_idx, i;
1570 /* Allocate buffers in bulk */
1571 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1572 (rxq->rx_free_thresh - 1));
1573 rxep = &rxq->sw_ring[alloc_idx];
1574 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1575 rxq->rx_free_thresh);
1576 if (unlikely(diag != 0)) {
1577 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1581 rxdp = &rxq->rx_ring[alloc_idx];
1582 for (i = 0; i < rxq->rx_free_thresh; i++) {
1583 if (likely(i < (rxq->rx_free_thresh - 1)))
1584 /* Prefetch next mbuf */
1585 rte_prefetch0(rxep[i + 1]);
1588 rte_mbuf_refcnt_set(mb, 1);
1590 mb->data_off = RTE_PKTMBUF_HEADROOM;
1592 mb->port = rxq->port_id;
1593 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1594 rxdp[i].read.hdr_addr = 0;
1595 rxdp[i].read.pkt_addr = dma_addr;
1598 /* Update rx tail register */
1600 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1602 rxq->rx_free_trigger =
1603 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1604 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1605 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
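/* Illustrative example: with rx_free_thresh = 32 and 512 descriptors, the
 * trigger advances 31 -> 63 -> ... -> 511 after successive refills and then
 * wraps back to 31.
 */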
1610 static inline uint16_t
1611 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1613 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1619 if (rxq->rx_nb_avail)
1620 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1622 if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
1623 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1625 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1626 rxq->rx_next_avail = 0;
1627 rxq->rx_nb_avail = nb_rx;
1628 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1630 if (rxq->rx_tail > rxq->rx_free_trigger) {
1631 if (iavf_rx_alloc_bufs(rxq) != 0) {
1634 /* TODO: count rx_mbuf_alloc_failed here */
1636 rxq->rx_nb_avail = 0;
1637 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1638 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1639 rxq->sw_ring[j] = rxq->rx_stage[i];
1645 if (rxq->rx_tail >= rxq->nb_rx_desc)
1648 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1649 rxq->port_id, rxq->queue_id,
1650 rxq->rx_tail, nb_rx);
1652 if (rxq->rx_nb_avail)
1653 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1659 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1660 struct rte_mbuf **rx_pkts,
1663 uint16_t nb_rx = 0, n, count;
1665 if (unlikely(nb_pkts == 0))
1668 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1669 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1672 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1673 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1674 nb_rx = (uint16_t)(nb_rx + count);
1675 nb_pkts = (uint16_t)(nb_pkts - count);
1684 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1686 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1687 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1688 uint16_t nb_tx_desc = txq->nb_tx_desc;
1689 uint16_t desc_to_clean_to;
1690 uint16_t nb_tx_to_clean;
1692 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1694 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1695 if (desc_to_clean_to >= nb_tx_desc)
1696 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
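/* Illustrative example: with 512 descriptors, rs_thresh = 32 and
 * last_desc_cleaned = 500, desc_to_clean_to becomes 532 and wraps to 20
 * before its completion status is checked below.
 */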
1698 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1699 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1700 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1701 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1702 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1703 "(port=%d queue=%d)", desc_to_clean_to,
1704 txq->port_id, txq->queue_id);
1708 if (last_desc_cleaned > desc_to_clean_to)
1709 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1712 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1715 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1717 txq->last_desc_cleaned = desc_to_clean_to;
1718 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1723 /* Check if the context descriptor is needed for TX offloading */
1724 static inline uint16_t
1725 iavf_calc_context_desc(uint64_t flags)
1727 static uint64_t mask = PKT_TX_TCP_SEG;
1729 return (flags & mask) ? 1 : 0;
1733 iavf_txd_enable_checksum(uint64_t ol_flags,
1735 uint32_t *td_offset,
1736 union iavf_tx_offload tx_offload)
1739 *td_offset |= (tx_offload.l2_len >> 1) <<
1740 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1742 /* Enable L3 checksum offloads */
1743 if (ol_flags & PKT_TX_IP_CKSUM) {
1744 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1745 *td_offset |= (tx_offload.l3_len >> 2) <<
1746 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1747 } else if (ol_flags & PKT_TX_IPV4) {
1748 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1749 *td_offset |= (tx_offload.l3_len >> 2) <<
1750 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1751 } else if (ol_flags & PKT_TX_IPV6) {
1752 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1753 *td_offset |= (tx_offload.l3_len >> 2) <<
1754 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1757 if (ol_flags & PKT_TX_TCP_SEG) {
1758 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1759 *td_offset |= (tx_offload.l4_len >> 2) <<
1760 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1764 /* Enable L4 checksum offloads */
1765 switch (ol_flags & PKT_TX_L4_MASK) {
1766 case PKT_TX_TCP_CKSUM:
1767 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1768 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1769 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1771 case PKT_TX_SCTP_CKSUM:
1772 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1773 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1774 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1776 case PKT_TX_UDP_CKSUM:
1777 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1778 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1779 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1786 /* Set the TSO context descriptor.
1787 * Supports IP -> L4 and IP -> IP -> L4 packets.
1789 static inline uint64_t
1790 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
1792 uint64_t ctx_desc = 0;
1793 uint32_t cd_cmd, hdr_len, cd_tso_len;
1795 if (!tx_offload.l4_len) {
1796 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1800 hdr_len = tx_offload.l2_len +
1804 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1805 cd_tso_len = mbuf->pkt_len - hdr_len;
1806 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1807 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1808 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
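/* Illustrative example: for a 9000-byte TSO packet with 14-byte L2, 20-byte
 * L3 and 20-byte L4 headers, hdr_len is 54 and cd_tso_len is 8946, while
 * tso_segsz supplies the MSS field of the context descriptor.
 */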
1813 /* Construct the Tx data descriptor command/type/offset/size quadword (QW1) */
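/* Note (hedged reading of the transmit loop below): the caller ORs
 * IAVF_TX_DESC_CMD_EOP into td_cmd for the last descriptor of a packet, and
 * IAVF_TX_DESC_CMD_RS once rs_thresh descriptors have been used, after this
 * helper has built the base quadword.
 */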
1814 static inline uint64_t
1815 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1818 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
1819 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
1820 ((uint64_t)td_offset <<
1821 IAVF_TXD_QW1_OFFSET_SHIFT) |
1823 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1824 ((uint64_t)td_tag <<
1825 IAVF_TXD_QW1_L2TAG1_SHIFT));
1830 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1832 volatile struct iavf_tx_desc *txd;
1833 volatile struct iavf_tx_desc *txr;
1834 struct iavf_tx_queue *txq;
1835 struct iavf_tx_entry *sw_ring;
1836 struct iavf_tx_entry *txe, *txn;
1837 struct rte_mbuf *tx_pkt;
1838 struct rte_mbuf *m_seg;
1849 uint64_t buf_dma_addr;
1850 union iavf_tx_offload tx_offload = {0};
1853 sw_ring = txq->sw_ring;
1855 tx_id = txq->tx_tail;
1856 txe = &sw_ring[tx_id];
1858 /* Check if the descriptor ring needs to be cleaned. */
1859 if (txq->nb_free < txq->free_thresh)
1860 iavf_xmit_cleanup(txq);
1862 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1867 tx_pkt = *tx_pkts++;
1868 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1870 ol_flags = tx_pkt->ol_flags;
1871 tx_offload.l2_len = tx_pkt->l2_len;
1872 tx_offload.l3_len = tx_pkt->l3_len;
1873 tx_offload.l4_len = tx_pkt->l4_len;
1874 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1876 /* Calculate the number of context descriptors needed. */
1877 nb_ctx = iavf_calc_context_desc(ol_flags);
1879 /* The number of descriptors that must be allocated for
1880 * a packet equals the number of segments of that
1881 * packet plus 1 context descriptor if needed.
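* For example, a 3-segment TSO packet needs 4 descriptors
* (3 data descriptors plus 1 context descriptor).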
1883 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1884 tx_last = (uint16_t)(tx_id + nb_used - 1);
1887 if (tx_last >= txq->nb_tx_desc)
1888 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1890 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1891 " tx_first=%u tx_last=%u",
1892 txq->port_id, txq->queue_id, tx_id, tx_last);
1894 if (nb_used > txq->nb_free) {
1895 if (iavf_xmit_cleanup(txq)) {
1900 if (unlikely(nb_used > txq->rs_thresh)) {
1901 while (nb_used > txq->nb_free) {
1902 if (iavf_xmit_cleanup(txq)) {
1911 /* Descriptor based VLAN insertion */
1912 if (ol_flags & PKT_TX_VLAN_PKT) {
1913 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
1914 td_tag = tx_pkt->vlan_tci;
1917 /* According to the datasheet, bit 2 is reserved and must be
1922 /* Enable checksum offloading */
1923 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
1924 iavf_txd_enable_checksum(ol_flags, &td_cmd,
1925 &td_offset, tx_offload);
1928 /* Setup TX context descriptor if required */
1929 uint64_t cd_type_cmd_tso_mss =
1930 IAVF_TX_DESC_DTYPE_CONTEXT;
1931 volatile struct iavf_tx_context_desc *ctx_txd =
1932 (volatile struct iavf_tx_context_desc *)
1935 txn = &sw_ring[txe->next_id];
1936 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1938 rte_pktmbuf_free_seg(txe->mbuf);
1943 if (ol_flags & PKT_TX_TCP_SEG)
1944 cd_type_cmd_tso_mss |=
1945 iavf_set_tso_ctx(tx_pkt, tx_offload);
1947 ctx_txd->type_cmd_tso_mss =
1948 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1950 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
1951 txe->last_id = tx_last;
1952 tx_id = txe->next_id;
1959 txn = &sw_ring[txe->next_id];
1962 rte_pktmbuf_free_seg(txe->mbuf);
1965 /* Setup TX Descriptor */
1966 slen = m_seg->data_len;
1967 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1968 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1969 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
1974 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
1975 txe->last_id = tx_last;
1976 tx_id = txe->next_id;
1978 m_seg = m_seg->next;
1981 /* The last packet data descriptor needs End Of Packet (EOP) */
1982 td_cmd |= IAVF_TX_DESC_CMD_EOP;
1983 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
1984 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
1986 if (txq->nb_used >= txq->rs_thresh) {
1987 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
1988 "%4u (port=%d queue=%d)",
1989 tx_last, txq->port_id, txq->queue_id);
1991 td_cmd |= IAVF_TX_DESC_CMD_RS;
1993 /* Update txq RS bit counters */
1997 txd->cmd_type_offset_bsz |=
1998 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1999 IAVF_TXD_QW1_CMD_SHIFT);
2000 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2006 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2007 txq->port_id, txq->queue_id, tx_id, nb_tx);
2009 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2010 txq->tx_tail = tx_id;
2015 /* TX prep functions */
2017 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2024 for (i = 0; i < nb_pkts; i++) {
2026 ol_flags = m->ol_flags;
2028 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2029 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2030 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2034 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2035 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2036 /* MSS values outside this range are considered malicious */
2041 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2042 rte_errno = ENOTSUP;
2046 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2047 ret = rte_validate_tx_offload(m);
2053 ret = rte_net_intel_cksum_prepare(m);
2063 /* Choose the Rx burst function */
2065 iavf_set_rx_function(struct rte_eth_dev *dev)
2067 struct iavf_adapter *adapter =
2068 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2069 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2071 struct iavf_rx_queue *rxq;
2073 bool use_avx2 = false;
2075 if (!iavf_rx_vec_dev_check(dev)) {
2076 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2077 rxq = dev->data->rx_queues[i];
2078 (void)iavf_rxq_vec_setup(rxq);
2081 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2082 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2085 if (dev->data->scattered_rx) {
2087 "Using %sVector Scattered Rx (port %d).",
2088 use_avx2 ? "avx2 " : "",
2089 dev->data->port_id);
2090 if (vf->vf_res->vf_cap_flags &
2091 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2092 dev->rx_pkt_burst = use_avx2 ?
2093 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2094 iavf_recv_scattered_pkts_vec_flex_rxd;
2096 dev->rx_pkt_burst = use_avx2 ?
2097 iavf_recv_scattered_pkts_vec_avx2 :
2098 iavf_recv_scattered_pkts_vec;
2100 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2101 use_avx2 ? "avx2 " : "",
2102 dev->data->port_id);
2103 if (vf->vf_res->vf_cap_flags &
2104 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2105 dev->rx_pkt_burst = use_avx2 ?
2106 iavf_recv_pkts_vec_avx2_flex_rxd :
2107 iavf_recv_pkts_vec_flex_rxd;
2109 dev->rx_pkt_burst = use_avx2 ?
2110 iavf_recv_pkts_vec_avx2 :
2118 if (dev->data->scattered_rx) {
2119 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2120 dev->data->port_id);
2121 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2122 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2124 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2125 } else if (adapter->rx_bulk_alloc_allowed) {
2126 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2127 dev->data->port_id);
2128 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2130 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2131 dev->data->port_id);
2132 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2133 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2135 dev->rx_pkt_burst = iavf_recv_pkts;
2139 /* Choose the Tx burst function */
2141 iavf_set_tx_function(struct rte_eth_dev *dev)
2144 struct iavf_tx_queue *txq;
2146 bool use_avx2 = false;
2148 if (!iavf_tx_vec_dev_check(dev)) {
2149 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2150 txq = dev->data->tx_queues[i];
2153 iavf_txq_vec_setup(txq);
2156 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2157 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2160 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2161 use_avx2 ? "avx2 " : "",
2162 dev->data->port_id);
2163 dev->tx_pkt_burst = use_avx2 ?
2164 iavf_xmit_pkts_vec_avx2 :
2166 dev->tx_pkt_prepare = NULL;
2172 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2173 dev->data->port_id);
2174 dev->tx_pkt_burst = iavf_xmit_pkts;
2175 dev->tx_pkt_prepare = iavf_prep_pkts;
2179 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2180 struct rte_eth_rxq_info *qinfo)
2182 struct iavf_rx_queue *rxq;
2184 rxq = dev->data->rx_queues[queue_id];
2186 qinfo->mp = rxq->mp;
2187 qinfo->scattered_rx = dev->data->scattered_rx;
2188 qinfo->nb_desc = rxq->nb_rx_desc;
2190 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2191 qinfo->conf.rx_drop_en = true;
2192 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2196 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2197 struct rte_eth_txq_info *qinfo)
2199 struct iavf_tx_queue *txq;
2201 txq = dev->data->tx_queues[queue_id];
2203 qinfo->nb_desc = txq->nb_tx_desc;
2205 qinfo->conf.tx_free_thresh = txq->free_thresh;
2206 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2207 qinfo->conf.offloads = txq->offloads;
2208 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2211 /* Get the number of used descriptors of an Rx queue */
2213 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2215 #define IAVF_RXQ_SCAN_INTERVAL 4
2216 volatile union iavf_rx_desc *rxdp;
2217 struct iavf_rx_queue *rxq;
2220 rxq = dev->data->rx_queues[queue_id];
2221 rxdp = &rxq->rx_ring[rxq->rx_tail];
2223 while ((desc < rxq->nb_rx_desc) &&
2224 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2225 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2226 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2227 /* Check the DD bit of every fourth Rx descriptor (IAVF_RXQ_SCAN_INTERVAL)
2228 * to avoid checking too frequently and degrading performance.
2231 desc += IAVF_RXQ_SCAN_INTERVAL;
2232 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2233 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2234 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2235 desc - rxq->nb_rx_desc]);
2242 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2244 struct iavf_rx_queue *rxq = rx_queue;
2245 volatile uint64_t *status;
2249 if (unlikely(offset >= rxq->nb_rx_desc))
2252 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2253 return RTE_ETH_RX_DESC_UNAVAIL;
2255 desc = rxq->rx_tail + offset;
2256 if (desc >= rxq->nb_rx_desc)
2257 desc -= rxq->nb_rx_desc;
2259 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2260 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2261 << IAVF_RXD_QW1_STATUS_SHIFT);
2263 return RTE_ETH_RX_DESC_DONE;
2265 return RTE_ETH_RX_DESC_AVAIL;
2269 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2271 struct iavf_tx_queue *txq = tx_queue;
2272 volatile uint64_t *status;
2273 uint64_t mask, expect;
2276 if (unlikely(offset >= txq->nb_tx_desc))
2279 desc = txq->tx_tail + offset;
2280 /* go to next desc that has the RS bit */
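/* Illustrative: with rs_thresh = 32, descriptor indexes 33..64 are all
 * rounded up to 64 here (ring wrap-around is handled just below).
 */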
2281 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2283 if (desc >= txq->nb_tx_desc) {
2284 desc -= txq->nb_tx_desc;
2285 if (desc >= txq->nb_tx_desc)
2286 desc -= txq->nb_tx_desc;
2289 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2290 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2291 expect = rte_cpu_to_le_64(
2292 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2293 if ((*status & mask) == expect)
2294 return RTE_ETH_TX_DESC_DONE;
2296 return RTE_ETH_TX_DESC_FULL;
2300 iavf_get_default_ptype_table(void)
2302 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2303 __rte_cache_aligned = {
2306 [1] = RTE_PTYPE_L2_ETHER,
2307 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2308 /* [3] - [5] reserved */
2309 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2310 /* [7] - [10] reserved */
2311 [11] = RTE_PTYPE_L2_ETHER_ARP,
2312 /* [12] - [21] reserved */
2314 /* Non tunneled IPv4 */
2315 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2317 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2318 RTE_PTYPE_L4_NONFRAG,
2319 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2322 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2324 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2326 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2330 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2331 RTE_PTYPE_TUNNEL_IP |
2332 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2333 RTE_PTYPE_INNER_L4_FRAG,
2334 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2335 RTE_PTYPE_TUNNEL_IP |
2336 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2337 RTE_PTYPE_INNER_L4_NONFRAG,
2338 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2339 RTE_PTYPE_TUNNEL_IP |
2340 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2341 RTE_PTYPE_INNER_L4_UDP,
2343 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2344 RTE_PTYPE_TUNNEL_IP |
2345 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2346 RTE_PTYPE_INNER_L4_TCP,
2347 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2348 RTE_PTYPE_TUNNEL_IP |
2349 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2350 RTE_PTYPE_INNER_L4_SCTP,
2351 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2352 RTE_PTYPE_TUNNEL_IP |
2353 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2354 RTE_PTYPE_INNER_L4_ICMP,
2356 /* IPv4 --> IPv6 */
2357 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2358 RTE_PTYPE_TUNNEL_IP |
2359 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2360 RTE_PTYPE_INNER_L4_FRAG,
2361 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2362 RTE_PTYPE_TUNNEL_IP |
2363 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2364 RTE_PTYPE_INNER_L4_NONFRAG,
2365 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2366 RTE_PTYPE_TUNNEL_IP |
2367 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2368 RTE_PTYPE_INNER_L4_UDP,
2370 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2371 RTE_PTYPE_TUNNEL_IP |
2372 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2373 RTE_PTYPE_INNER_L4_TCP,
2374 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2375 RTE_PTYPE_TUNNEL_IP |
2376 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2377 RTE_PTYPE_INNER_L4_SCTP,
2378 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2379 RTE_PTYPE_TUNNEL_IP |
2380 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2381 RTE_PTYPE_INNER_L4_ICMP,
2383 /* IPv4 --> GRE/Teredo/VXLAN */
2384 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2385 RTE_PTYPE_TUNNEL_GRENAT,
2387 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2388 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2389 RTE_PTYPE_TUNNEL_GRENAT |
2390 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2391 RTE_PTYPE_INNER_L4_FRAG,
2392 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2393 RTE_PTYPE_TUNNEL_GRENAT |
2394 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2395 RTE_PTYPE_INNER_L4_NONFRAG,
2396 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2397 RTE_PTYPE_TUNNEL_GRENAT |
2398 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2399 RTE_PTYPE_INNER_L4_UDP,
2401 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2402 RTE_PTYPE_TUNNEL_GRENAT |
2403 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2404 RTE_PTYPE_INNER_L4_TCP,
2405 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2406 RTE_PTYPE_TUNNEL_GRENAT |
2407 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2408 RTE_PTYPE_INNER_L4_SCTP,
2409 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2410 RTE_PTYPE_TUNNEL_GRENAT |
2411 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2412 RTE_PTYPE_INNER_L4_ICMP,
2414 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2415 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2416 RTE_PTYPE_TUNNEL_GRENAT |
2417 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2418 RTE_PTYPE_INNER_L4_FRAG,
2419 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2420 RTE_PTYPE_TUNNEL_GRENAT |
2421 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2422 RTE_PTYPE_INNER_L4_NONFRAG,
2423 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2424 RTE_PTYPE_TUNNEL_GRENAT |
2425 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2426 RTE_PTYPE_INNER_L4_UDP,
2428 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2429 RTE_PTYPE_TUNNEL_GRENAT |
2430 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2431 RTE_PTYPE_INNER_L4_TCP,
2432 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2433 RTE_PTYPE_TUNNEL_GRENAT |
2434 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2435 RTE_PTYPE_INNER_L4_SCTP,
2436 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2437 RTE_PTYPE_TUNNEL_GRENAT |
2438 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2439 RTE_PTYPE_INNER_L4_ICMP,
2441 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2442 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2443 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2445 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2446 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2447 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2448 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2449 RTE_PTYPE_INNER_L4_FRAG,
2450 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2451 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2452 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2453 RTE_PTYPE_INNER_L4_NONFRAG,
2454 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2455 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2456 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2457 RTE_PTYPE_INNER_L4_UDP,
2459 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2460 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2461 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2462 RTE_PTYPE_INNER_L4_TCP,
2463 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2464 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2465 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2466 RTE_PTYPE_INNER_L4_SCTP,
2467 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2468 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2469 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2470 RTE_PTYPE_INNER_L4_ICMP,
2472 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2473 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2474 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2475 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2476 RTE_PTYPE_INNER_L4_FRAG,
2477 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2478 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2479 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2480 RTE_PTYPE_INNER_L4_NONFRAG,
2481 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2482 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2483 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2484 RTE_PTYPE_INNER_L4_UDP,
2486 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2487 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2488 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2489 RTE_PTYPE_INNER_L4_TCP,
2490 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2491 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2492 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2493 RTE_PTYPE_INNER_L4_SCTP,
2494 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2495 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2496 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2497 RTE_PTYPE_INNER_L4_ICMP,
2498 /* [73] - [87] reserved */
2500 /* Non tunneled IPv6 */
2501 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2502 RTE_PTYPE_L4_FRAG,
2503 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2504 RTE_PTYPE_L4_NONFRAG,
2505 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2506 RTE_PTYPE_L4_UDP,
2507 /* [91] reserved */
2508 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2509 RTE_PTYPE_L4_TCP,
2510 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2511 RTE_PTYPE_L4_SCTP,
2512 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2513 RTE_PTYPE_L4_ICMP,
2515 /* IPv6 --> IPv4 */
2516 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2517 RTE_PTYPE_TUNNEL_IP |
2518 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2519 RTE_PTYPE_INNER_L4_FRAG,
2520 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2521 RTE_PTYPE_TUNNEL_IP |
2522 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2523 RTE_PTYPE_INNER_L4_NONFRAG,
2524 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2525 RTE_PTYPE_TUNNEL_IP |
2526 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2527 RTE_PTYPE_INNER_L4_UDP,
2529 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2530 RTE_PTYPE_TUNNEL_IP |
2531 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2532 RTE_PTYPE_INNER_L4_TCP,
2533 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2534 RTE_PTYPE_TUNNEL_IP |
2535 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2536 RTE_PTYPE_INNER_L4_SCTP,
2537 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2538 RTE_PTYPE_TUNNEL_IP |
2539 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2540 RTE_PTYPE_INNER_L4_ICMP,
2542 /* IPv6 --> IPv6 */
2543 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2544 RTE_PTYPE_TUNNEL_IP |
2545 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2546 RTE_PTYPE_INNER_L4_FRAG,
2547 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2548 RTE_PTYPE_TUNNEL_IP |
2549 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2550 RTE_PTYPE_INNER_L4_NONFRAG,
2551 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2552 RTE_PTYPE_TUNNEL_IP |
2553 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2554 RTE_PTYPE_INNER_L4_UDP,
2555 /* [105] reserved */
2556 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2557 RTE_PTYPE_TUNNEL_IP |
2558 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2559 RTE_PTYPE_INNER_L4_TCP,
2560 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2561 RTE_PTYPE_TUNNEL_IP |
2562 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2563 RTE_PTYPE_INNER_L4_SCTP,
2564 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2565 RTE_PTYPE_TUNNEL_IP |
2566 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2567 RTE_PTYPE_INNER_L4_ICMP,
2569 /* IPv6 --> GRE/Teredo/VXLAN */
2570 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2571 RTE_PTYPE_TUNNEL_GRENAT,
2573 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2574 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2575 RTE_PTYPE_TUNNEL_GRENAT |
2576 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2577 RTE_PTYPE_INNER_L4_FRAG,
2578 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2579 RTE_PTYPE_TUNNEL_GRENAT |
2580 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2581 RTE_PTYPE_INNER_L4_NONFRAG,
2582 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2583 RTE_PTYPE_TUNNEL_GRENAT |
2584 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2585 RTE_PTYPE_INNER_L4_UDP,
2586 /* [113] reserved */
2587 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2588 RTE_PTYPE_TUNNEL_GRENAT |
2589 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2590 RTE_PTYPE_INNER_L4_TCP,
2591 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2592 RTE_PTYPE_TUNNEL_GRENAT |
2593 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2594 RTE_PTYPE_INNER_L4_SCTP,
2595 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2596 RTE_PTYPE_TUNNEL_GRENAT |
2597 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2598 RTE_PTYPE_INNER_L4_ICMP,
2600 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2601 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2602 RTE_PTYPE_TUNNEL_GRENAT |
2603 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2604 RTE_PTYPE_INNER_L4_FRAG,
2605 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2606 RTE_PTYPE_TUNNEL_GRENAT |
2607 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2608 RTE_PTYPE_INNER_L4_NONFRAG,
2609 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2610 RTE_PTYPE_TUNNEL_GRENAT |
2611 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2612 RTE_PTYPE_INNER_L4_UDP,
2613 /* [120] reserved */
2614 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2615 RTE_PTYPE_TUNNEL_GRENAT |
2616 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2617 RTE_PTYPE_INNER_L4_TCP,
2618 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2619 RTE_PTYPE_TUNNEL_GRENAT |
2620 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2621 RTE_PTYPE_INNER_L4_SCTP,
2622 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2623 RTE_PTYPE_TUNNEL_GRENAT |
2624 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2625 RTE_PTYPE_INNER_L4_ICMP,
2627 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2628 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2629 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2631 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2632 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2633 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2634 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2635 RTE_PTYPE_INNER_L4_FRAG,
2636 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2637 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2638 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2639 RTE_PTYPE_INNER_L4_NONFRAG,
2640 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2642 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2643 RTE_PTYPE_INNER_L4_UDP,
2644 /* [128] reserved */
2645 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2646 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2647 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2648 RTE_PTYPE_INNER_L4_TCP,
2649 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2650 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2651 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2652 RTE_PTYPE_INNER_L4_SCTP,
2653 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2654 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2655 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2656 RTE_PTYPE_INNER_L4_ICMP,
2658 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2659 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2660 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2661 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2662 RTE_PTYPE_INNER_L4_FRAG,
2663 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2664 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2665 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2666 RTE_PTYPE_INNER_L4_NONFRAG,
2667 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2668 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2669 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2670 RTE_PTYPE_INNER_L4_UDP,
2671 /* [135] reserved */
2672 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2673 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2674 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2675 RTE_PTYPE_INNER_L4_TCP,
2676 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2677 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2678 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2679 RTE_PTYPE_INNER_L4_SCTP,
2680 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2681 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2682 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2683 RTE_PTYPE_INNER_L4_ICMP,
2684 /* [139] - [299] reserved */
2687 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
2688 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
2690 /* PPPoE --> IPv4 */
2691 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
2692 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2693 RTE_PTYPE_L4_FRAG,
2694 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
2695 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2696 RTE_PTYPE_L4_NONFRAG,
2697 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
2698 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2699 RTE_PTYPE_L4_UDP,
2700 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
2701 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2702 RTE_PTYPE_L4_TCP,
2703 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
2704 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2705 RTE_PTYPE_L4_SCTP,
2706 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
2707 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2708 RTE_PTYPE_L4_ICMP,
2710 /* PPPoE --> IPv6 */
2711 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
2712 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2713 RTE_PTYPE_L4_FRAG,
2714 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
2715 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2716 RTE_PTYPE_L4_NONFRAG,
2717 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
2718 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2719 RTE_PTYPE_L4_UDP,
2720 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
2721 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2722 RTE_PTYPE_L4_TCP,
2723 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
2724 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2725 RTE_PTYPE_L4_SCTP,
2726 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
2727 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2728 RTE_PTYPE_L4_ICMP,
2729 /* [314] - [324] reserved */
2731 /* IPv4/IPv6 --> GTPC/GTPU */
2732 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_GTPC,
2734 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2735 RTE_PTYPE_TUNNEL_GTPC,
2736 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2737 RTE_PTYPE_TUNNEL_GTPC,
2738 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2739 RTE_PTYPE_TUNNEL_GTPC,
2740 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2741 RTE_PTYPE_TUNNEL_GTPU,
2742 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2743 RTE_PTYPE_TUNNEL_GTPU,
2745 /* IPv4 --> GTPU --> IPv4 */
2746 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2747 RTE_PTYPE_TUNNEL_GTPU |
2748 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2749 RTE_PTYPE_INNER_L4_FRAG,
2750 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2751 RTE_PTYPE_TUNNEL_GTPU |
2752 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2753 RTE_PTYPE_INNER_L4_NONFRAG,
2754 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2755 RTE_PTYPE_TUNNEL_GTPU |
2756 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2757 RTE_PTYPE_INNER_L4_UDP,
2758 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2759 RTE_PTYPE_TUNNEL_GTPU |
2760 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2761 RTE_PTYPE_INNER_L4_TCP,
2762 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2763 RTE_PTYPE_TUNNEL_GTPU |
2764 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2765 RTE_PTYPE_INNER_L4_ICMP,
2767 /* IPv6 --> GTPU --> IPv4 */
2768 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2769 RTE_PTYPE_TUNNEL_GTPU |
2770 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2771 RTE_PTYPE_INNER_L4_FRAG,
2772 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2773 RTE_PTYPE_TUNNEL_GTPU |
2774 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2775 RTE_PTYPE_INNER_L4_NONFRAG,
2776 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2777 RTE_PTYPE_TUNNEL_GTPU |
2778 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2779 RTE_PTYPE_INNER_L4_UDP,
2780 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2781 RTE_PTYPE_TUNNEL_GTPU |
2782 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2783 RTE_PTYPE_INNER_L4_TCP,
2784 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2785 RTE_PTYPE_TUNNEL_GTPU |
2786 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2787 RTE_PTYPE_INNER_L4_ICMP,
2789 /* IPv4 --> GTPU --> IPv6 */
2790 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2791 RTE_PTYPE_TUNNEL_GTPU |
2792 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2793 RTE_PTYPE_INNER_L4_FRAG,
2794 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2795 RTE_PTYPE_TUNNEL_GTPU |
2796 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2797 RTE_PTYPE_INNER_L4_NONFRAG,
2798 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2799 RTE_PTYPE_TUNNEL_GTPU |
2800 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2801 RTE_PTYPE_INNER_L4_UDP,
2802 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2803 RTE_PTYPE_TUNNEL_GTPU |
2804 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2805 RTE_PTYPE_INNER_L4_TCP,
2806 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2807 RTE_PTYPE_TUNNEL_GTPU |
2808 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2809 RTE_PTYPE_INNER_L4_ICMP,
2811 /* IPv6 --> GTPU --> IPv6 */
2812 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2813 RTE_PTYPE_TUNNEL_GTPU |
2814 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2815 RTE_PTYPE_INNER_L4_FRAG,
2816 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2817 RTE_PTYPE_TUNNEL_GTPU |
2818 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2819 RTE_PTYPE_INNER_L4_NONFRAG,
2820 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2821 RTE_PTYPE_TUNNEL_GTPU |
2822 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2823 RTE_PTYPE_INNER_L4_UDP,
2824 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2825 RTE_PTYPE_TUNNEL_GTPU |
2826 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2827 RTE_PTYPE_INNER_L4_TCP,
2828 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2829 RTE_PTYPE_TUNNEL_GTPU |
2830 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2831 RTE_PTYPE_INNER_L4_ICMP,
2832 /* All others reserved */
2833 };
2835 return ptype_tbl;
2836 }
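/* Example added for illustration (not part of the upstream driver, excluded
 * from compilation): the Rx path translates the ptype index reported by the
 * hardware in the Rx descriptor into an mbuf packet type with a single table
 * lookup. 'ptype_index' and 'mb' below are hypothetical.
 */
#if 0
	const uint32_t *tbl = iavf_get_default_ptype_table();
	struct rte_mbuf *mb;		/* hypothetical received mbuf */
	uint16_t ptype_index;		/* hypothetical PTYPE field value */

	mb->packet_type = tbl[ptype_index];
	if (mb->packet_type & RTE_PTYPE_TUNNEL_MASK) {
		/* tunneled packet: inner layers described by INNER_* bits */
	}
#endif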